author     Michaël Zasso <targos@protonmail.com>    2020-05-05 09:19:02 +0200
committer  Michaël Zasso <targos@protonmail.com>    2020-05-12 16:12:13 +0200
commit     1d6adf7432defeb39b751a19c68335e8afb0d8ee (patch)
tree       7ab67931110b8d9db770d774c7a6d0d14c976c15
parent     aee36a04475a20c13663d1037aa6f175ff368bc7 (diff)
download   node-new-1d6adf7432defeb39b751a19c68335e8afb0d8ee.tar.gz
deps: update V8 to 8.3.110.9
PR-URL: https://github.com/nodejs/node/pull/32831
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
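After this update, the V8 version bundled in a Node.js build can be verified at runtime. A minimal check (illustrative only, not part of the commit; assumes a built node binary):

    // Prints the embedded V8 version; after this commit it should start with
    // 8.3.110 (Node.js may append its own -node.* patch suffix).
    console.log(process.versions.v8);

The canonical version constants (V8_MAJOR_VERSION, V8_MINOR_VERSION, V8_BUILD_NUMBER, V8_PATCH_LEVEL) live in deps/v8/include/v8-version.h, one of the files touched below.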
-rw-r--r--  deps/v8/.git-blame-ignore-revs | 3
-rw-r--r--  deps/v8/AUTHORS | 6
-rw-r--r--  deps/v8/BUILD.gn | 211
-rw-r--r--  deps/v8/COMMON_OWNERS | 4
-rw-r--r--  deps/v8/DEPS | 68
-rw-r--r--  deps/v8/base/trace_event/common/trace_event_common.h | 64
-rw-r--r--  deps/v8/gni/proto_library.gni | 36
-rw-r--r--  deps/v8/gni/v8.gni | 45
-rw-r--r--  deps/v8/include/OWNERS | 1
-rw-r--r--  deps/v8/include/cppgc/README.md | 5
-rw-r--r--  deps/v8/include/cppgc/allocation.h | 91
-rw-r--r--  deps/v8/include/cppgc/finalizer-trait.h | 90
-rw-r--r--  deps/v8/include/cppgc/garbage-collected.h | 53
-rw-r--r--  deps/v8/include/cppgc/gc-info.h | 43
-rw-r--r--  deps/v8/include/cppgc/heap.h | 31
-rw-r--r--  deps/v8/include/cppgc/internals.h | 41
-rw-r--r--  deps/v8/include/cppgc/platform.h | 31
-rw-r--r--  deps/v8/include/js_protocol.pdl | 36
-rw-r--r--  deps/v8/include/v8-fast-api-calls.h | 408
-rw-r--r--  deps/v8/include/v8-inspector.h | 58
-rw-r--r--  deps/v8/include/v8-internal.h | 4
-rw-r--r--  deps/v8/include/v8-version.h | 6
-rw-r--r--  deps/v8/include/v8.h | 411
-rw-r--r--  deps/v8/infra/testing/builders.pyl | 15
-rw-r--r--  deps/v8/samples/hello-world.cc | 9
-rw-r--r--  deps/v8/samples/process.cc | 34
-rw-r--r--  deps/v8/samples/shell.cc | 69
-rw-r--r--  deps/v8/src/DEPS | 1
-rw-r--r--  deps/v8/src/api/api-inl.h | 2
-rw-r--r--  deps/v8/src/api/api.cc | 384
-rw-r--r--  deps/v8/src/api/api.h | 10
-rw-r--r--  deps/v8/src/asmjs/asm-js.cc | 64
-rw-r--r--  deps/v8/src/ast/ast-traversal-visitor.h | 16
-rw-r--r--  deps/v8/src/ast/ast-value-factory.cc | 159
-rw-r--r--  deps/v8/src/ast/ast-value-factory.h | 94
-rw-r--r--  deps/v8/src/ast/ast.cc | 152
-rw-r--r--  deps/v8/src/ast/ast.h | 340
-rw-r--r--  deps/v8/src/ast/modules.cc | 28
-rw-r--r--  deps/v8/src/ast/modules.h | 6
-rw-r--r--  deps/v8/src/ast/prettyprinter.cc | 89
-rw-r--r--  deps/v8/src/ast/prettyprinter.h | 10
-rw-r--r--  deps/v8/src/ast/scopes.cc | 76
-rw-r--r--  deps/v8/src/ast/scopes.h | 19
-rw-r--r--  deps/v8/src/ast/variables.h | 3
-rw-r--r--  deps/v8/src/base/address-region.h | 10
-rw-r--r--  deps/v8/src/base/atomic-utils.h | 29
-rw-r--r--  deps/v8/src/base/atomicops.h | 87
-rw-r--r--  deps/v8/src/base/atomicops_internals_atomicword_compat.h | 6
-rw-r--r--  deps/v8/src/base/atomicops_internals_portable.h | 10
-rw-r--r--  deps/v8/src/base/atomicops_internals_std.h | 14
-rw-r--r--  deps/v8/src/base/bits-iterator.h | 58
-rw-r--r--  deps/v8/src/base/build_config.h | 17
-rw-r--r--  deps/v8/src/base/cpu.cc | 78
-rw-r--r--  deps/v8/src/base/enum-set.h | 41
-rw-r--r--  deps/v8/src/base/macros.h | 10
-rw-r--r--  deps/v8/src/base/platform/platform-freebsd.cc | 18
-rw-r--r--  deps/v8/src/base/platform/platform-macos.cc | 5
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc | 44
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc | 26
-rw-r--r--  deps/v8/src/base/platform/platform.h | 36
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 150
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 156
-rw-r--r--  deps/v8/src/builtins/array-lastindexof.tq | 5
-rw-r--r--  deps/v8/src/builtins/base.tq | 117
-rw-r--r--  deps/v8/src/builtins/builtins-api.cc | 25
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc | 33
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc | 12
-rw-r--r--  deps/v8/src/builtins/builtins-async-iterator-gen.cc | 23
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc | 42
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.h | 7
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc | 37
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc | 52
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.h | 20
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc | 23
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 108
-rw-r--r--  deps/v8/src/builtins/builtins-descriptors.h | 28
-rw-r--r--  deps/v8/src/builtins/builtins-generator-gen.cc | 12
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc | 10
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc | 108
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.h | 16
-rw-r--r--  deps/v8/src/builtins/builtins-microtask-queue-gen.cc | 94
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc | 26
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.cc | 5
-rw-r--r--  deps/v8/src/builtins/builtins-promise-gen.h | 2
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc | 7
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc | 10
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.h | 11
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer.cc | 24
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc | 51
-rw-r--r--  deps/v8/src/builtins/builtins-string.tq | 2
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.cc | 11
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.h | 2
-rw-r--r--  deps/v8/src/builtins/builtins-utils-inl.h | 14
-rw-r--r--  deps/v8/src/builtins/builtins-utils.h | 23
-rw-r--r--  deps/v8/src/builtins/builtins-wasm-gen.cc | 186
-rw-r--r--  deps/v8/src/builtins/builtins-weak-refs.cc | 79
-rw-r--r--  deps/v8/src/builtins/builtins.cc | 2
-rw-r--r--  deps/v8/src/builtins/cast.tq | 4
-rw-r--r--  deps/v8/src/builtins/convert.tq | 6
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 265
-rw-r--r--  deps/v8/src/builtins/internal-coverage.tq | 32
-rw-r--r--  deps/v8/src/builtins/iterator.tq | 8
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 104
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 104
-rw-r--r--  deps/v8/src/builtins/number.tq | 2
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 159
-rw-r--r--  deps/v8/src/builtins/promise-abstract-operations.tq | 52
-rw-r--r--  deps/v8/src/builtins/promise-misc.tq | 59
-rw-r--r--  deps/v8/src/builtins/promise-reaction-job.tq | 50
-rw-r--r--  deps/v8/src/builtins/promise-resolve.tq | 9
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 363
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc | 7
-rw-r--r--  deps/v8/src/builtins/string-repeat.tq | 2
-rw-r--r--  deps/v8/src/builtins/torque-internal.tq | 11
-rw-r--r--  deps/v8/src/builtins/typed-array-createtypedarray.tq | 2
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 619
-rw-r--r--  deps/v8/src/codegen/OWNERS | 2
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 66
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.h | 21
-rw-r--r--  deps/v8/src/codegen/arm/register-arm.h | 12
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64-inl.h | 13
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc | 94
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.h | 19
-rw-r--r--  deps/v8/src/codegen/arm64/constants-arm64.h | 39
-rw-r--r--  deps/v8/src/codegen/arm64/instructions-arm64.cc | 22
-rw-r--r--  deps/v8/src/codegen/arm64/instructions-arm64.h | 32
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h | 232
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 392
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 130
-rw-r--r--  deps/v8/src/codegen/arm64/register-arm64.h | 26
-rw-r--r--  deps/v8/src/codegen/assembler-arch.h | 2
-rw-r--r--  deps/v8/src/codegen/assembler-inl.h | 2
-rw-r--r--  deps/v8/src/codegen/assembler.cc | 3
-rw-r--r--  deps/v8/src/codegen/assembler.h | 2
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.cc | 568
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.h | 196
-rw-r--r--  deps/v8/src/codegen/compiler.cc | 489
-rw-r--r--  deps/v8/src/codegen/compiler.h | 49
-rw-r--r--  deps/v8/src/codegen/constant-pool.cc | 4
-rw-r--r--  deps/v8/src/codegen/constant-pool.h | 4
-rw-r--r--  deps/v8/src/codegen/constants-arch.h | 2
-rw-r--r--  deps/v8/src/codegen/cpu-features.h | 2
-rw-r--r--  deps/v8/src/codegen/external-reference.cc | 8
-rw-r--r--  deps/v8/src/codegen/external-reference.h | 1
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.h | 2
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 88
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 59
-rw-r--r--  deps/v8/src/codegen/ia32/sse-instr.h | 6
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.cc | 27
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.h | 170
-rw-r--r--  deps/v8/src/codegen/machine-type.h | 6
-rw-r--r--  deps/v8/src/codegen/macro-assembler.h | 2
-rw-r--r--  deps/v8/src/codegen/mips/constants-mips.h | 15
-rw-r--r--  deps/v8/src/codegen/mips/interface-descriptors-mips.cc | 27
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 54
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.h | 14
-rw-r--r--  deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc | 27
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 54
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 14
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.cc | 4
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.h | 4
-rw-r--r--  deps/v8/src/codegen/ppc/constants-ppc.cc | 4
-rw-r--r--  deps/v8/src/codegen/ppc/constants-ppc.h | 16
-rw-r--r--  deps/v8/src/codegen/ppc/cpu-ppc.cc | 4
-rw-r--r--  deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc | 4
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 84
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 22
-rw-r--r--  deps/v8/src/codegen/ppc/register-ppc.h | 11
-rw-r--r--  deps/v8/src/codegen/register-arch.h | 2
-rw-r--r--  deps/v8/src/codegen/register-configuration.cc | 2
-rw-r--r--  deps/v8/src/codegen/reloc-info.cc | 2
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390-inl.h | 77
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.cc | 10
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.h | 17
-rw-r--r--  deps/v8/src/codegen/s390/constants-s390.h | 11
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 320
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.h | 91
-rw-r--r--  deps/v8/src/codegen/s390/register-s390.h | 12
-rw-r--r--  deps/v8/src/codegen/source-position-table.cc | 63
-rw-r--r--  deps/v8/src/codegen/source-position-table.h | 46
-rw-r--r--  deps/v8/src/codegen/tnode.h | 6
-rw-r--r--  deps/v8/src/codegen/turbo-assembler.h | 1
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.cc | 76
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.h | 57
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 125
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.h | 112
-rw-r--r--  deps/v8/src/codegen/x64/sse-instr.h | 24
-rw-r--r--  deps/v8/src/common/globals.h | 43
-rw-r--r--  deps/v8/src/common/message-template.h | 13
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 237
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 107
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 306
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 81
-rw-r--r--  deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.cc | 3
-rw-r--r--  deps/v8/src/compiler/backend/code-generator-impl.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc | 61
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h | 5
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 316
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 24
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 24
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 144
-rw-r--r--  deps/v8/src/compiler/backend/instruction-codes.h | 3
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector-impl.h | 20
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc | 137
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h | 21
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h | 8
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.cc | 17
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 37
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h | 3
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 11
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 5
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 36
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 3
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 13
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 5
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 22
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 8
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc | 17
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 915
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-codes-s390.h | 86
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc | 82
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 465
-rw-r--r--  deps/v8/src/compiler/backend/unwinding-info-writer.h | 2
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 676
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 3
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 3
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 73
-rw-r--r--  deps/v8/src/compiler/branch-elimination.cc | 5
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.cc | 23
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 70
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 3
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 115
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 112
-rw-r--r--  deps/v8/src/compiler/common-node-cache.cc | 6
-rw-r--r--  deps/v8/src/compiler/common-node-cache.h | 39
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc | 3
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 28
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 9
-rw-r--r--  deps/v8/src/compiler/constant-folding-reducer.cc | 101
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.cc | 5
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 111
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 14
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 4
-rw-r--r--  deps/v8/src/compiler/heap-refs.h | 8
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 10
-rw-r--r--  deps/v8/src/compiler/int64-lowering.h | 19
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 141
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 31
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 41
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 3
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 97
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 1
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 6
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 14
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 24
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 40
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 76
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 4
-rw-r--r--  deps/v8/src/compiler/linkage.h | 22
-rw-r--r--  deps/v8/src/compiler/machine-graph.cc | 9
-rw-r--r--  deps/v8/src/compiler/machine-graph.h | 2
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 58
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.h | 1
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 100
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 6
-rw-r--r--  deps/v8/src/compiler/node-cache.cc | 125
-rw-r--r--  deps/v8/src/compiler/node-cache.h | 40
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 33
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 64
-rw-r--r--  deps/v8/src/compiler/operation-typer.cc | 96
-rw-r--r--  deps/v8/src/compiler/operator.h | 23
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 11
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.cc | 1
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 6
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 3
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc | 19
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 162
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.h | 1
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 137
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 103
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 48
-rw-r--r--  deps/v8/src/compiler/state-values-utils.cc | 30
-rw-r--r--  deps/v8/src/compiler/state-values-utils.h | 18
-rw-r--r--  deps/v8/src/compiler/type-cache.h | 2
-rw-r--r--  deps/v8/src/compiler/typed-optimization.cc | 6
-rw-r--r--  deps/v8/src/compiler/typer.cc | 424
-rw-r--r--  deps/v8/src/compiler/types.cc | 36
-rw-r--r--  deps/v8/src/compiler/types.h | 37
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 67
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 1257
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 81
-rw-r--r--  deps/v8/src/d8/OWNERS | 1
-rw-r--r--  deps/v8/src/d8/async-hooks-wrapper.cc | 42
-rw-r--r--  deps/v8/src/d8/d8-console.cc | 4
-rw-r--r--  deps/v8/src/d8/d8-posix.cc | 219
-rw-r--r--  deps/v8/src/d8/d8.cc | 485
-rw-r--r--  deps/v8/src/d8/d8.h | 12
-rw-r--r--  deps/v8/src/date/date.cc | 2
-rw-r--r--  deps/v8/src/debug/arm64/debug-arm64.cc | 2
-rw-r--r--  deps/v8/src/debug/debug-coverage.cc | 6
-rw-r--r--  deps/v8/src/debug/debug-evaluate.cc | 8
-rw-r--r--  deps/v8/src/debug/debug-interface.h | 16
-rw-r--r--  deps/v8/src/debug/debug-scope-iterator.cc | 37
-rw-r--r--  deps/v8/src/debug/debug-scopes.cc | 13
-rw-r--r--  deps/v8/src/debug/debug.cc | 71
-rw-r--r--  deps/v8/src/debug/debug.h | 9
-rw-r--r--  deps/v8/src/debug/liveedit.cc | 2
-rw-r--r--  deps/v8/src/debug/ppc/debug-ppc.cc | 4
-rw-r--r--  deps/v8/src/debug/s390/debug-s390.cc | 3
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc | 118
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h | 61
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/gdb-server.cc | 38
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/gdb-server.h | 43
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/session.cc | 48
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/session.h | 59
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/target.cc | 65
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/target.h | 57
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/transport.cc | 444
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/transport.h | 183
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/util.h | 27
-rw-r--r--  deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc | 27
-rw-r--r--  deps/v8/src/deoptimizer/deoptimize-reason.h | 1
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.cc | 51
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.h | 9
-rw-r--r--  deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc | 7
-rw-r--r--  deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc | 15
-rw-r--r--  deps/v8/src/diagnostics/arm64/disasm-arm64.cc | 27
-rw-r--r--  deps/v8/src/diagnostics/code-tracer.h | 4
-rw-r--r--  deps/v8/src/diagnostics/ia32/disasm-ia32.cc | 2
-rw-r--r--  deps/v8/src/diagnostics/objects-debug.cc | 39
-rw-r--r--  deps/v8/src/diagnostics/objects-printer.cc | 53
-rw-r--r--  deps/v8/src/diagnostics/perf-jit.h | 5
-rw-r--r--  deps/v8/src/diagnostics/ppc/disasm-ppc.cc | 4
-rw-r--r--  deps/v8/src/diagnostics/unwinder.cc | 11
-rw-r--r--  deps/v8/src/diagnostics/unwinding-info-win64.cc | 33
-rw-r--r--  deps/v8/src/diagnostics/unwinding-info-win64.h | 6
-rw-r--r--  deps/v8/src/diagnostics/x64/disasm-x64.cc | 45
-rw-r--r--  deps/v8/src/execution/arguments-inl.h | 18
-rw-r--r--  deps/v8/src/execution/arguments.h | 46
-rw-r--r--  deps/v8/src/execution/arm/frame-constants-arm.h | 39
-rw-r--r--  deps/v8/src/execution/arm/simulator-arm.cc | 132
-rw-r--r--  deps/v8/src/execution/arm/simulator-arm.h | 2
-rw-r--r--  deps/v8/src/execution/arm64/frame-constants-arm64.h | 41
-rw-r--r--  deps/v8/src/execution/arm64/pointer-authentication-arm64.h | 164
-rw-r--r--  deps/v8/src/execution/arm64/simulator-arm64.cc | 89
-rw-r--r--  deps/v8/src/execution/arm64/simulator-arm64.h | 126
-rw-r--r--  deps/v8/src/execution/execution.cc | 7
-rw-r--r--  deps/v8/src/execution/frame-constants.h | 15
-rw-r--r--  deps/v8/src/execution/frames-inl.h | 25
-rw-r--r--  deps/v8/src/execution/frames.cc | 44
-rw-r--r--  deps/v8/src/execution/frames.h | 43
-rw-r--r--  deps/v8/src/execution/futex-emulation.cc | 62
-rw-r--r--  deps/v8/src/execution/futex-emulation.h | 13
-rw-r--r--  deps/v8/src/execution/ia32/frame-constants-ia32.h | 37
-rw-r--r--  deps/v8/src/execution/isolate-data.h | 17
-rw-r--r--  deps/v8/src/execution/isolate-inl.h | 12
-rw-r--r--  deps/v8/src/execution/isolate-utils-inl.h | 14
-rw-r--r--  deps/v8/src/execution/isolate.cc | 103
-rw-r--r--  deps/v8/src/execution/isolate.h | 151
-rw-r--r--  deps/v8/src/execution/messages.cc | 68
-rw-r--r--  deps/v8/src/execution/messages.h | 4
-rw-r--r--  deps/v8/src/execution/microtask-queue.cc | 1
-rw-r--r--  deps/v8/src/execution/mips/frame-constants-mips.h | 37
-rw-r--r--  deps/v8/src/execution/mips/simulator-mips.cc | 26
-rw-r--r--  deps/v8/src/execution/mips64/frame-constants-mips64.h | 37
-rw-r--r--  deps/v8/src/execution/mips64/simulator-mips64.cc | 26
-rw-r--r--  deps/v8/src/execution/off-thread-isolate.cc | 45
-rw-r--r--  deps/v8/src/execution/off-thread-isolate.h | 99
-rw-r--r--  deps/v8/src/execution/pointer-authentication-dummy.h | 56
-rw-r--r--  deps/v8/src/execution/pointer-authentication.h | 65
-rw-r--r--  deps/v8/src/execution/ppc/frame-constants-ppc.cc | 4
-rw-r--r--  deps/v8/src/execution/ppc/frame-constants-ppc.h | 38
-rw-r--r--  deps/v8/src/execution/ppc/simulator-ppc.cc | 126
-rw-r--r--  deps/v8/src/execution/ppc/simulator-ppc.h | 1
-rw-r--r--  deps/v8/src/execution/runtime-profiler.cc | 22
-rw-r--r--  deps/v8/src/execution/s390/frame-constants-s390.h | 38
-rw-r--r--  deps/v8/src/execution/s390/simulator-s390.cc | 471
-rw-r--r--  deps/v8/src/execution/s390/simulator-s390.h | 4
-rw-r--r--  deps/v8/src/execution/simulator-base.h | 1
-rw-r--r--  deps/v8/src/execution/simulator.h | 2
-rw-r--r--  deps/v8/src/execution/stack-guard.h | 2
-rw-r--r--  deps/v8/src/execution/vm-state-inl.h | 10
-rw-r--r--  deps/v8/src/execution/vm-state.h | 9
-rw-r--r--  deps/v8/src/execution/x64/frame-constants-x64.h | 37
-rw-r--r--  deps/v8/src/extensions/cputracemark-extension.cc | 10
-rw-r--r--  deps/v8/src/extensions/externalize-string-extension.cc | 37
-rw-r--r--  deps/v8/src/extensions/statistics-extension.cc | 20
-rw-r--r--  deps/v8/src/extensions/vtunedomain-support-extension.cc | 14
-rw-r--r--  deps/v8/src/flags/flag-definitions.h | 101
-rw-r--r--  deps/v8/src/flags/flags.cc | 9
-rw-r--r--  deps/v8/src/handles/factory-handles.h | 23
-rw-r--r--  deps/v8/src/handles/global-handles.cc | 27
-rw-r--r--  deps/v8/src/handles/global-handles.h | 8
-rw-r--r--  deps/v8/src/handles/handles-inl.h | 53
-rw-r--r--  deps/v8/src/handles/handles.cc | 2
-rw-r--r--  deps/v8/src/handles/handles.h | 133
-rw-r--r--  deps/v8/src/handles/local-handles-inl.h | 61
-rw-r--r--  deps/v8/src/handles/local-handles.cc | 58
-rw-r--r--  deps/v8/src/handles/local-handles.h | 57
-rw-r--r--  deps/v8/src/handles/maybe-handles.h | 8
-rw-r--r--  deps/v8/src/heap/OWNERS | 4
-rw-r--r--  deps/v8/src/heap/array-buffer-sweeper.cc | 114
-rw-r--r--  deps/v8/src/heap/array-buffer-sweeper.h | 36
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker-inl.h | 18
-rw-r--r--  deps/v8/src/heap/array-buffer-tracker.cc | 4
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc/allocation.cc | 23
-rw-r--r--  deps/v8/src/heap/cppgc/asm/x64/push_registers.S | 52
-rw-r--r--  deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S | 45
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info-table.cc | 124
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info-table.h | 113
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info.cc | 18
-rw-r--r--  deps/v8/src/heap/cppgc/globals.h | 39
-rw-r--r--  deps/v8/src/heap/cppgc/heap-inl.h | 36
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header-inl.h | 148
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header.cc | 30
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header.h | 127
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc | 28
-rw-r--r--  deps/v8/src/heap/cppgc/heap.h | 35
-rw-r--r--  deps/v8/src/heap/cppgc/platform.cc | 29
-rw-r--r--  deps/v8/src/heap/cppgc/sanitizers.h | 38
-rw-r--r--  deps/v8/src/heap/cppgc/stack.cc | 136
-rw-r--r--  deps/v8/src/heap/cppgc/stack.h | 49
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc | 1
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 9
-rw-r--r--  deps/v8/src/heap/factory-base-inl.h | 99
-rw-r--r--  deps/v8/src/heap/factory-base.cc | 614
-rw-r--r--  deps/v8/src/heap/factory-base.h | 187
-rw-r--r--  deps/v8/src/heap/factory-inl.h | 43
-rw-r--r--  deps/v8/src/heap/factory.cc | 581
-rw-r--r--  deps/v8/src/heap/factory.h | 169
-rw-r--r--  deps/v8/src/heap/finalization-registry-cleanup-task.cc | 77
-rw-r--r--  deps/v8/src/heap/finalization-registry-cleanup-task.h | 36
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.cc | 38
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.h | 33
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 16
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 7
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 64
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h | 1
-rw-r--r--  deps/v8/src/heap/heap.cc | 363
-rw-r--r--  deps/v8/src/heap/heap.h | 102
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc | 69
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.h | 14
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 222
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 39
-rw-r--r--  deps/v8/src/heap/local-heap.cc | 71
-rw-r--r--  deps/v8/src/heap/local-heap.h | 89
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 10
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 36
-rw-r--r--  deps/v8/src/heap/marking-worklist.h | 4
-rw-r--r--  deps/v8/src/heap/memory-measurement.cc | 11
-rw-r--r--  deps/v8/src/heap/memory-measurement.h | 4
-rw-r--r--  deps/v8/src/heap/object-stats.h | 2
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 3
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc | 27
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 5
-rw-r--r--  deps/v8/src/heap/off-thread-factory-inl.h | 17
-rw-r--r--  deps/v8/src/heap/off-thread-factory.cc | 163
-rw-r--r--  deps/v8/src/heap/off-thread-factory.h | 54
-rw-r--r--  deps/v8/src/heap/read-only-heap.cc | 9
-rw-r--r--  deps/v8/src/heap/safepoint.cc | 134
-rw-r--r--  deps/v8/src/heap/safepoint.h | 77
-rw-r--r--  deps/v8/src/heap/scavenge-job.cc | 115
-rw-r--r--  deps/v8/src/heap/scavenge-job.h | 66
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 19
-rw-r--r--  deps/v8/src/heap/spaces.cc | 53
-rw-r--r--  deps/v8/src/heap/spaces.h | 69
-rw-r--r--  deps/v8/src/heap/third-party/heap-api.h | 7
-rw-r--r--  deps/v8/src/heap/worklist.h | 50
-rw-r--r--  deps/v8/src/ic/accessor-assembler.cc | 152
-rw-r--r--  deps/v8/src/ic/accessor-assembler.h | 33
-rw-r--r--  deps/v8/src/ic/ic.cc | 65
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc | 11
-rw-r--r--  deps/v8/src/ic/stub-cache.h | 15
-rw-r--r--  deps/v8/src/init/bootstrapper.cc | 77
-rw-r--r--  deps/v8/src/init/heap-symbols.h | 1
-rw-r--r--  deps/v8/src/inspector/custom-preview.cc | 6
-rw-r--r--  deps/v8/src/inspector/injected-script.cc | 160
-rw-r--r--  deps/v8/src/inspector/remote-object-id.cc | 19
-rw-r--r--  deps/v8/src/inspector/string-16.cc | 15
-rw-r--r--  deps/v8/src/inspector/string-16.h | 6
-rw-r--r--  deps/v8/src/inspector/string-util.cc | 59
-rw-r--r--  deps/v8/src/inspector/string-util.h | 55
-rw-r--r--  deps/v8/src/inspector/v8-console-agent-impl.cc | 10
-rw-r--r--  deps/v8/src/inspector/v8-console.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.cc | 289
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.h | 2
-rw-r--r--  deps/v8/src/inspector/v8-debugger-script.cc | 13
-rw-r--r--  deps/v8/src/inspector/v8-debugger-script.h | 3
-rw-r--r--  deps/v8/src/inspector/v8-debugger.cc | 28
-rw-r--r--  deps/v8/src/inspector/v8-debugger.h | 3
-rw-r--r--  deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc | 50
-rw-r--r--  deps/v8/src/inspector/v8-inspector-impl.cc | 20
-rw-r--r--  deps/v8/src/inspector/v8-inspector-impl.h | 16
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.cc | 118
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.h | 41
-rw-r--r--  deps/v8/src/inspector/v8-profiler-agent-impl.cc | 79
-rw-r--r--  deps/v8/src/inspector/v8-profiler-agent-impl.h | 1
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.cc | 142
-rw-r--r--  deps/v8/src/inspector/v8-schema-agent-impl.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-stack-trace-impl.cc | 23
-rw-r--r--  deps/v8/src/inspector/value-mirror.cc | 45
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc | 65
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h | 17
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.cc | 40
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.h | 14
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc | 460
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.h | 41
-rw-r--r--  deps/v8/src/interpreter/bytecode-operands.h | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.cc | 7
-rw-r--r--  deps/v8/src/interpreter/bytecode-register.cc | 15
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h | 10
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.cc | 37
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.h | 15
-rw-r--r--  deps/v8/src/interpreter/control-flow-builders.cc | 31
-rw-r--r--  deps/v8/src/interpreter/control-flow-builders.h | 26
-rw-r--r--  deps/v8/src/interpreter/handler-table-builder.cc | 7
-rw-r--r--  deps/v8/src/interpreter/handler-table-builder.h | 3
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc | 35
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.h | 3
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc | 137
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc | 51
-rw-r--r--  deps/v8/src/interpreter/interpreter.h | 3
-rw-r--r--  deps/v8/src/json/json-parser.cc | 2
-rw-r--r--  deps/v8/src/libplatform/tracing/DEPS | 1
-rw-r--r--  deps/v8/src/libplatform/tracing/json-trace-event-listener.h | 4
-rw-r--r--  deps/v8/src/libplatform/tracing/trace-event-listener.h | 4
-rw-r--r--  deps/v8/src/libsampler/sampler.cc | 6
-rw-r--r--  deps/v8/src/logging/counters-definitions.h | 32
-rw-r--r--  deps/v8/src/logging/counters.cc | 10
-rw-r--r--  deps/v8/src/logging/counters.h | 9
-rw-r--r--  deps/v8/src/logging/log.cc | 63
-rw-r--r--  deps/v8/src/logging/log.h | 21
-rw-r--r--  deps/v8/src/logging/off-thread-logger.h | 31
-rw-r--r--  deps/v8/src/numbers/conversions-inl.h | 4
-rw-r--r--  deps/v8/src/numbers/conversions.cc | 183
-rw-r--r--  deps/v8/src/numbers/conversions.h | 6
-rw-r--r--  deps/v8/src/objects/allocation-site-inl.h | 4
-rw-r--r--  deps/v8/src/objects/api-callbacks-inl.h | 1
-rw-r--r--  deps/v8/src/objects/api-callbacks.h | 21
-rw-r--r--  deps/v8/src/objects/api-callbacks.tq | 16
-rw-r--r--  deps/v8/src/objects/arguments-inl.h | 2
-rw-r--r--  deps/v8/src/objects/arguments.h | 3
-rw-r--r--  deps/v8/src/objects/backing-store.cc | 74
-rw-r--r--  deps/v8/src/objects/backing-store.h | 32
-rw-r--r--  deps/v8/src/objects/bigint.cc | 58
-rw-r--r--  deps/v8/src/objects/bigint.h | 13
-rw-r--r--  deps/v8/src/objects/code-inl.h | 1
-rw-r--r--  deps/v8/src/objects/code.cc | 17
-rw-r--r--  deps/v8/src/objects/code.h | 10
-rw-r--r--  deps/v8/src/objects/compilation-cache.h | 4
-rw-r--r--  deps/v8/src/objects/contexts.cc | 10
-rw-r--r--  deps/v8/src/objects/contexts.h | 21
-rw-r--r--  deps/v8/src/objects/contexts.tq | 2
-rw-r--r--  deps/v8/src/objects/debug-objects-inl.h | 11
-rw-r--r--  deps/v8/src/objects/debug-objects.cc | 54
-rw-r--r--  deps/v8/src/objects/debug-objects.h | 71
-rw-r--r--  deps/v8/src/objects/debug-objects.tq | 38
-rw-r--r--  deps/v8/src/objects/descriptor-array.h | 8
-rw-r--r--  deps/v8/src/objects/descriptor-array.tq | 2
-rw-r--r--  deps/v8/src/objects/dictionary-inl.h | 61
-rw-r--r--  deps/v8/src/objects/dictionary.h | 80
-rw-r--r--  deps/v8/src/objects/elements.cc | 48
-rw-r--r--  deps/v8/src/objects/elements.h | 9
-rw-r--r--  deps/v8/src/objects/embedder-data-array-inl.h | 1
-rw-r--r--  deps/v8/src/objects/embedder-data-array.h | 4
-rw-r--r--  deps/v8/src/objects/embedder-data-array.tq | 1
-rw-r--r--  deps/v8/src/objects/feedback-cell-inl.h | 14
-rw-r--r--  deps/v8/src/objects/feedback-cell.h | 9
-rw-r--r--  deps/v8/src/objects/feedback-vector-inl.h | 7
-rw-r--r--  deps/v8/src/objects/feedback-vector.cc | 33
-rw-r--r--  deps/v8/src/objects/feedback-vector.h | 7
-rw-r--r--  deps/v8/src/objects/fixed-array.tq | 6
-rw-r--r--  deps/v8/src/objects/free-space-inl.h | 3
-rw-r--r--  deps/v8/src/objects/hash-table-inl.h | 18
-rw-r--r--  deps/v8/src/objects/hash-table.h | 58
-rw-r--r--  deps/v8/src/objects/heap-object.h | 7
-rw-r--r--  deps/v8/src/objects/heap-object.tq | 2
-rw-r--r--  deps/v8/src/objects/instance-type.h | 1
-rw-r--r--  deps/v8/src/objects/intl-objects.cc | 132
-rw-r--r--  deps/v8/src/objects/intl-objects.h | 36
-rw-r--r--  deps/v8/src/objects/intl-objects.tq | 62
-rw-r--r--  deps/v8/src/objects/js-array-buffer-inl.h | 63
-rw-r--r--  deps/v8/src/objects/js-array-buffer.cc | 17
-rw-r--r--  deps/v8/src/objects/js-array-buffer.h | 53
-rw-r--r--  deps/v8/src/objects/js-array-buffer.tq | 23
-rw-r--r--  deps/v8/src/objects/js-break-iterator-inl.h | 10
-rw-r--r--  deps/v8/src/objects/js-break-iterator.cc | 89
-rw-r--r--  deps/v8/src/objects/js-break-iterator.h | 8
-rw-r--r--  deps/v8/src/objects/js-collator.cc | 52
-rw-r--r--  deps/v8/src/objects/js-date-time-format-inl.h | 4
-rw-r--r--  deps/v8/src/objects/js-date-time-format.cc | 166
-rw-r--r--  deps/v8/src/objects/js-date-time-format.h | 32
-rw-r--r--  deps/v8/src/objects/js-display-names-inl.h | 6
-rw-r--r--  deps/v8/src/objects/js-display-names.cc | 15
-rw-r--r--  deps/v8/src/objects/js-display-names.h | 12
-rw-r--r--  deps/v8/src/objects/js-generator-inl.h | 7
-rw-r--r--  deps/v8/src/objects/js-generator.h | 17
-rw-r--r--  deps/v8/src/objects/js-generator.tq | 6
-rw-r--r--  deps/v8/src/objects/js-list-format.cc | 45
-rw-r--r--  deps/v8/src/objects/js-list-format.h | 8
-rw-r--r--  deps/v8/src/objects/js-number-format-inl.h | 14
-rw-r--r--  deps/v8/src/objects/js-number-format.cc | 379
-rw-r--r--  deps/v8/src/objects/js-number-format.h | 21
-rw-r--r--  deps/v8/src/objects/js-objects-inl.h | 11
-rw-r--r--  deps/v8/src/objects/js-objects.cc | 53
-rw-r--r--  deps/v8/src/objects/js-objects.h | 6
-rw-r--r--  deps/v8/src/objects/js-plural-rules-inl.h | 6
-rw-r--r--  deps/v8/src/objects/js-plural-rules.cc | 16
-rw-r--r--  deps/v8/src/objects/js-plural-rules.h | 13
-rw-r--r--  deps/v8/src/objects/js-promise-inl.h | 1
-rw-r--r--  deps/v8/src/objects/js-promise.h | 2
-rw-r--r--  deps/v8/src/objects/js-regexp-string-iterator-inl.h | 1
-rw-r--r--  deps/v8/src/objects/js-regexp-string-iterator.h | 2
-rw-r--r--  deps/v8/src/objects/js-relative-time-format-inl.h | 17
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.cc | 126
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.h | 31
-rw-r--r--  deps/v8/src/objects/js-segment-iterator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.h | 8
-rw-r--r--  deps/v8/src/objects/js-segmenter.cc | 15
-rw-r--r--  deps/v8/src/objects/js-segmenter.h | 8
-rw-r--r--  deps/v8/src/objects/js-weak-refs-inl.h | 131
-rw-r--r--  deps/v8/src/objects/js-weak-refs.h | 62
-rw-r--r--  deps/v8/src/objects/js-weak-refs.tq | 20
-rw-r--r--  deps/v8/src/objects/keys.cc | 37
-rw-r--r--  deps/v8/src/objects/keys.h | 1
-rw-r--r--  deps/v8/src/objects/literal-objects-inl.h | 2
-rw-r--r--  deps/v8/src/objects/literal-objects.cc | 115
-rw-r--r--  deps/v8/src/objects/literal-objects.h | 12
-rw-r--r--  deps/v8/src/objects/lookup.cc | 3
-rw-r--r--  deps/v8/src/objects/map-inl.h | 2
-rw-r--r--  deps/v8/src/objects/map.cc | 47
-rw-r--r--  deps/v8/src/objects/map.h | 25
-rw-r--r--  deps/v8/src/objects/module-inl.h | 18
-rw-r--r--  deps/v8/src/objects/name.h | 18
-rw-r--r--  deps/v8/src/objects/object-list-macros.h | 11
-rw-r--r--  deps/v8/src/objects/object-macros.h | 8
-rw-r--r--  deps/v8/src/objects/objects-body-descriptors-inl.h | 53
-rw-r--r--  deps/v8/src/objects/objects-body-descriptors.h | 4
-rw-r--r--  deps/v8/src/objects/objects-definitions.h | 13
-rw-r--r--  deps/v8/src/objects/objects-inl.h | 11
-rw-r--r--  deps/v8/src/objects/objects.cc | 434
-rw-r--r--  deps/v8/src/objects/objects.h | 25
-rw-r--r--  deps/v8/src/objects/oddball-inl.h | 8
-rw-r--r--  deps/v8/src/objects/ordered-hash-table-inl.h | 24
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.cc | 28
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.h | 24
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.tq | 6
-rw-r--r--  deps/v8/src/objects/promise.tq | 8
-rw-r--r--  deps/v8/src/objects/property-descriptor-object.h | 16
-rw-r--r--  deps/v8/src/objects/property-descriptor-object.tq | 14
-rw-r--r--  deps/v8/src/objects/property-descriptor.cc | 4
-rw-r--r--  deps/v8/src/objects/scope-info.cc | 38
-rw-r--r--  deps/v8/src/objects/scope-info.h | 8
-rw-r--r--  deps/v8/src/objects/script.h | 8
-rw-r--r--  deps/v8/src/objects/shared-function-info-inl.h | 43
-rw-r--r--  deps/v8/src/objects/shared-function-info.h | 45
-rw-r--r--  deps/v8/src/objects/shared-function-info.tq | 16
-rw-r--r--  deps/v8/src/objects/slots-inl.h | 6
-rw-r--r--  deps/v8/src/objects/slots.h | 1
-rw-r--r--  deps/v8/src/objects/source-text-module.cc | 11
-rw-r--r--  deps/v8/src/objects/source-text-module.h | 29
-rw-r--r--  deps/v8/src/objects/source-text-module.tq | 6
-rw-r--r--  deps/v8/src/objects/stack-frame-info-inl.h | 3
-rw-r--r--  deps/v8/src/objects/stack-frame-info.cc | 38
-rw-r--r--  deps/v8/src/objects/stack-frame-info.h | 3
-rw-r--r--  deps/v8/src/objects/string-inl.h | 23
-rw-r--r--  deps/v8/src/objects/string-table-inl.h | 8
-rw-r--r--  deps/v8/src/objects/string-table.h | 10
-rw-r--r--  deps/v8/src/objects/string.cc | 60
-rw-r--r--  deps/v8/src/objects/string.h | 12
-rw-r--r--  deps/v8/src/objects/string.tq | 2
-rw-r--r--  deps/v8/src/objects/struct-inl.h | 3
-rw-r--r--  deps/v8/src/objects/struct.h | 3
-rw-r--r--  deps/v8/src/objects/tagged-index.h | 84
-rw-r--r--  deps/v8/src/objects/template-objects-inl.h | 2
-rw-r--r--  deps/v8/src/objects/template-objects.h | 2
-rw-r--r--  deps/v8/src/objects/template.tq | 4
-rw-r--r--  deps/v8/src/objects/templates-inl.h | 28
-rw-r--r--  deps/v8/src/objects/templates.h | 18
-rw-r--r--  deps/v8/src/parsing/func-name-inferrer.cc | 4
-rw-r--r--  deps/v8/src/parsing/func-name-inferrer.h | 2
-rw-r--r--  deps/v8/src/parsing/literal-buffer.cc | 8
-rw-r--r--  deps/v8/src/parsing/literal-buffer.h | 3
-rw-r--r--  deps/v8/src/parsing/parse-info.cc | 136
-rw-r--r--  deps/v8/src/parsing/parse-info.h | 52
-rw-r--r--  deps/v8/src/parsing/parser-base.h | 202
-rw-r--r--  deps/v8/src/parsing/parser.cc | 184
-rw-r--r--  deps/v8/src/parsing/parser.h | 78
-rw-r--r--  deps/v8/src/parsing/parsing.cc | 31
-rw-r--r--  deps/v8/src/parsing/parsing.h | 13
-rw-r--r--  deps/v8/src/parsing/pending-compilation-error-handler.cc | 10
-rw-r--r--  deps/v8/src/parsing/pending-compilation-error-handler.h | 1
-rw-r--r--  deps/v8/src/parsing/preparse-data-impl.h | 1
-rw-r--r--  deps/v8/src/parsing/preparse-data.cc | 57
-rw-r--r--  deps/v8/src/parsing/preparse-data.h | 8
-rw-r--r--  deps/v8/src/parsing/preparser.h | 89
-rw-r--r--  deps/v8/src/parsing/rewriter.cc | 2
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.cc | 29
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.h | 2
-rw-r--r--  deps/v8/src/parsing/scanner.cc | 13
-rw-r--r--  deps/v8/src/parsing/scanner.h | 6
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.cc | 2
-rw-r--r--  deps/v8/src/profiler/profile-generator-inl.h | 10
-rw-r--r--  deps/v8/src/profiler/profile-generator.cc | 51
-rw-r--r--  deps/v8/src/profiler/profile-generator.h | 10
-rw-r--r--  deps/v8/src/profiler/profiler-listener.cc | 24
-rw-r--r--  deps/v8/src/profiler/sampling-heap-profiler.cc | 3
-rw-r--r--  deps/v8/src/profiler/strings-storage.cc | 36
-rw-r--r--  deps/v8/src/profiler/strings-storage.h | 7
-rw-r--r--  deps/v8/src/profiler/tick-sample.cc | 2
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc | 17
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h | 2
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 32
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h | 3
-rw-r--r--  deps/v8/src/regexp/gen-regexp-special-case.cc | 150
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc | 17
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h | 2
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc | 17
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h | 2
-rw-r--r--  deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc | 17
-rw-r--r--  deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h | 2
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc | 21
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h | 2
-rw-r--r--  deps/v8/src/regexp/regexp-ast.h | 29
-rw-r--r--  deps/v8/src/regexp/regexp-bytecode-generator.cc | 8
-rw-r--r--  deps/v8/src/regexp/regexp-bytecode-generator.h | 2
-rw-r--r--  deps/v8/src/regexp/regexp-bytecode-peephole.cc | 1
-rw-r--r--  deps/v8/src/regexp/regexp-bytecodes.h | 8
-rw-r--r--  deps/v8/src/regexp/regexp-compiler-tonode.cc | 114
-rw-r--r--  deps/v8/src/regexp/regexp-compiler.cc | 115
-rw-r--r--  deps/v8/src/regexp/regexp-compiler.h | 16
-rw-r--r--  deps/v8/src/regexp/regexp-error.cc | 22
-rw-r--r--  deps/v8/src/regexp/regexp-error.h | 58
-rw-r--r--  deps/v8/src/regexp/regexp-interpreter.cc | 45
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler-arch.h | 2
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler-tracer.cc | 10
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler-tracer.h | 1
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler.cc | 48
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler.h | 10
-rw-r--r--  deps/v8/src/regexp/regexp-parser.cc | 142
-rw-r--r--  deps/v8/src/regexp/regexp-parser.h | 20
-rw-r--r--  deps/v8/src/regexp/regexp-stack.h | 6
-rw-r--r--  deps/v8/src/regexp/regexp.cc | 45
-rw-r--r--  deps/v8/src/regexp/regexp.h | 7
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc | 15
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h | 2
-rw-r--r--  deps/v8/src/regexp/special-case.h | 141
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc | 15
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h | 1
-rw-r--r--  deps/v8/src/roots/roots-inl.h | 6
-rw-r--r--  deps/v8/src/roots/roots.h | 25
-rw-r--r--  deps/v8/src/runtime/runtime-array.cc | 9
-rw-r--r--  deps/v8/src/runtime/runtime-atomics.cc | 2
-rw-r--r--  deps/v8/src/runtime/runtime-classes.cc | 36
-rw-r--r--  deps/v8/src/runtime/runtime-compiler.cc | 26
-rw-r--r--  deps/v8/src/runtime/runtime-debug.cc | 2
-rw-r--r--  deps/v8/src/runtime/runtime-internal.cc | 16
-rw-r--r--  deps/v8/src/runtime/runtime-literals.cc | 6
-rw-r--r--  deps/v8/src/runtime/runtime-object.cc | 17
-rw-r--r--  deps/v8/src/runtime/runtime-operators.cc | 8
-rw-r--r--  deps/v8/src/runtime/runtime-proxy.cc | 20
-rw-r--r--  deps/v8/src/runtime/runtime-scopes.cc | 101
-rw-r--r--  deps/v8/src/runtime/runtime-test.cc | 12
-rw-r--r--  deps/v8/src/runtime/runtime-utils.h | 16
-rw-r--r--  deps/v8/src/runtime/runtime-wasm.cc | 249
-rw-r--r--  deps/v8/src/runtime/runtime.cc | 28
-rw-r--r--  deps/v8/src/runtime/runtime.h | 23
-rw-r--r--  deps/v8/src/snapshot/DEPS | 3
-rw-r--r--  deps/v8/src/snapshot/code-serializer.cc | 16
-rw-r--r--  deps/v8/src/snapshot/deserializer-allocator.cc | 28
-rw-r--r--  deps/v8/src/snapshot/deserializer.cc | 36
-rw-r--r--  deps/v8/src/snapshot/deserializer.h | 2
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc | 4
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc | 354
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h | 1
-rw-r--r--  deps/v8/src/snapshot/object-deserializer.cc | 3
-rw-r--r--  deps/v8/src/snapshot/partial-serializer.cc | 3
-rw-r--r--  deps/v8/src/snapshot/read-only-serializer.cc | 1
-rw-r--r--  deps/v8/src/snapshot/serializer-common.cc | 3
-rw-r--r--  deps/v8/src/snapshot/serializer.cc | 8
-rw-r--r--  deps/v8/src/snapshot/snapshot-common.cc | 84
-rw-r--r--  deps/v8/src/snapshot/snapshot-compression.cc | 95
-rw-r--r--  deps/v8/src/snapshot/snapshot-compression.h | 27
-rw-r--r--  deps/v8/src/snapshot/snapshot.h | 16
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.cc | 4
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.cc | 10
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.h | 4
-rw-r--r--  deps/v8/src/strings/string-hasher-inl.h | 32
-rw-r--r--  deps/v8/src/torque/ast.h | 6
-rw-r--r--  deps/v8/src/torque/cfg.cc | 28
-rw-r--r--  deps/v8/src/torque/cfg.h | 37
-rw-r--r--  deps/v8/src/torque/class-debug-reader-generator.cc | 136
-rw-r--r--  deps/v8/src/torque/constants.h | 10
-rw-r--r--  deps/v8/src/torque/csa-generator.cc | 712
-rw-r--r--  deps/v8/src/torque/csa-generator.h | 47
-rw-r--r--  deps/v8/src/torque/declarable.h | 2
-rw-r--r--  deps/v8/src/torque/declaration-visitor.cc | 16
-rw-r--r--  deps/v8/src/torque/global-context.h | 10
-rw-r--r--  deps/v8/src/torque/implementation-visitor.cc | 907
-rw-r--r--  deps/v8/src/torque/implementation-visitor.h | 61
-rw-r--r--  deps/v8/src/torque/instance-type-generator.cc | 47
-rw-r--r--  deps/v8/src/torque/instructions.cc | 341
-rw-r--r--  deps/v8/src/torque/instructions.h | 173
-rw-r--r--  deps/v8/src/torque/torque-compiler.cc | 2
-rw-r--r--  deps/v8/src/torque/torque-parser.cc | 104
-rw-r--r--  deps/v8/src/torque/type-oracle.cc | 28
-rw-r--r--  deps/v8/src/torque/type-oracle.h | 45
-rw-r--r--  deps/v8/src/torque/type-visitor.cc | 155
-rw-r--r--  deps/v8/src/torque/types.cc | 135
-rw-r--r--  deps/v8/src/torque/types.h | 32
-rw-r--r--  deps/v8/src/torque/utils.cc | 2
-rw-r--r--  deps/v8/src/torque/utils.h | 39
-rw-r--r--  deps/v8/src/utils/vector.h | 12
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 400
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 312
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 619
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc | 163
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 126
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 1244
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.h | 6
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h | 38
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 421
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 417
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 236
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 236
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 676
-rw-r--r--  deps/v8/src/wasm/c-api.cc | 95
-rw-r--r--  deps/v8/src/wasm/compilation-environment.h | 11
-rw-r--r--  deps/v8/src/wasm/decoder.h | 1
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 106
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc | 28
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.h | 14
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc | 8
-rw-r--r--  deps/v8/src/wasm/function-compiler.h | 13
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc | 47
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.cc | 28
-rw-r--r--  deps/v8/src/wasm/jump-table-assembler.h | 12
-rw-r--r--  deps/v8/src/wasm/local-decl-encoder.cc | 2
-rw-r--r--  deps/v8/src/wasm/local-decl-encoder.h | 11
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 588
-rw-r--r--  deps/v8/src/wasm/module-compiler.h | 9
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 399
-rw-r--r--  deps/v8/src/wasm/module-decoder.h | 29
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc | 89
-rw-r--r--  deps/v8/src/wasm/value-type.h | 459
-rw-r--r--  deps/v8/src/wasm/wasm-arguments.h | 6
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc | 483
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 193
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h | 2
-rw-r--r--  deps/v8/src/wasm/wasm-debug-evaluate.cc | 274
-rw-r--r--  deps/v8/src/wasm/wasm-debug-evaluate.h | 25
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 514
-rw-r--r--  deps/v8/src/wasm/wasm-debug.h | 120
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc | 312
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h | 123
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 174
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.h | 12
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache.cc | 2
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache.h | 4
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 226
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 134
-rw-r--r--  deps/v8/src/wasm/wasm-limits.h | 17
-rw-r--r--  deps/v8/src/wasm/wasm-linkage.h | 2
-rw-r--r--  deps/v8/src/wasm/wasm-memory.cc | 633
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 23
-rw-r--r--  deps/v8/src/wasm/wasm-module-sourcemap.cc | 13
-rw-r--r--  deps/v8/src/wasm/wasm-module-sourcemap.h | 6
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc | 62
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 59
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 20
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 130
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 50
-rw-r--r--  deps/v8/src/wasm/wasm-objects.tq | 11
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 114
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 398
-rw-r--r--  deps/v8/src/wasm/wasm-result.cc | 15
-rw-r--r--  deps/v8/src/wasm/wasm-result.h | 14
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc | 139
-rw-r--r--  deps/v8/src/wasm/wasm-tier.h | 2
-rwxr-xr-x  deps/v8/test/benchmarks/csuite/csuite.py | 2
-rw-r--r--  deps/v8/test/cctest/BUILD.gn | 39
-rw-r--r--  deps/v8/test/cctest/cctest.h | 72
-rw-r--r--  deps/v8/test/cctest/cctest.status | 35
-rw-r--r--  deps/v8/test/cctest/compiler/test-code-assembler.cc | 141
-rw-r--r--  deps/v8/test/cctest/compiler/test-code-generator.cc | 5
-rw-r--r--  deps/v8/test/cctest/compiler/test-multiple-return.cc | 4
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-jsobjects.cc | 12
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-load-store.cc | 16
-rw-r--r--  deps/v8/test/cctest/heap/heap-utils.cc | 4
-rw-r--r--  deps/v8/test/cctest/heap/test-alloc.cc | 12
-rw-r--r--  deps/v8/test/cctest/heap/test-array-buffer-tracker.cc | 13
-rw-r--r--  deps/v8/test/cctest/heap/test-external-string-tracker.cc | 6
-rw-r--r--  deps/v8/test/cctest/heap/test-heap.cc | 114
-rw-r--r--  deps/v8/test/cctest/heap/test-invalidated-slots.cc | 9
-rw-r--r--  deps/v8/test/cctest/heap/test-spaces.cc | 4
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc | 3
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden | 21
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden | 24
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden | 106
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden | 346
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden | 121
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden | 26
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden | 6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden | 5
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden | 23
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden | 41
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CompareTypeOf.golden | 15
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden | 17
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden | 30
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden | 37
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden | 30
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden | 23
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden | 30
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden | 3
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden | 5
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden | 68
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden | 57
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden | 46
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden | 122
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden | 24
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden | 31
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden | 6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden | 90
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden | 36
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden | 39
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden | 25
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden | 443
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden | 15
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden | 6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden | 45
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden | 6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden | 21
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden | 72
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden | 33
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden | 33
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden | 30
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden | 10
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden | 24
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden | 15
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden | 15
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden | 34
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden | 6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden | 21
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden | 33
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden | 10
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden | 13
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden | 104
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden | 10
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden | 44
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden | 25
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden | 18
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden | 15
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden | 24
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden | 20
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden | 6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden | 9
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden | 5
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden | 12
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden | 21
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden | 6
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden | 29
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden | 35
-rw-r--r--  deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden | 3
-rw-r--r--  deps/v8/test/cctest/interpreter/interpreter-tester.h | 57
-rw-r--r--  deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc | 18
-rw-r--r--  deps/v8/test/cctest/interpreter/test-interpreter.cc | 119
-rw-r--r--  deps/v8/test/cctest/parsing/test-scanner-streams.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-accessors.cc | 9
-rw-r--r--  deps/v8/test/cctest/test-api-array-buffer.cc | 148
-rw-r--r--  deps/v8/test/cctest/test-api-stack-traces.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 551
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm64.cc | 139
-rw-r--r--  deps/v8/test/cctest/test-code-pages.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-code-stub-assembler.cc | 69
-rw-r--r--  deps/v8/test/cctest/test-constantpool.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-debug-helper.cc | 27
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 656
-rw-r--r--  deps/v8/test/cctest/test-decls.cc | 33
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm64.cc | 40
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-disasm-x64.cc | 67
-rw-r--r--  deps/v8/test/cctest/test-factory.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-feedback-vector.cc | 33
-rw-r--r--  deps/v8/test/cctest/test-field-type-tracking.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-func-name-inference.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-global-handles.cc | 61
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-icache.cc | 5
-rw-r--r--  deps/v8/test/cctest/test-inspector.cc | 15
-rw-r--r--  deps/v8/test/cctest/test-js-weak-refs.cc | 412
-rw-r--r--  deps/v8/test/cctest/test-local-handles.cc | 96
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 12
-rw-r--r--  deps/v8/test/cctest/test-modules.cc | 265
-rw-r--r--  deps/v8/test/cctest/test-object.cc | 23
-rw-r--r--  deps/v8/test/cctest/test-orderedhashtable.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-parsing.cc | 43
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 47
-rw-r--r--  deps/v8/test/cctest/test-roots.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-sampler-api.cc | 63
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 75
-rw-r--r--  deps/v8/test/cctest/test-strings.cc | 33
-rw-r--r--  deps/v8/test/cctest/test-sync-primitives-arm64.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-thread-termination.cc | 9
-rw-r--r--  deps/v8/test/cctest/test-types.cc | 140
-rw-r--r--  deps/v8/test/cctest/test-unwinder-code-pages.cc | 74
-rw-r--r--  deps/v8/test/cctest/test-unwinder.cc | 100
-rw-r--r--  deps/v8/test/cctest/test-usecounters.cc | 36
-rw-r--r--  deps/v8/test/cctest/test-v8windbg.cc | 14
-rw-r--r--  deps/v8/test/cctest/torque/test-torque.cc | 29
-rw-r--r--  deps/v8/test/cctest/trace-extension.cc | 18
-rw-r--r--  deps/v8/test/cctest/wasm/test-c-wasm-entry.cc | 2
-rw-r--r--  deps/v8/test/cctest/wasm/test-compilation-cache.cc | 140
-rw-r--r--  deps/v8/test/cctest/wasm/test-grow-memory.cc | 2
-rw-r--r--  deps/v8/test/cctest/wasm/test-liftoff-inspection.cc | 284
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-64.cc | 10
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc | 46
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc | 37
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-simd.cc | 283
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm.cc | 29
-rw-r--r--  deps/v8/test/cctest/wasm/test-streaming-compilation.cc | 33
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc | 11
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc | 315
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-stack.cc | 58
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.cc | 84
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.h | 79
-rw-r--r--  deps/v8/test/common/types-fuzz.h | 68
-rw-r--r--  deps/v8/test/common/wasm/wasm-macro-gen.h | 29
-rw-r--r--  deps/v8/test/common/wasm/wasm-module-runner.cc | 27
-rw-r--r--  deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js | 42
-rw-r--r--  deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js | 62
-rw-r--r--  deps/v8/test/debugger/debugger.status | 14
-rw-r--r--  deps/v8/test/debugging/debugging.status | 5
-rw-r--r--  deps/v8/test/debugging/testcfg.py | 99
-rw-r--r--  deps/v8/test/debugging/wasm/gdb-server/connect.py | 41
-rw-r--r--  deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py | 73
-rw-r--r--  deps/v8/test/debugging/wasm/gdb-server/test_files/test.js | 33
-rw-r--r--  deps/v8/test/fuzzer/multi-return.cc | 4
-rw-r--r--deps/v8/test/fuzzer/regexp-builtins.cc3
-rw-r--r--deps/v8/test/fuzzer/wasm-async.cc2
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc871
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc54
-rw-r--r--deps/v8/test/fuzzer/wasm.cc4
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt2
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js47
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js44
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt148
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt17
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-nested.js61
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-preview-expected.txt28
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-preview.js41
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static.js122
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt83
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-unused.js79
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods.js4
-rw-r--r--deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt3
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt26
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js34
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-oom-extrawide.js39
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-oom-wide.js37
-rw-r--r--deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt40
-rw-r--r--deps/v8/test/inspector/debugger/script-origin-stack-expected.txt1
-rw-r--r--deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt12
-rw-r--r--deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt3
-rw-r--r--deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets-expected.txt70
-rw-r--r--deps/v8/test/inspector/debugger/wasm-global-names-expected.txt7
-rw-r--r--deps/v8/test/inspector/debugger/wasm-global-names.js82
-rw-r--r--deps/v8/test/inspector/debugger/wasm-imports-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt103
-rw-r--r--deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js122
-rw-r--r--deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt230
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt167
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js22
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info.js22
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scripts-expected.txt40
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scripts-with-name-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scripts.js10
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt12
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt391
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js93
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt173
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js19
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-in-from-js.js36
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-liftoff-expected.txt69
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js146
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt13
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt167
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js20
-rw-r--r--deps/v8/test/inspector/debugger/wasm-unnamed-function-names-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/wasm-unnamed-function-names.js69
-rw-r--r--deps/v8/test/inspector/inspector-test.cc3
-rw-r--r--deps/v8/test/inspector/inspector.status20
-rw-r--r--deps/v8/test/inspector/isolate-data.cc25
-rw-r--r--deps/v8/test/inspector/protocol-test.js4
-rw-r--r--deps/v8/test/inspector/runtime/es6-module-expected.txt4
-rw-r--r--deps/v8/test/inspector/testcfg.py3
-rw-r--r--deps/v8/test/intl/regress-10248.js70
-rw-r--r--deps/v8/test/intl/regress-1030160.js5
-rw-r--r--deps/v8/test/intl/regress-1041319.js46
-rw-r--r--deps/v8/test/js-perf-test/Scope/with.js25
-rw-r--r--deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.out4
-rw-r--r--deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.out4
-rw-r--r--deps/v8/test/message/fail/class-methods-private-brand-check-anonymous.js11
-rw-r--r--deps/v8/test/message/fail/class-methods-private-brand-check-anonymous.out9
-rw-r--r--deps/v8/test/message/fail/class-methods-private-brand-check.js11
-rw-r--r--deps/v8/test/message/fail/class-methods-private-brand-check.out9
-rw-r--r--deps/v8/test/message/fail/spread-call-2.js5
-rw-r--r--deps/v8/test/message/fail/spread-call-2.out5
-rw-r--r--deps/v8/test/message/fail/spread-call-3.js6
-rw-r--r--deps/v8/test/message/fail/spread-call-3.out5
-rw-r--r--deps/v8/test/message/fail/spread-call.js5
-rw-r--r--deps/v8/test/message/fail/spread-call.out5
-rw-r--r--deps/v8/test/message/fail/spread-construct-2.js6
-rw-r--r--deps/v8/test/message/fail/spread-construct-2.out5
-rw-r--r--deps/v8/test/message/fail/spread-construct-3.js7
-rw-r--r--deps/v8/test/message/fail/spread-construct-3.out5
-rw-r--r--deps/v8/test/message/fail/spread-construct.js6
-rw-r--r--deps/v8/test/message/fail/spread-construct.out5
-rw-r--r--deps/v8/test/message/fail/wasm-exception-rethrow.out2
-rw-r--r--deps/v8/test/message/fail/wasm-exception-throw.out2
-rw-r--r--deps/v8/test/message/fail/wasm-function-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-module-and-function-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-module-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-no-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-trap.out2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationgroup1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationgroup2.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry1.js (renamed from deps/v8/test/message/fail/weak-refs-finalizationgroup2.js)2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry2.js (renamed from deps/v8/test/message/fail/weak-refs-finalizationgroup1.js)2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry2.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-register1.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-register1.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-register2.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-register2.out6
-rw-r--r--deps/v8/test/message/fail/weak-refs-unregister.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-unregister.out2
-rw-r--r--deps/v8/test/message/wasm-function-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-function-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-module-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-module-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-no-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-no-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-trace-memory-interpreted.out3
-rw-r--r--deps/v8/test/message/wasm-trace-memory-liftoff.out5
-rw-r--r--deps/v8/test/message/wasm-trace-memory.js14
-rw-r--r--deps/v8/test/message/wasm-trace-memory.out3
-rw-r--r--deps/v8/test/message/weakref-finalizationregistry-error.js26
-rw-r--r--deps/v8/test/message/weakref-finalizationregistry-error.out12
-rw-r--r--deps/v8/test/mjsunit/BUILD.gn3
-rw-r--r--deps/v8/test/mjsunit/array-reduce.js8
-rw-r--r--deps/v8/test/mjsunit/asm/regress-674089.js5
-rw-r--r--deps/v8/test/mjsunit/call-intrinsic-differential-fuzzing.js8
-rw-r--r--deps/v8/test/mjsunit/call-intrinsic-fuzzing.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope-id.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1061678.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1061803.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1062916.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1063661.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1072171.js45
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case-noi18n.js23
-rw-r--r--deps/v8/test/mjsunit/for-in-special-cases.js70
-rw-r--r--deps/v8/test/mjsunit/frozen-array-reduce.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/async-generators-throw-caught.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/property-names.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/private-methods-empty-inner.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/private-methods.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/basics.js68
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js17
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js)4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js26
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/finalization-group-keeps-holdings-alive.js)13
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-scheduled-for-cleanup-multiple-times.js)17
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js19
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js25
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js4
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js11
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status43
-rw-r--r--deps/v8/test/mjsunit/non-extensible-array-reduce.js8
-rw-r--r--deps/v8/test/mjsunit/optimized-array-every.js6
-rw-r--r--deps/v8/test/mjsunit/optimized-array-find.js6
-rw-r--r--deps/v8/test/mjsunit/optimized-array-findindex.js6
-rw-r--r--deps/v8/test/mjsunit/optimized-array-some.js6
-rw-r--r--deps/v8/test/mjsunit/optimized-filter.js6
-rw-r--r--deps/v8/test/mjsunit/optimized-foreach.js5
-rw-r--r--deps/v8/test/mjsunit/optimized-map.js6
-rw-r--r--deps/v8/test/mjsunit/prepare-missing-label-syntax-error.js8
-rw-r--r--deps/v8/test/mjsunit/regexp.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1049982-1.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1049982-2.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-906893.js (renamed from deps/v8/test/mjsunit/regress-906893.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-918763.js (renamed from deps/v8/test/mjsunit/regress-918763.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-930045.js (renamed from deps/v8/test/mjsunit/regress-930045.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-932101.js (renamed from deps/v8/test/mjsunit/regress-932101.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-952682.js (renamed from deps/v8/test/mjsunit/regress-952682.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-956426.js (renamed from deps/v8/test/mjsunit/regress-956426.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-958725.js (renamed from deps/v8/test/mjsunit/regress-958725.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-963346.js (renamed from deps/v8/test/mjsunit/regress-963346.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-966460.js (renamed from deps/v8/test/mjsunit/regress-966460.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1025468.js (renamed from deps/v8/test/mjsunit/regress-crbug-1025468.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1047368.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1057653.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1059738.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1072947.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-regexp-functional-replace-slow.js (renamed from deps/v8/test/mjsunit/regress-regexp-functional-replace-slow.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-8445-2.js (renamed from deps/v8/test/mjsunit/regress-v8-8445-2.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-8445.js (renamed from deps/v8/test/mjsunit/regress-v8-8445.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-10126-streaming.js7
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-10126.js32
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-10309.js64
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1045225.js28
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1046472.js33
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1048241.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1051912.js17
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1054466.js52
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1055692.js35
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-724846.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-789952.js40
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1057094.js14
-rw-r--r--deps/v8/test/mjsunit/sealed-array-reduce.js7
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie.js79
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie_archs.js84
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie_webassembly.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/anyfunc.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-globals.js15
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-table.js34
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f32.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f64.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory.js17
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-anyref.js95
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js104
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-huge-memory.js35
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory.js378
-rw-r--r--deps/v8/test/mjsunit/wasm/import-memory.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter-mixed.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js48
-rw-r--r--deps/v8/test/mjsunit/wasm/liftoff-simd-params.js77
-rw-r--r--deps/v8/test/mjsunit/wasm/many-modules.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-external-call.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/multiple-code-spaces.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/table-fill.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js32
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection.js33
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-exported.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-module.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/user-properties-reexport.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js74
-rw-r--r--deps/v8/test/test262/test262.status30
-rw-r--r--deps/v8/test/test262/testcfg.py2
-rw-r--r--deps/v8/test/torque/test-torque.tq110
-rw-r--r--deps/v8/test/unittests/BUILD.gn52
-rw-r--r--deps/v8/test/unittests/api/access-check-unittest.cc7
-rw-r--r--deps/v8/test/unittests/api/isolate-unittest.cc3
-rw-r--r--deps/v8/test/unittests/api/remote-object-unittest.cc3
-rw-r--r--deps/v8/test/unittests/api/v8-object-unittest.cc42
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc10
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc15
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc21
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc10
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc10
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc12
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc10
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc21
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc13
-rw-r--r--deps/v8/test/unittests/base/region-allocator-unittest.cc26
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc22
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc109
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc70
-rw-r--r--deps/v8/test/unittests/compiler/node-cache-unittest.cc38
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/state-values-utils-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc116
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc145
-rw-r--r--deps/v8/test/unittests/heap/cppgc/allocation_unittest.cc42
-rw-r--r--deps/v8/test/unittests/heap/cppgc/finalizer-trait_unittest.cc118
-rw-r--r--deps/v8/test/unittests/heap/cppgc/garbage-collected_unittest.cc26
-rw-r--r--deps/v8/test/unittests/heap/cppgc/gc-info_unittest.cc153
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-object-header_unittest.cc181
-rw-r--r--deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc17
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stack_unittest.cc256
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.cc36
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h39
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc15
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc38
-rw-r--r--deps/v8/test/unittests/heap/off-thread-factory-unittest.cc304
-rw-r--r--deps/v8/test/unittests/heap/safepoint-unittest.cc139
-rw-r--r--deps/v8/test/unittests/heap/scavenge-job-unittest.cc114
-rw-r--r--deps/v8/test/unittests/heap/worklist-unittest.cc15
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc24
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc20
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc146
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc118
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc14
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc9
-rw-r--r--deps/v8/test/unittests/profiler/strings-storage-unittest.cc49
-rw-r--r--deps/v8/test/unittests/test-utils.h9
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc142
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc280
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc162
-rw-r--r--deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc4
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status13
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status9
-rw-r--r--deps/v8/third_party/inspector_protocol/BUILD.gn21
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/code_generator.py7
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/dispatch.cc576
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/dispatch.h311
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/dispatch_test.cc445
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/error_support.cc59
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/error_support.h62
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/error_support_test.cc45
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/find_by_first.h58
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/find_by_first_test.cc76
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/frontend_channel.h47
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/json.cc15
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/json.h1
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/json_test.cc26
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/serializable.cc21
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/serializable.h12
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/serializable_test.cc5
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/span.cc24
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/span.h24
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/span_test.cc39
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/status.cc106
-rw-r--r--deps/v8/third_party/inspector_protocol/crdtp/status.h60
-rw-r--r--deps/v8/third_party/inspector_protocol/inspector_protocol.gni11
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template355
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template178
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template73
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template39
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Forward_h.template12
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template33
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Object_cpp.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template548
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Parser_h.template24
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template38
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_cpp.template305
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template34
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template59
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/roll.py11
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/Imported_h.template4
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template209
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template12
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq2
-rw-r--r--deps/v8/third_party/zlib/BUILD.gn5
-rw-r--r--deps/v8/third_party/zlib/contrib/minizip/iowin32.c8
-rw-r--r--deps/v8/third_party/zlib/cpu_features.c3
-rw-r--r--deps/v8/third_party/zlib/crc32_simd.h1
-rw-r--r--deps/v8/third_party/zlib/deflate.c2
-rw-r--r--deps/v8/third_party/zlib/patches/0000-build.patch42
-rw-r--r--deps/v8/third_party/zlib/patches/0001-simd.patch42
-rw-r--r--deps/v8/third_party/zlib/patches/0003-uninitializedjump.patch2
-rw-r--r--deps/v8/third_party/zlib/patches/0004-fix-uwp.patch22
-rw-r--r--deps/v8/third_party/zlib/trees.h1
-rw-r--r--deps/v8/tools/BUILD.gn20
-rw-r--r--deps/v8/tools/callstats.html59
-rwxr-xr-xdeps/v8/tools/callstats.py6
-rw-r--r--deps/v8/tools/clusterfuzz/BUILD.gn3
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/failure_output.txt4
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt4
-rw-r--r--deps/v8/tools/clusterfuzz/v8_commands.py34
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie.py78
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie_test.py211
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_config.py72
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_experiments.json17
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_flags.json34
-rw-r--r--deps/v8/tools/clusterfuzz/v8_mock.js118
-rw-r--r--deps/v8/tools/clusterfuzz/v8_mock_archs.js59
-rw-r--r--deps/v8/tools/clusterfuzz/v8_mock_webassembly.js18
-rw-r--r--deps/v8/tools/clusterfuzz/v8_suppressions.js25
-rw-r--r--deps/v8/tools/clusterfuzz/v8_suppressions.py106
-rw-r--r--deps/v8/tools/codemap.js4
-rw-r--r--deps/v8/tools/debug_helper/BUILD.gn22
-rw-r--r--deps/v8/tools/debug_helper/debug-helper-internal.cc8
-rw-r--r--deps/v8/tools/debug_helper/debug-helper-internal.h18
-rw-r--r--deps/v8/tools/debug_helper/debug-helper.h7
-rw-r--r--deps/v8/tools/debug_helper/gen-heap-constants.py6
-rw-r--r--deps/v8/tools/debug_helper/get-object-properties.cc11
-rw-r--r--deps/v8/tools/debug_helper/heap-constants.cc8
-rw-r--r--deps/v8/tools/debug_helper/heap-constants.h8
-rw-r--r--deps/v8/tools/debug_helper/list-object-classes.cc2
-rwxr-xr-xdeps/v8/tools/dev/gm.py7
-rw-r--r--deps/v8/tools/gdb-v8-support.py9
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py1
-rwxr-xr-xdeps/v8/tools/generate-runtime-call-stats.py36
-rw-r--r--deps/v8/tools/heap-stats/categories.js4
-rw-r--r--deps/v8/tools/heap-stats/details-selection-template.html (renamed from deps/v8/tools/heap-stats/details-selection.html)5
-rw-r--r--deps/v8/tools/heap-stats/details-selection.js19
-rw-r--r--deps/v8/tools/heap-stats/global-timeline-template.html (renamed from deps/v8/tools/heap-stats/global-timeline.html)3
-rw-r--r--deps/v8/tools/heap-stats/global-timeline.js19
-rw-r--r--deps/v8/tools/heap-stats/helper.js7
-rw-r--r--deps/v8/tools/heap-stats/histogram-viewer-template.html (renamed from deps/v8/tools/heap-stats/histogram-viewer.html)3
-rw-r--r--deps/v8/tools/heap-stats/histogram-viewer.js19
-rw-r--r--deps/v8/tools/heap-stats/index.html10
-rw-r--r--deps/v8/tools/heap-stats/model.js2
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader-template.html (renamed from deps/v8/tools/heap-stats/trace-file-reader.html)5
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.js140
-rw-r--r--deps/v8/tools/map-processor.html21
-rwxr-xr-xdeps/v8/tools/mb/mb.py46
-rwxr-xr-xdeps/v8/tools/mb/mb_unittest.py9
-rw-r--r--deps/v8/tools/profile.js16
-rw-r--r--deps/v8/tools/release/git_recipes.py1
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py8
-rw-r--r--deps/v8/tools/testrunner/base_runner.py23
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/pool.py8
-rw-r--r--deps/v8/tools/testrunner/local/variants.py1
-rw-r--r--deps/v8/tools/testrunner/outproc/base.py17
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py6
-rw-r--r--deps/v8/tools/testrunner/test_config.py4
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py4
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py71
-rw-r--r--deps/v8/tools/tickprocessor-driver.js2
-rw-r--r--deps/v8/tools/tickprocessor.js25
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py18
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py8
-rw-r--r--deps/v8/tools/v8heapconst.py445
-rw-r--r--deps/v8/tools/v8windbg/BUILD.gn116
-rw-r--r--deps/v8/tools/v8windbg/README.md151
-rw-r--r--deps/v8/tools/v8windbg/base/dbgext.cc75
-rw-r--r--deps/v8/tools/v8windbg/base/dbgext.def5
-rw-r--r--deps/v8/tools/v8windbg/base/dbgext.h34
-rw-r--r--deps/v8/tools/v8windbg/base/utilities.cc246
-rw-r--r--deps/v8/tools/v8windbg/base/utilities.h85
-rw-r--r--deps/v8/tools/v8windbg/copy-prereqs.py38
-rw-r--r--deps/v8/tools/v8windbg/src/cur-isolate.cc94
-rw-r--r--deps/v8/tools/v8windbg/src/cur-isolate.h34
-rw-r--r--deps/v8/tools/v8windbg/src/list-chunks.cc238
-rw-r--r--deps/v8/tools/v8windbg/src/list-chunks.h100
-rw-r--r--deps/v8/tools/v8windbg/src/local-variables.cc120
-rw-r--r--deps/v8/tools/v8windbg/src/local-variables.h36
-rw-r--r--deps/v8/tools/v8windbg/src/object-inspection.cc622
-rw-r--r--deps/v8/tools/v8windbg/src/object-inspection.h261
-rw-r--r--deps/v8/tools/v8windbg/src/v8-debug-helper-interop.cc157
-rw-r--r--deps/v8/tools/v8windbg/src/v8-debug-helper-interop.h138
-rw-r--r--deps/v8/tools/v8windbg/src/v8windbg-extension.cc340
-rw-r--r--deps/v8/tools/v8windbg/src/v8windbg-extension.h81
-rw-r--r--deps/v8/tools/v8windbg/test/debug-callbacks.cc94
-rw-r--r--deps/v8/tools/v8windbg/test/debug-callbacks.h90
-rw-r--r--deps/v8/tools/v8windbg/test/script.js14
-rw-r--r--deps/v8/tools/v8windbg/test/v8windbg-test.cc243
-rw-r--r--deps/v8/tools/v8windbg/test/v8windbg-test.h18
-rw-r--r--deps/v8/tools/whitespace.txt6
1524 files changed, 57362 insertions, 26750 deletions
diff --git a/deps/v8/.git-blame-ignore-revs b/deps/v8/.git-blame-ignore-revs
index 5ae3977031..4c53e208e3 100644
--- a/deps/v8/.git-blame-ignore-revs
+++ b/deps/v8/.git-blame-ignore-revs
@@ -23,3 +23,6 @@ e50b49a0e38b34e2b28e026f4d1c7e0da0c7bb1a
# Rewrite code base to use "." instead of "->" to access Object members.
878ccb33bd3cf0e6dc018ff8d15843f585ac07be
+
+# Move test/mjsunit/regress-*.js => test/mjsunit/regress/
+cb67be1a3842fcf6a0da18aee444e3b7ea789e04
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 819d147096..7036ecd42b 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -77,11 +77,13 @@ Daniel Andersson <kodandersson@gmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel James <dnljms@gmail.com>
David Carlier <devnexen@gmail.com>
+David Manouchehri <david@davidmanouchehri.com>
Deepak Mohan <hop2deep@gmail.com>
Deon Dior <diaoyuanjie@gmail.com>
Dominic Farolini <domfarolino@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
+Eric Rannaud <eric.rannaud@gmail.com>
Erich Ocean <erich.ocean@me.com>
Evan Lucas <evan.lucas@help.com>
Fedor Indutny <fedor@indutny.com>
@@ -97,12 +99,14 @@ Henrique Ferreiro <henrique.ferreiro@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
Honggyu Kim <honggyu.kp@gmail.com>
Huáng Jùnliàng <jlhwung@gmail.com>
+Iain Ireland <iireland@mozilla.com>
Ingvar Stepanyan <me@rreverser.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
Jaime Bernardo <jaime@janeasystems.com>
Jan de Mooij <jandemooij@gmail.com>
Jan Krems <jan.krems@gmail.com>
+Janusz Majnert <jmajnert@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
James M Snell <jasnell@gmail.com>
@@ -151,6 +155,7 @@ Oliver Dunk <oliver@oliverdunk.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peng Fei <pfgenyun@gmail.com>
+Peng-Yu Chen <pengyu@libstarrify.so>
Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
@@ -195,6 +200,7 @@ Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wenlu Wang <kingwenlu@gmail.com>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
+Wouter Vermeiren <wouter.vermeiren@essensium.com>
Xiaoyin Liu <xiaoyin.l@outlook.com>
Yannic Bonenberger <contact@yannic-bonenberger.com>
Yong Wang <ccyongwang@tencent.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 0ffa2b794d..b2dde3f9d7 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -107,7 +107,7 @@ declare_args() {
# Enable pointer compression (sets -dV8_COMPRESS_POINTERS).
v8_enable_pointer_compression = ""
- v8_enable_31bit_smis_on_64bit_arch = true
+ v8_enable_31bit_smis_on_64bit_arch = false
# Sets -dOBJECT_PRINT.
v8_enable_object_print = ""
@@ -128,7 +128,7 @@ declare_args() {
v8_enable_concurrent_marking = true
# Sets -dV8_ARRAY_BUFFER_EXTENSION
- v8_enable_array_buffer_extension = false
+ v8_enable_array_buffer_extension = true
# Enables various testing features.
v8_enable_test_features = ""
@@ -211,6 +211,13 @@ declare_args() {
# Enable additional targets necessary for verification of torque
# file generation
v8_verify_torque_generation_invariance = false
+
+ # Disable all snapshot compression.
+ v8_enable_snapshot_compression = true
+
+ # Enable control-flow integrity features, such as pointer authentication for
+ # ARM64.
+ v8_control_flow_integrity = false
}
# Derived defaults.
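
The declare_args hunks above flip two defaults (v8_enable_31bit_smis_on_64bit_arch from true to false, v8_enable_array_buffer_extension from false to true) and add two new arguments, v8_enable_snapshot_compression and v8_control_flow_integrity. Below is a minimal args.gn sketch of how an embedder might exercise the new arguments when generating a build; every value is an illustrative assumption, not a recommendation made by this commit:

    # Hypothetical args.gn, e.g. for gn gen out/arm64.release.
    target_cpu = "arm64"
    is_debug = false

    # New in this roll (default true); sets -dV8_SNAPSHOT_COMPRESSION when
    # enabled. Disabling it trades a larger embedded snapshot for no
    # decompression work at startup.
    v8_enable_snapshot_compression = false

    # New in this roll (default false): pointer authentication on ARM64.
    # A later hunk asserts it is only supported when v8_current_cpu is arm64.
    v8_control_flow_integrity = true
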
@@ -270,6 +277,9 @@ assert(!v8_disable_write_barriers || v8_enable_single_generation,
assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
"Untrusted code mitigations are unsupported on ia32")
+assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
+ "Control-flow integrity is only supported on arm64")
+
assert(
!v8_enable_pointer_compression || !v8_enable_shared_ro_heap,
"Pointer compression is not supported with shared read-only heap enabled")
@@ -298,6 +308,7 @@ config("internal_config") {
"//build/config/compiler:wexit_time_destructors",
":internal_config_base",
":v8_header_features",
+ ":v8_tracing_config",
]
if (is_component_build) {
@@ -305,6 +316,17 @@ config("internal_config") {
}
}
+# Should be applied to all targets that write trace events.
+config("v8_tracing_config") {
+ if (v8_use_perfetto) {
+ include_dirs = [
+ "third_party/perfetto/include",
+ "$root_gen_dir/third_party/perfetto",
+ "$root_gen_dir/third_party/perfetto/build_config",
+ ]
+ }
+}
+
# This config should be applied to code using the libplatform.
config("libplatform_config") {
include_dirs = [ "include" ]
@@ -501,6 +523,15 @@ config("features") {
if (v8_enable_regexp_interpreter_threaded_dispatch) {
defines += [ "V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH" ]
}
+ if (v8_enable_snapshot_compression) {
+ defines += [ "V8_SNAPSHOT_COMPRESSION" ]
+ }
+ if (v8_control_flow_integrity) {
+ defines += [ "V8_ENABLE_CONTROL_FLOW_INTEGRITY" ]
+ }
+ if (v8_enable_wasm_gdb_remote_debugging) {
+ defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ]
+ }
}
config("toolchain") {
@@ -543,6 +574,12 @@ config("toolchain") {
}
if (v8_current_cpu == "arm64") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
+ if (v8_control_flow_integrity) {
+ # TODO(v8:10026): Enable this in src/build.
+ if (current_cpu == "arm64") {
+ cflags += [ "-mbranch-protection=standard" ]
+ }
+ }
}
# Mips64el/mipsel simulators.
@@ -618,6 +655,7 @@ config("toolchain") {
}
if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
defines += [ "V8_TARGET_ARCH_S390" ]
+ cflags += [ "-ffp-contract=off" ]
if (v8_current_cpu == "s390x") {
defines += [ "V8_TARGET_ARCH_S390X" ]
}
@@ -628,8 +666,9 @@ config("toolchain") {
}
}
if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
- defines += [ "V8_TARGET_ARCH_PPC" ]
- if (v8_current_cpu == "ppc64") {
+ if (v8_current_cpu == "ppc") {
+ defines += [ "V8_TARGET_ARCH_PPC" ]
+ } else if (v8_current_cpu == "ppc64") {
defines += [ "V8_TARGET_ARCH_PPC64" ]
}
if (host_byteorder == "little") {
@@ -1138,11 +1177,14 @@ template("run_torque") {
"bit-fields-tq.h",
"builtin-definitions-tq.h",
"interface-descriptors-tq.inc",
+ "factory-tq.cc",
+ "factory-tq.inc",
"field-offsets-tq.h",
"class-verifiers-tq.cc",
"class-verifiers-tq.h",
"enum-verifiers-tq.cc",
"objects-printer-tq.cc",
+ "objects-body-descriptors-tq-inl.h",
"class-definitions-tq.cc",
"class-definitions-tq-inl.h",
"class-definitions-tq.h",
@@ -1264,6 +1306,7 @@ v8_source_set("torque_generated_definitions") {
"$target_gen_dir/torque-generated/class-definitions-tq.cc",
"$target_gen_dir/torque-generated/class-verifiers-tq.cc",
"$target_gen_dir/torque-generated/class-verifiers-tq.h",
+ "$target_gen_dir/torque-generated/factory-tq.cc",
"$target_gen_dir/torque-generated/objects-printer-tq.cc",
]
@@ -1631,11 +1674,16 @@ v8_source_set("v8_initializers") {
### gcmole(arch:mips64el) ###
"src/builtins/mips64/builtins-mips64.cc",
]
- } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
+ } else if (v8_current_cpu == "ppc") {
sources += [
### gcmole(arch:ppc) ###
"src/builtins/ppc/builtins-ppc.cc",
]
+ } else if (v8_current_cpu == "ppc64") {
+ sources += [
+ ### gcmole(arch:ppc64) ###
+ "src/builtins/ppc/builtins-ppc.cc",
+ ]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [
### gcmole(arch:s390) ###
@@ -1684,6 +1732,7 @@ v8_header_set("v8_headers") {
public_configs = [ ":v8_header_features" ]
sources = [
+ "include/v8-fast-api-calls.h",
"include/v8-internal.h",
"include/v8.h",
"include/v8config.h",
@@ -1865,7 +1914,6 @@ v8_compiler_sources = [
"src/compiler/memory-optimizer.cc",
"src/compiler/memory-optimizer.h",
"src/compiler/node-aux-data.h",
- "src/compiler/node-cache.cc",
"src/compiler/node-cache.h",
"src/compiler/node-marker.cc",
"src/compiler/node-marker.h",
@@ -2005,6 +2053,7 @@ v8_source_set("v8_base_without_compiler") {
### gcmole(all) ###
"$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
+ "include/v8-fast-api-calls.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
"include/v8-internal.h",
@@ -2235,6 +2284,9 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/messages.h",
"src/execution/microtask-queue.cc",
"src/execution/microtask-queue.h",
+ "src/execution/off-thread-isolate.cc",
+ "src/execution/off-thread-isolate.h",
+ "src/execution/pointer-authentication.h",
"src/execution/protectors-inl.h",
"src/execution/protectors.cc",
"src/execution/protectors.h",
@@ -2273,6 +2325,9 @@ v8_source_set("v8_base_without_compiler") {
"src/handles/handles-inl.h",
"src/handles/handles.cc",
"src/handles/handles.h",
+ "src/handles/local-handles-inl.h",
+ "src/handles/local-handles.cc",
+ "src/handles/local-handles.h",
"src/handles/maybe-handles-inl.h",
"src/handles/maybe-handles.h",
"src/heap/array-buffer-collector.cc",
@@ -2298,6 +2353,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/factory-inl.h",
"src/heap/factory.cc",
"src/heap/factory.h",
+ "src/heap/finalization-registry-cleanup-task.cc",
+ "src/heap/finalization-registry-cleanup-task.h",
"src/heap/gc-idle-time-handler.cc",
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc",
@@ -2321,6 +2378,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/item-parallel-job.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
+ "src/heap/local-heap.cc",
+ "src/heap/local-heap.h",
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
@@ -2346,6 +2405,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",
"src/heap/remembered-set.h",
+ "src/heap/safepoint.cc",
+ "src/heap/safepoint.h",
"src/heap/scavenge-job.cc",
"src/heap/scavenge-job.h",
"src/heap/scavenger-inl.h",
@@ -2446,6 +2507,7 @@ v8_source_set("v8_base_without_compiler") {
"src/logging/log-utils.h",
"src/logging/log.cc",
"src/logging/log.h",
+ "src/logging/off-thread-logger.h",
"src/numbers/bignum-dtoa.cc",
"src/numbers/bignum-dtoa.h",
"src/numbers/bignum.cc",
@@ -2688,6 +2750,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/tagged-impl-inl.h",
"src/objects/tagged-impl.cc",
"src/objects/tagged-impl.h",
+ "src/objects/tagged-index.h",
"src/objects/tagged-value-inl.h",
"src/objects/tagged-value.h",
"src/objects/template-objects-inl.h",
@@ -2773,6 +2836,8 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/regexp-compiler.h",
"src/regexp/regexp-dotprinter.cc",
"src/regexp/regexp-dotprinter.h",
+ "src/regexp/regexp-error.cc",
+ "src/regexp/regexp-error.h",
"src/regexp/regexp-interpreter.cc",
"src/regexp/regexp-interpreter.h",
"src/regexp/regexp-macro-assembler-arch.h",
@@ -2858,6 +2923,8 @@ v8_source_set("v8_base_without_compiler") {
"src/snapshot/serializer.cc",
"src/snapshot/serializer.h",
"src/snapshot/snapshot-common.cc",
+ "src/snapshot/snapshot-compression.cc",
+ "src/snapshot/snapshot-compression.h",
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
"src/snapshot/snapshot.h",
@@ -2964,6 +3031,8 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/wasm-code-manager.cc",
"src/wasm/wasm-code-manager.h",
"src/wasm/wasm-constants.h",
+ "src/wasm/wasm-debug-evaluate.cc",
+ "src/wasm/wasm-debug-evaluate.h",
"src/wasm/wasm-debug.cc",
"src/wasm/wasm-engine.cc",
"src/wasm/wasm-engine.h",
@@ -3010,10 +3079,30 @@ v8_source_set("v8_base_without_compiler") {
"src/zone/zone.h",
]
+ if (!v8_control_flow_integrity) {
+ sources += [ "src/execution/pointer-authentication-dummy.h" ]
+ }
+
if (v8_enable_third_party_heap) {
sources += v8_third_party_heap_files
}
+ if (v8_enable_wasm_gdb_remote_debugging) {
+ sources += [
+ "src/debug/wasm/gdb-server/gdb-server-thread.cc",
+ "src/debug/wasm/gdb-server/gdb-server-thread.h",
+ "src/debug/wasm/gdb-server/gdb-server.cc",
+ "src/debug/wasm/gdb-server/gdb-server.h",
+ "src/debug/wasm/gdb-server/session.cc",
+ "src/debug/wasm/gdb-server/session.h",
+ "src/debug/wasm/gdb-server/target.cc",
+ "src/debug/wasm/gdb-server/target.h",
+ "src/debug/wasm/gdb-server/transport.cc",
+ "src/debug/wasm/gdb-server/transport.h",
+ "src/debug/wasm/gdb-server/util.h",
+ ]
+ }
+
if (v8_check_header_includes) {
# This file will be generated by tools/generate-header-include-checks.py
# if the "check_v8_header_includes" gclient variable is set.
@@ -3167,6 +3256,9 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
]
+ if (v8_control_flow_integrity) {
+ sources += [ "src/execution/arm64/pointer-authentication-arm64.h" ]
+ }
if (is_win) {
sources += [
"src/diagnostics/unwinding-info-win64.cc",
@@ -3227,7 +3319,7 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
- } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
+ } else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
@@ -3257,6 +3349,36 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/ppc/regexp-macro-assembler-ppc.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
+ } else if (v8_current_cpu == "ppc64") {
+ sources += [ ### gcmole(arch:ppc64) ###
+ "src/codegen/ppc/assembler-ppc-inl.h",
+ "src/codegen/ppc/assembler-ppc.cc",
+ "src/codegen/ppc/assembler-ppc.h",
+ "src/codegen/ppc/constants-ppc.cc",
+ "src/codegen/ppc/constants-ppc.h",
+ "src/codegen/ppc/cpu-ppc.cc",
+ "src/codegen/ppc/interface-descriptors-ppc.cc",
+ "src/codegen/ppc/macro-assembler-ppc.cc",
+ "src/codegen/ppc/macro-assembler-ppc.h",
+ "src/codegen/ppc/register-ppc.h",
+ "src/compiler/backend/ppc/code-generator-ppc.cc",
+ "src/compiler/backend/ppc/instruction-codes-ppc.h",
+ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
+ "src/compiler/backend/ppc/instruction-selector-ppc.cc",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
+ "src/debug/ppc/debug-ppc.cc",
+ "src/deoptimizer/ppc/deoptimizer-ppc.cc",
+ "src/diagnostics/ppc/disasm-ppc.cc",
+ "src/diagnostics/ppc/eh-frame-ppc.cc",
+ "src/execution/ppc/frame-constants-ppc.cc",
+ "src/execution/ppc/frame-constants-ppc.h",
+ "src/execution/ppc/simulator-ppc.cc",
+ "src/execution/ppc/simulator-ppc.h",
+ "src/regexp/ppc/regexp-macro-assembler-ppc.cc",
+ "src/regexp/ppc/regexp-macro-assembler-ppc.h",
+ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
+ ]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
"src/codegen/s390/assembler-s390-inl.h",
@@ -3357,7 +3479,10 @@ v8_source_set("v8_base_without_compiler") {
]
}
- deps += [ "//third_party/zlib" ]
+ deps += [
+ "//third_party/zlib",
+ "//third_party/zlib/google:compression_utils_portable",
+ ]
if (v8_postmortem_support) {
sources += [ "$target_gen_dir/debug-support.cc" ]
@@ -3519,6 +3644,7 @@ v8_component("v8_libbase") {
"src/base/atomicops_internals_std.h",
"src/base/base-export.h",
"src/base/bit-field.h",
+ "src/base/bits-iterator.h",
"src/base/bits.cc",
"src/base/bits.h",
"src/base/bounded-page-allocator.cc",
@@ -3797,6 +3923,47 @@ v8_source_set("fuzzer_support") {
]
}
+v8_source_set("cppgc_base") {
+ visibility = [ ":*" ]
+
+ sources = [
+ "include/cppgc/allocation.h",
+ "include/cppgc/api-constants.h",
+ "include/cppgc/finalizer-trait.h",
+ "include/cppgc/garbage-collected.h",
+ "include/cppgc/gc-info.h",
+ "include/cppgc/heap.h",
+ "include/cppgc/platform.h",
+ "include/v8config.h",
+ "src/heap/cppgc/allocation.cc",
+ "src/heap/cppgc/gc-info-table.cc",
+ "src/heap/cppgc/gc-info-table.h",
+ "src/heap/cppgc/gc-info.cc",
+ "src/heap/cppgc/heap-inl.h",
+ "src/heap/cppgc/heap-object-header-inl.h",
+ "src/heap/cppgc/heap-object-header.cc",
+ "src/heap/cppgc/heap-object-header.h",
+ "src/heap/cppgc/heap.cc",
+ "src/heap/cppgc/heap.h",
+ "src/heap/cppgc/platform.cc",
+ "src/heap/cppgc/sanitizers.h",
+ "src/heap/cppgc/stack.cc",
+ "src/heap/cppgc/stack.h",
+ ]
+
+ if (target_cpu == "x64") {
+ if (is_win) {
+ sources += [ "src/heap/cppgc/asm/x64/push_registers_win.S" ]
+ } else {
+ sources += [ "src/heap/cppgc/asm/x64/push_registers.S" ]
+ }
+ }
+
+ configs = [ ":internal_config" ]
+
+ public_deps = [ ":v8_libbase" ]
+}
+
###############################################################################
# Produce a single static library for embedders
#
@@ -3843,6 +4010,12 @@ v8_static_library("wee8") {
]
}
+v8_static_library("cppgc") {
+ deps = [ ":cppgc_base" ]
+
+ configs = [ ":internal_config" ]
+}
+
###############################################################################
# Executables
#
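
These two hunks introduce cppgc, the standalone C++ garbage-collection library, into the build: a cppgc_base source set (the headers under include/cppgc plus src/heap/cppgc) wrapped by a cppgc static library for embedders, with a cppgc_for_testing component added further down. A hedged sketch of an out-of-tree GN target linking it follows; the target name, source file, and include_dirs wiring are assumptions, since the commit only defines the library targets themselves:

    # Hypothetical embedder target; demo/main.cc is a placeholder.
    executable("cppgc_demo") {
      sources = [ "demo/main.cc" ]
      deps = [ "//v8:cppgc" ]            # the static library declared above
      include_dirs = [ "//v8/include" ]  # for the new include/cppgc headers
    }
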
@@ -4074,7 +4247,7 @@ group("v8_archive") {
if (is_fuchsia && !build_with_chromium) {
import("//build/config/fuchsia/rules.gni")
- fuchsia_package("d8_fuchsia_pkg") {
+ cr_fuchsia_package("d8_fuchsia_pkg") {
testonly = true
binary = ":d8"
package_name_override = "d8"
@@ -4134,6 +4307,15 @@ if (is_component_build) {
public_configs = [ ":external_config" ]
}
+
+ v8_component("cppgc_for_testing") {
+ testonly = true
+
+ public_deps = [ ":cppgc_base" ]
+
+ configs = [ ":internal_config" ]
+ public_configs = [ ":external_config" ]
+ }
} else {
group("v8") {
public_deps = [
@@ -4157,6 +4339,14 @@ if (is_component_build) {
public_configs = [ ":external_config" ]
}
+
+ group("cppgc_for_testing") {
+ testonly = true
+
+ public_deps = [ ":cppgc_base" ]
+
+ public_configs = [ ":external_config" ]
+ }
}
v8_executable("d8") {
@@ -4177,6 +4367,7 @@ v8_executable("d8") {
# the :external_config applied to it by virtue of depending on :v8, and
# you can't have both applied to the same target.
":internal_config_base",
+ ":v8_tracing_config",
]
deps = [
@@ -4203,7 +4394,7 @@ v8_executable("d8") {
}
if (v8_use_perfetto) {
- deps += [ "//third_party/perfetto/include/perfetto/tracing" ]
+ deps += [ "//third_party/perfetto/src/tracing:in_process_backend" ]
}
}
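
Taken together, the tracing-related hunks in this file route d8 through Perfetto when v8_use_perfetto is set: the new v8_tracing_config contributes the Perfetto include directories, d8 adds that config alongside internal_config_base, and its dependency moves from the header-only //third_party/perfetto/include/perfetto/tracing target to the in-process backend. A hedged args.gn fragment for opting in; whether the flag suits a given embedder is outside the scope of this commit, and the value is illustrative:

    # Hypothetical args.gn fragment enabling the Perfetto tracing path.
    v8_use_perfetto = true
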
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
index cf53fe80e0..1eee48173a 100644
--- a/deps/v8/COMMON_OWNERS
+++ b/deps/v8/COMMON_OWNERS
@@ -29,9 +29,11 @@ petermarshall@chromium.org
rmcilroy@chromium.org
sigurds@chromium.org
solanes@chromium.org
+syg@chromium.org
szuend@chromium.org
tebbi@chromium.org
-titzer@chromium.org
ulan@chromium.org
verwaest@chromium.org
+victorgomes@chromium.org
yangguo@chromium.org
+zhin@chromium.org
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 880ff6c4b8..1bc687beaf 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -4,13 +4,9 @@
gclient_gn_args_file = 'v8/build/config/gclient_args.gni'
gclient_gn_args = [
- 'checkout_aemu'
]
vars = {
- # By Default, do not checkout AEMU, as it is too big, as is done in Chromium.
- 'checkout_aemu': False,
-
# Fetches only the SDK boot images which match at least one of the whitelist
# entries in a comma-separated list.
#
@@ -35,10 +31,10 @@ vars = {
'check_v8_header_includes': False,
# GN CIPD package version.
- 'gn_version': 'git_revision:97cc440d84f050f99ff0161f9414bfa2ffa38f65',
+ 'gn_version': 'git_revision:5ed3c9cc67b090d5e311e4bd2aba072173e82db9',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:37a855b64d59b7f079c9a0e5368f2757099d14d3',
+ 'luci_go': 'git_revision:de73cf6c4bde86f0a9c8d54151b69b0154a398f1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -69,10 +65,6 @@ vars = {
# and whatever else without interference from each other.
'android_sdk_sources_version': '4gxhM8E62bvZpQs7Q3d0DinQaW0RLCIefhXrQBFkNy8C',
# Three lines of non-changing comments so that
- # the commit queue can handle CLs rolling android_sdk_tools_version
- # and whatever else without interference from each other.
- 'android_sdk_tools_version': 'wYcRQC2WHsw2dKWs4EA7fw9Qsyzu1ds1_fRjKmGxe5QC',
- # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
'android_sdk_tools-lint_version': '89hXqZYzCum3delB5RV7J_QyWkaRodqdtQS0s3LMh3wC',
@@ -80,15 +72,15 @@ vars = {
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '2f17606c25956e800b6c4670c294a03620e78551',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '26e9d485d01d6e0eb9dadd21df767a63494c8fea',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ea8b58b970c0c94b4a36270b806ee307547cd77e',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '2b2aec6506a810f8d7bd018609de2c2450b3c121',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'dbd3825b31041d782c5b504c59dcfb5ac7dda08c',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'd7aff76cf6bb0fbef3afa6c07718f78a80a70f8f',
'v8/third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'bb3f1802c237dd19105dd0f7919f99e536a39d10',
'v8/buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'afc5b798c72905e85f9991152be878714c579958',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '7977eb176752aeec29d888cfe8e677ac12ed1c41',
'v8/buildtools/clang_format/script':
Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
'v8/buildtools/linux64': {
@@ -112,11 +104,11 @@ deps = {
'condition': 'host_os == "mac"',
},
'v8/buildtools/third_party/libc++/trunk':
- Var('chromium_url') + '/chromium/llvm-project/libcxx.git' + '@' + '78d6a7767ed57b50122a161b91f59f19c9bd0d19',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'd9040c75cfea5928c804ab7c235fed06a63f743a',
'v8/buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/chromium/llvm-project/libcxxabi.git' + '@' + '0d529660e32d77d9111912d73f2c74fc5fa2a858',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '196ba1aaa8ac285d94f4ea8d9836390a45360533',
'v8/buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/llvm.org/libunwind.git' + '@' + '69d9b84cca8354117b9fe9705a4430d789ee599b',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '43bb9f872232f531bac80093ceb4de61c64b9ab7',
'v8/buildtools/win': {
'packages': [
{
@@ -128,7 +120,7 @@ deps = {
'condition': 'host_os == "win"',
},
'v8/base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'bd79231eb1f9e7de2efb4ad79e530d9a7e70d9a5',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'dab187b372fc17e51f5b9fad8201813d0aed5129',
'v8/third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '27c0a8d090c666a50e40fceb4ee5b40b1a2d3f87',
'condition': 'checkout_android',
@@ -168,10 +160,6 @@ deps = {
'version': Var('android_sdk_sources_version'),
},
{
- 'package': 'chromium/third_party/android_sdk/public/tools',
- 'version': Var('android_sdk_tools_version'),
- },
- {
'package': 'chromium/third_party/android_sdk/public/tools-lint',
'version': Var('android_sdk_tools-lint_version'),
},
@@ -180,7 +168,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '2e0a0cb9ad546be8c835e65d7537507cb7896e03',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '032c78376792ef343ea361bca2181ba6dec6b95f',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -188,23 +176,23 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '19c8ac5e150fbd147ec5987425a41aa9e97098b2',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '2457e41d8dc379f74662d3157e76339ba92cee06',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '306f3754a71d6d1ac644681d3544d06744914228',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '10b1902d893ea8cc43c69541d70868f91af3646b',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '885b3febcc170a60f25795304e60927b77d1e92d',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'cc958279ffd6853e0a1b227a7e957ca334fe56af',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '28b4fcca4b1b1d278dfe0cc0e69c7d9d59b31aab',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f6b2ccdd091ff82da54150796297c3a96d7edb41',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b',
'v8/third_party/qemu-linux-x64': {
@@ -227,8 +215,28 @@ deps = {
'condition': 'host_os == "mac" and checkout_fuchsia',
'dep_type': 'cipd',
},
+ 'v8/third_party/aemu-linux-x64': {
+ 'packages': [
+ {
+ 'package': 'fuchsia/third_party/aemu/linux-amd64',
+ 'version': '7YlCgase5GlIanqHn-nZClSlZ5kQETJyVUYRF7Jjy6UC'
+ },
+ ],
+ 'condition': 'host_os == "linux" and checkout_fuchsia',
+ 'dep_type': 'cipd',
+ },
+ 'v8/third_party/aemu-mac-x64': {
+ 'packages': [
+ {
+ 'package': 'fuchsia/third_party/aemu/mac-amd64',
+ 'version': 'T9bWxf8aUC5TwCFgPxpuW29Mfy-7Z9xCfXB9QO8MfU0C'
+ },
+ ],
+ 'condition': 'host_os == "mac" and checkout_fuchsia',
+ 'dep_type': 'cipd',
+ },
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '535dbf16a84c7fc238f7ed11b5a75381407e38f6',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '105a8460911176861a422738eee4daad8dfe88a2',
'v8/tools/luci-go': {
'packages': [
{
@@ -258,11 +266,11 @@ deps = {
'dep_type': 'cipd',
},
'v8/third_party/perfetto':
- Var('android_url') + '/platform/external/perfetto.git' + '@' + '12dc10e0278cded35205cf84f80a821348cb6c56',
+ Var('android_url') + '/platform/external/perfetto.git' + '@' + 'b9b24d1b0b80aafec393af085067e9eae829412f',
'v8/third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91',
'v8/third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'b9b9a5af7cca2e683e5f2aead8418e5bf9d5a7d5',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '156be8c52f80cde343088b4a69a80579101b6e67',
'v8/third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that have check_v8_header_includes enabled.
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 3f741d816f..a7bffbdbeb 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -61,28 +61,30 @@
// current process id, thread id, and a timestamp in microseconds.
//
// To trace an asynchronous procedure such as an IPC send/receive, use
-// ASYNC_BEGIN and ASYNC_END:
+// NESTABLE_ASYNC_BEGIN and NESTABLE_ASYNC_END:
// [single threaded sender code]
// static int send_count = 0;
// ++send_count;
-// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+// "ipc", "message", TRACE_ID_LOCAL(send_count));
// Send(new MyMessage(send_count));
// [receive code]
// void OnMyMessage(send_count) {
-// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+// TRACE_EVENT_NESTABLE_ASYNC_END0(
+// "ipc", "message", TRACE_ID_LOCAL(send_count));
// }
-// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
-// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
-// Pointers can be used for the ID parameter, and they will be annotated
-// internally so that the same pointer on two different processes will not
-// match. For example:
+// The third parameter is a unique ID to match NESTABLE_ASYNC_BEGIN/END
+// pairs. NESTABLE_ASYNC_BEGIN and NESTABLE_ASYNC_END can occur on any thread
+// of any traced process. Pointers can be used for the ID parameter, and they
+// will be annotated internally so that the same pointer on two different
+// processes will not match. For example:
// class MyTracedClass {
// public:
// MyTracedClass() {
-// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("category", "MyTracedClass", this);
// }
// ~MyTracedClass() {
-// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+// TRACE_EVENT_NESTABLE_ASYNC_END0("category", "MyTracedClass", this);
// }
// }
//
@@ -512,9 +514,11 @@
name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
arg1_val)
-// ASYNC_STEP_* APIs should be only used by legacy code. New code should
-// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
-// event.
+// -- TRACE_EVENT_ASYNC is DEPRECATED! --
+//
+// TRACE_EVENT_ASYNC_* APIs should be only used by legacy code. New code should
+// use TRACE_EVENT_NESTABLE_ASYNC_* APIs instead.
+//
// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
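A minimal migration sketch (the "net" category and request_id are
illustrative, not taken from this header):

    // Legacy, deprecated:
    TRACE_EVENT_ASYNC_BEGIN0("net", "Request", request_id);
    TRACE_EVENT_ASYNC_END0("net", "Request", request_id);

    // Preferred nestable form; TRACE_ID_LOCAL marks the ID as process-local:
    TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("net", "Request",
                                      TRACE_ID_LOCAL(request_id));
    TRACE_EVENT_NESTABLE_ASYNC_END0("net", "Request",
                                    TRACE_ID_LOCAL(request_id));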
@@ -566,9 +570,6 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_ASYNC_BEGIN_WITH_FLAGS0(category_group, name, id, flags) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
- category_group, name, id, flags)
// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp
// provided.
@@ -595,11 +596,6 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0( \
- category_group, name, id, timestamp, flags) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
// category is not enabled, then this does nothing. The |name| and |id| must
@@ -671,9 +667,6 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_ASYNC_END_WITH_FLAGS0(category_group, name, id, flags) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
- category_group, name, id, flags)
// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided.
#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
@@ -699,11 +692,6 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(category_group, name, \
- id, timestamp, flags) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@@ -742,6 +730,10 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_FLAGS0(category_group, name, id, \
+ flags) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, flags)
// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
// or 2 associated arguments. If the category is not enabled, then this does
// nothing.
@@ -761,6 +753,10 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_FLAGS0(category_group, name, id, \
+ flags) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, flags)
// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
// with none, one or two associated arguments. If the category is not enabled,
@@ -808,6 +804,11 @@
TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0( \
+ category_group, name, id, timestamp, flags) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
id, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
@@ -826,6 +827,11 @@
TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0( \
+ category_group, name, id, timestamp, flags) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0( \
category_group, name, id, timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
diff --git a/deps/v8/gni/proto_library.gni b/deps/v8/gni/proto_library.gni
index b16d8f93bd..eca3ffb84e 100644
--- a/deps/v8/gni/proto_library.gni
+++ b/deps/v8/gni/proto_library.gni
@@ -124,9 +124,13 @@ template("proto_library") {
rebase_path(proto_in_dir, root_build_dir),
]
if (generate_cc) {
+ cc_generator_options_ = ""
+ if (defined(invoker.cc_generator_options)) {
+ cc_generator_options_ = invoker.cc_generator_options
+ }
args += [
"--cpp_out",
- rel_cc_out_dir,
+ cc_generator_options_ + rel_cc_out_dir,
]
}
if (generate_descriptor != "") {
@@ -153,13 +157,9 @@ template("proto_library") {
args += rebase_path(proto_sources, root_build_dir)
- inputs = [
- protoc_path,
- ]
+ inputs = [ protoc_path ]
- deps = [
- protoc_label,
- ]
+ deps = [ protoc_label ]
if (generate_with_plugin) {
inputs += [ plugin_path ]
if (defined(plugin_host_label)) {
@@ -201,21 +201,23 @@ template("proto_library") {
public_configs = []
}
- public_configs += [
- "//:protobuf_gen_config",
- ":$config_name",
- ]
+ public_configs += [ "//:protobuf_gen_config" ]
+
+ propagate_imports_configs = !defined(invoker.propagate_imports_configs) ||
+ invoker.propagate_imports_configs
+ if (propagate_imports_configs) {
+ public_configs += [ ":$config_name" ]
+ } else {
+ # Embedder handles include directory propagation to dependents.
+ configs += [ ":$config_name" ]
+ }
# Use protobuf_full only for tests.
if (defined(invoker.use_protobuf_full) &&
invoker.use_protobuf_full == true) {
- deps = [
- "//:protobuf_full",
- ]
+ deps = [ "//:protobuf_full" ]
} else {
- deps = [
- "//:protobuf_lite",
- ]
+ deps = [ "//:protobuf_lite" ]
}
deps += [ ":$action_name" ]
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index eaf76a471b..0b2806ca94 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -57,6 +57,12 @@ declare_args() {
# Use Perfetto (https://perfetto.dev) as the default TracingController. Not
# currently implemented.
v8_use_perfetto = false
+
+ # Override global symbol level setting for v8
+ v8_symbol_level = symbol_level
+
+ # Enable WebAssembly debugging via GDB-remote protocol.
+ v8_enable_wasm_gdb_remote_debugging = false
}
if (v8_use_external_startup_data == "") {
@@ -109,6 +115,13 @@ if (is_debug && !v8_optimized_debug) {
}
}
+if (!is_debug) {
+ v8_remove_configs += [
+ # Too much performance impact, unclear security benefit.
+ "//build/config/compiler:default_init_stack_vars",
+ ]
+}
+
if (v8_code_coverage && !is_clang) {
v8_add_configs += [
v8_path_prefix + ":v8_gcov_coverage_cflags",
@@ -116,6 +129,19 @@ if (v8_code_coverage && !is_clang) {
]
}
+if (v8_symbol_level != symbol_level) {
+ v8_remove_configs += [ "//build/config/compiler:default_symbols" ]
+ if (v8_symbol_level == 0) {
+ v8_add_configs += [ "//build/config/compiler:no_symbols" ]
+ } else if (v8_symbol_level == 1) {
+ v8_add_configs += [ "//build/config/compiler:minimal_symbols" ]
+ } else if (v8_symbol_level == 2) {
+ v8_add_configs += [ "//build/config/compiler:symbols" ]
+ } else {
+ assert(false)
+ }
+}
+
if ((is_posix || is_fuchsia) &&
(v8_enable_backtrace || v8_monolithic || v8_expose_symbols)) {
v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ]
@@ -217,6 +243,25 @@ template("v8_component") {
}
}
+template("v8_shared_library") {
+ shared_library(target_name) {
+ forward_variables_from(invoker,
+ "*",
+ [
+ "configs",
+ "remove_configs",
+ ])
+ configs -= v8_remove_configs
+ configs += v8_add_configs
+ if (defined(invoker.remove_configs)) {
+ configs -= invoker.remove_configs
+ }
+ if (defined(invoker.configs)) {
+ configs += invoker.configs
+ }
+ }
+}
+
template("v8_static_library") {
static_library(target_name) {
complete_static_lib = true
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index 1e0794df7a..4f90a5c8c7 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -1,5 +1,6 @@
adamk@chromium.org
danno@chromium.org
+mlippautz@chromium.org
ulan@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/include/cppgc/README.md b/deps/v8/include/cppgc/README.md
new file mode 100644
index 0000000000..3a2db6dfa9
--- /dev/null
+++ b/deps/v8/include/cppgc/README.md
@@ -0,0 +1,5 @@
+# C++ Garbage Collection
+
+This directory provides an open-source garbage collection library for C++.
+
+The library is under construction, meaning that *all APIs in this directory are incomplete, considered unstable, and should not be used*.
\ No newline at end of file
diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h
new file mode 100644
index 0000000000..3e717ad7d4
--- /dev/null
+++ b/deps/v8/include/cppgc/allocation.h
@@ -0,0 +1,91 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_ALLOCATION_H_
+#define INCLUDE_CPPGC_ALLOCATION_H_
+
+#include <stdint.h>
+#include <atomic>
+
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/gc-info.h"
+#include "include/cppgc/heap.h"
+#include "include/cppgc/internals.h"
+
+namespace cppgc {
+
+template <typename T>
+class MakeGarbageCollectedTraitBase;
+
+namespace internal {
+
+class V8_EXPORT MakeGarbageCollectedTraitInternal {
+ protected:
+ static inline void MarkObjectAsFullyConstructed(const void* payload) {
+ // See api_constants for an explanation of the constants.
+ std::atomic<uint16_t>* atomic_mutable_bitfield =
+ reinterpret_cast<std::atomic<uint16_t>*>(
+ const_cast<uint16_t*>(reinterpret_cast<const uint16_t*>(
+ reinterpret_cast<const uint8_t*>(payload) -
+ api_constants::kFullyConstructedBitFieldOffsetFromPayload)));
+ uint16_t value = atomic_mutable_bitfield->load(std::memory_order_relaxed);
+ value = value | api_constants::kFullyConstructedBitMask;
+ atomic_mutable_bitfield->store(value, std::memory_order_release);
+ }
+
+ static void* Allocate(cppgc::Heap* heap, size_t size, GCInfoIndex index);
+
+ friend class HeapObjectHeader;
+};
+
+} // namespace internal
+
+// Users with custom allocation needs (e.g. overriding size) should override
+// MakeGarbageCollectedTrait (see below) and inherit their trait from
+// MakeGarbageCollectedTraitBase to get access to low-level primitives.
+template <typename T>
+class MakeGarbageCollectedTraitBase
+ : private internal::MakeGarbageCollectedTraitInternal {
+ protected:
+ // Allocates an object of |size| bytes on |heap|.
+ //
+ // TODO(mlippautz): Allow specifying arena for specific embedder uses.
+ static void* Allocate(Heap* heap, size_t size) {
+ return internal::MakeGarbageCollectedTraitInternal::Allocate(
+ heap, size, internal::GCInfoTrait<T>::Index());
+ }
+
+ // Marks an object as being fully constructed, resulting in precise handling
+ // by the garbage collector.
+ static void MarkObjectAsFullyConstructed(const void* payload) {
+ internal::MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(
+ payload);
+ }
+};
+
+template <typename T>
+class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase<T> {
+ public:
+ template <typename... Args>
+ static T* Call(Heap* heap, Args&&... args) {
+ static_assert(internal::IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ void* memory = MakeGarbageCollectedTraitBase<T>::Allocate(heap, sizeof(T));
+ T* object = ::new (memory) T(std::forward<Args>(args)...);
+ MakeGarbageCollectedTraitBase<T>::MarkObjectAsFullyConstructed(object);
+ return object;
+ }
+};
+
+// Default MakeGarbageCollected: Constructs an instance of T, which is a garbage
+// collected type.
+template <typename T, typename... Args>
+T* MakeGarbageCollected(Heap* heap, Args&&... args) {
+ return MakeGarbageCollectedTrait<T>::Call(heap, std::forward<Args>(args)...);
+}
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_ALLOCATION_H_
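A minimal usage sketch for the allocation API above, assuming a fully defined
embedder type Node (illustrative); tracing machinery is not part of this CL:

    #include "include/cppgc/allocation.h"
    #include "include/cppgc/garbage-collected.h"
    #include "include/cppgc/heap.h"

    class Node final : public cppgc::GarbageCollected<Node> {
     public:
      explicit Node(int value) : value_(value) {}
     private:
      int value_;
    };

    void Example() {
      std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create();
      // operator new is deleted; allocation must go through the trait.
      Node* node = cppgc::MakeGarbageCollected<Node>(heap.get(), 42);
      (void)node;  // Reclaimed by the garbage collector, never deleted.
    }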
diff --git a/deps/v8/include/cppgc/finalizer-trait.h b/deps/v8/include/cppgc/finalizer-trait.h
new file mode 100644
index 0000000000..12216ed84e
--- /dev/null
+++ b/deps/v8/include/cppgc/finalizer-trait.h
@@ -0,0 +1,90 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_FINALIZER_TRAIT_H_
+#define INCLUDE_CPPGC_FINALIZER_TRAIT_H_
+
+#include <type_traits>
+
+#include "include/cppgc/internals.h"
+
+namespace cppgc {
+namespace internal {
+
+using FinalizationCallback = void (*)(void*);
+
+template <typename T, typename = void>
+struct HasFinalizeGarbageCollectedObject : std::false_type {};
+
+template <typename T>
+struct HasFinalizeGarbageCollectedObject<
+ T, void_t<decltype(std::declval<T>().FinalizeGarbageCollectedObject())>>
+ : std::true_type {};
+
+// The FinalizerTraitImpl specifies how to finalize objects.
+template <typename T, bool isFinalized>
+struct FinalizerTraitImpl;
+
+template <typename T>
+struct FinalizerTraitImpl<T, true> {
+ private:
+ // Dispatch to custom FinalizeGarbageCollectedObject().
+ struct Custom {
+ static void Call(void* obj) {
+ static_cast<T*>(obj)->FinalizeGarbageCollectedObject();
+ }
+ };
+
+ // Dispatch to regular destructor.
+ struct Destructor {
+ static void Call(void* obj) { static_cast<T*>(obj)->~T(); }
+ };
+
+ using FinalizeImpl =
+ std::conditional_t<HasFinalizeGarbageCollectedObject<T>::value, Custom,
+ Destructor>;
+
+ public:
+ static void Finalize(void* obj) {
+ static_assert(sizeof(T), "T must be fully defined");
+ FinalizeImpl::Call(obj);
+ }
+};
+
+template <typename T>
+struct FinalizerTraitImpl<T, false> {
+ static void Finalize(void* obj) {
+ static_assert(sizeof(T), "T must be fully defined");
+ }
+};
+
+// The FinalizerTrait is used to determine if a type requires finalization and
+// what finalization means.
+template <typename T>
+struct FinalizerTrait {
+ private:
+ // Object has a finalizer if it has
+ // - a custom FinalizeGarbageCollectedObject method, or
+ // - a destructor.
+ static constexpr bool kNonTrivialFinalizer =
+ internal::HasFinalizeGarbageCollectedObject<T>::value ||
+ !std::is_trivially_destructible<typename std::remove_cv<T>::type>::value;
+
+ static void Finalize(void* obj) {
+ internal::FinalizerTraitImpl<T, kNonTrivialFinalizer>::Finalize(obj);
+ }
+
+ public:
+ // The callback used to finalize an object of type T.
+ static constexpr FinalizationCallback kCallback =
+ kNonTrivialFinalizer ? Finalize : nullptr;
+};
+
+template <typename T>
+constexpr FinalizationCallback FinalizerTrait<T>::kCallback;
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_FINALIZER_TRAIT_H_
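A sketch of how the trait resolves, with illustrative types:

    // Custom finalization wins over the destructor:
    struct WithCustomFinalizer {
      void FinalizeGarbageCollectedObject() { /* release external state */ }
    };
    // Trivially destructible, so no finalizer is registered at all:
    struct Trivial {};

    static_assert(
        cppgc::internal::FinalizerTrait<WithCustomFinalizer>::kCallback !=
            nullptr,
        "dispatches to FinalizeGarbageCollectedObject()");
    static_assert(
        cppgc::internal::FinalizerTrait<Trivial>::kCallback == nullptr,
        "no finalization needed");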
diff --git a/deps/v8/include/cppgc/garbage-collected.h b/deps/v8/include/cppgc/garbage-collected.h
new file mode 100644
index 0000000000..6c62daafdc
--- /dev/null
+++ b/deps/v8/include/cppgc/garbage-collected.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_GARBAGE_COLLECTED_H_
+#define INCLUDE_CPPGC_GARBAGE_COLLECTED_H_
+
+#include <type_traits>
+
+#include "include/cppgc/internals.h"
+#include "include/cppgc/platform.h"
+
+namespace cppgc {
+namespace internal {
+
+template <typename T, typename = void>
+struct IsGarbageCollectedType : std::false_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+struct IsGarbageCollectedType<
+ T, void_t<typename std::remove_const_t<T>::IsGarbageCollectedTypeMarker>>
+ : std::true_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+} // namespace internal
+
+template <typename>
+class GarbageCollected {
+ public:
+ using IsGarbageCollectedTypeMarker = void;
+
+ // Must use MakeGarbageCollected.
+ void* operator new(size_t) = delete;
+ void* operator new[](size_t) = delete;
+  // The garbage collector takes care of reclaiming the object. Also, a
+  // virtual destructor requires an unambiguous, accessible 'operator delete'.
+ void operator delete(void*) {
+#ifdef V8_ENABLE_CHECKS
+ internal::Abort();
+#endif // V8_ENABLE_CHECKS
+ }
+ void operator delete[](void*) = delete;
+
+ protected:
+ GarbageCollected() = default;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_GARBAGE_COLLECTED_H_
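The marker-based detection can be sketched as follows (Managed and Unmanaged
are illustrative):

    class Managed : public cppgc::GarbageCollected<Managed> {};
    class Unmanaged {};

    static_assert(cppgc::internal::IsGarbageCollectedType<Managed>::value,
                  "found via the inherited IsGarbageCollectedTypeMarker");
    static_assert(!cppgc::internal::IsGarbageCollectedType<Unmanaged>::value,
                  "no marker, so the primary template applies");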
diff --git a/deps/v8/include/cppgc/gc-info.h b/deps/v8/include/cppgc/gc-info.h
new file mode 100644
index 0000000000..987ba34fa4
--- /dev/null
+++ b/deps/v8/include/cppgc/gc-info.h
@@ -0,0 +1,43 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_GC_INFO_H_
+#define INCLUDE_CPPGC_GC_INFO_H_
+
+#include <stdint.h>
+
+#include "include/cppgc/finalizer-trait.h"
+#include "include/v8config.h"
+
+namespace cppgc {
+namespace internal {
+
+using GCInfoIndex = uint16_t;
+
+class V8_EXPORT RegisteredGCInfoIndex final {
+ public:
+ RegisteredGCInfoIndex(FinalizationCallback finalization_callback,
+ bool has_v_table);
+ GCInfoIndex GetIndex() const { return index_; }
+
+ private:
+ const GCInfoIndex index_;
+};
+
+// Trait that determines how the garbage collector treats objects with respect
+// to traversing, finalization, and naming.
+template <typename T>
+struct GCInfoTrait {
+ static GCInfoIndex Index() {
+ static_assert(sizeof(T), "T must be fully defined");
+ static const RegisteredGCInfoIndex registered_index(
+ FinalizerTrait<T>::kCallback, std::is_polymorphic<T>::value);
+ return registered_index.GetIndex();
+ }
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_GC_INFO_H_
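Registration happens once per type on first use, via the function-local
static; a sketch, assuming a garbage-collected type Managed as above:

    void RegisterOnce() {
      using cppgc::internal::GCInfoIndex;
      using cppgc::internal::GCInfoTrait;
      GCInfoIndex first = GCInfoTrait<Managed>::Index();   // registers Managed
      GCInfoIndex second = GCInfoTrait<Managed>::Index();  // cached lookup
      // first == second holds for the lifetime of the process.
      (void)first;
      (void)second;
    }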
diff --git a/deps/v8/include/cppgc/heap.h b/deps/v8/include/cppgc/heap.h
new file mode 100644
index 0000000000..a0568d534f
--- /dev/null
+++ b/deps/v8/include/cppgc/heap.h
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_HEAP_H_
+#define INCLUDE_CPPGC_HEAP_H_
+
+#include <memory>
+
+#include "include/v8config.h"
+
+namespace cppgc {
+namespace internal {
+class Heap;
+} // namespace internal
+
+class V8_EXPORT Heap {
+ public:
+ static std::unique_ptr<Heap> Create();
+
+ virtual ~Heap() = default;
+
+ private:
+ Heap() = default;
+
+ friend class internal::Heap;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_HEAP_H_
diff --git a/deps/v8/include/cppgc/internals.h b/deps/v8/include/cppgc/internals.h
new file mode 100644
index 0000000000..1e57779758
--- /dev/null
+++ b/deps/v8/include/cppgc/internals.h
@@ -0,0 +1,41 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNALS_H_
+#define INCLUDE_CPPGC_INTERNALS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "include/v8config.h"
+
+namespace cppgc {
+namespace internal {
+
+// Pre-C++17 custom implementation of std::void_t.
+template <typename... Ts>
+struct make_void {
+ typedef void type;
+};
+template <typename... Ts>
+using void_t = typename make_void<Ts...>::type;
+
+// Embedders should not rely on this code!
+
+// Internal constants to avoid exposing internal types on the API surface.
+namespace api_constants {
+// Offset of the uint16_t bitfield from the payload containing the
+// in-construction bit. This is subtracted from the payload pointer to get
+// to the right bitfield.
+static constexpr size_t kFullyConstructedBitFieldOffsetFromPayload =
+ 2 * sizeof(uint16_t);
+// Mask for in-construction bit.
+static constexpr size_t kFullyConstructedBitMask = size_t{1};
+
+} // namespace api_constants
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNALS_H_
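The void_t helper enables the standard detection idiom used above by
HasFinalizeGarbageCollectedObject and IsGarbageCollectedType; a self-contained
sketch (HasFoo, A, and B are illustrative):

    #include <type_traits>
    #include <utility>

    #include "include/cppgc/internals.h"

    template <typename T, typename = void>
    struct HasFoo : std::false_type {};

    template <typename T>
    struct HasFoo<T,
                  cppgc::internal::void_t<decltype(std::declval<T>().Foo())>>
        : std::true_type {};

    struct A { void Foo() {} };
    struct B {};
    static_assert(HasFoo<A>::value && !HasFoo<B>::value, "detection idiom");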
diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h
new file mode 100644
index 0000000000..f216c2730a
--- /dev/null
+++ b/deps/v8/include/cppgc/platform.h
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_PLATFORM_H_
+#define INCLUDE_CPPGC_PLATFORM_H_
+
+#include "include/v8-platform.h"
+#include "include/v8config.h"
+
+namespace cppgc {
+
+// TODO(v8:10346): Put PageAllocator in a non-V8 include header to avoid
+// depending on namespace v8.
+using PageAllocator = v8::PageAllocator;
+
+// Initializes the garbage collector with the provided platform. Must be called
+// before creating a Heap.
+V8_EXPORT void InitializePlatform(PageAllocator* page_allocator);
+
+// Must be called after destroying the last used heap.
+V8_EXPORT void ShutdownPlatform();
+
+namespace internal {
+
+V8_EXPORT void Abort();
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_PLATFORM_H_
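A plausible lifecycle sketch, assuming the embedder supplies the
PageAllocator (for example, the one backing its v8::Platform):

    void Run(cppgc::PageAllocator* page_allocator) {
      cppgc::InitializePlatform(page_allocator);  // before any Heap
      {
        std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create();
        // ... allocate via cppgc::MakeGarbageCollected<T>(heap.get(), ...)
      }
      cppgc::ShutdownPlatform();  // only after the last heap is destroyed
    }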
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index 28b8e61076..3f5410d1e1 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -119,6 +119,7 @@ domain Debugger
script
eval
module
+ wasm-expression-stack
# Object representing the scope. For `global` and `with` scopes it represents the actual
# object; for the rest of the scopes, it is artificial transient object enumerating scope
# variables as its properties.
@@ -273,6 +274,13 @@ domain Debugger
# Resumes JavaScript execution.
command resume
+ parameters
+ # Set to true to terminate execution upon resuming execution. In contrast
+ # to Runtime.terminateExecution, this allows further JavaScript
+ # (i.e. via evaluation) to be executed until execution of the paused code
+ # is actually resumed, at which point termination is triggered.
+ # If execution is currently not paused, this parameter has no effect.
+ optional boolean terminateOnResume
# Searches for given string in script content.
command searchInContent
@@ -496,6 +504,12 @@ domain Debugger
# Fired when the virtual machine resumed execution.
event resumed
+ # Enum of possible script languages.
+ type ScriptLanguage extends string
+ enum
+ JavaScript
+ WebAssembly
+
# Fired when virtual machine fails to parse the script.
event scriptFailedToParse
parameters
@@ -527,6 +541,10 @@ domain Debugger
optional integer length
# JavaScript top stack frame of where the script parsed event was triggered if available.
experimental optional Runtime.StackTrace stackTrace
+ # If the scriptLanguage is WebAssembly, the code section offset in the module.
+ experimental optional integer codeOffset
+ # The language of the script.
+ experimental optional Debugger.ScriptLanguage scriptLanguage
# Fired when virtual machine parses script. This event is also fired for all known and uncollected
# scripts upon enabling debugger.
@@ -562,6 +580,10 @@ domain Debugger
optional integer length
# JavaScript top stack frame of where the script parsed event was triggered if available.
experimental optional Runtime.StackTrace stackTrace
+ # If the scriptLanguage is WebAssembly, the code section offset in the module.
+ experimental optional integer codeOffset
+ # The language of the script.
+ experimental optional Debugger.ScriptLanguage scriptLanguage
experimental domain HeapProfiler
depends on Runtime
@@ -824,6 +846,8 @@ domain Profiler
optional boolean callCount
# Collect block-based coverage.
optional boolean detailed
+ # Allow the backend to send updates on its own initiative
+ optional boolean allowTriggeredUpdates
returns
# Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
number timestamp
@@ -931,7 +955,8 @@ domain Runtime
boolean
symbol
bigint
- # Object subtype hint. Specified for `object` type values only.
+ wasm
+ # Object subtype hint. Specified for `object` or `wasm` type values only.
optional enum subtype
array
null
@@ -950,6 +975,11 @@ domain Runtime
typedarray
arraybuffer
dataview
+ i32
+ i64
+ f32
+ f64
+ v128
# Object class (constructor) name. Specified for `object` type values only.
optional string className
# Remote object value in case of primitive values or JSON values (if it was requested).
@@ -1306,7 +1336,9 @@ domain Runtime
experimental optional TimeDelta timeout
# Disable breakpoints during execution.
experimental optional boolean disableBreaks
- # Reserved flag for future REPL mode support. Setting this flag has currently no effect.
+ # Setting this flag to true enables `let` re-declaration and top-level `await`.
+ # Note that `let` variables can only be re-declared if they originate from
+ # `replMode` themselves.
experimental optional boolean replMode
returns
# Evaluation result.
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
new file mode 100644
index 0000000000..bfce66b652
--- /dev/null
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -0,0 +1,408 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * This file provides additional API on top of the default one for making
+ * API calls that come from embedder C++ functions. These functions are
+ * called directly from optimized code, with all necessary type checks done
+ * in the compiler itself instead of on the embedder side. Hence the "fast"
+ * in the name. Example usage might look like:
+ *
+ * \code
+ * void FastMethod(int param, bool another_param);
+ *
+ * v8::FunctionTemplate::New(isolate, SlowCallback, data,
+ * signature, length, constructor_behavior
+ * side_effect_type,
+ * &v8::CFunction::Make(FastMethod));
+ * \endcode
+ *
+ * An example for custom embedder type support might employ a way to wrap/
+ * unwrap various C++ types in JSObject instances, e.g:
+ *
+ * \code
+ *
+ * // Represents the way this type system maps C++ and JS values.
+ * struct WrapperTypeInfo {
+ * // Store e.g. a method to map from exposed C++ types to the already
+ * // created v8::FunctionTemplate's for instantiating them.
+ * };
+ *
+ * // Helper method with a sanity check.
+ * template <typename T, int offset>
+ * inline T* GetInternalField(v8::Local<v8::Object> wrapper) {
+ * assert(offset < wrapper->InternalFieldCount());
+ * return reinterpret_cast<T*>(
+ * wrapper->GetAlignedPointerFromInternalField(offset));
+ * }
+ *
+ * // Returns the type info from a wrapper JS object.
+ * inline const WrapperTypeInfo* ToWrapperTypeInfo(
+ * v8::Local<v8::Object> wrapper) {
+ * return GetInternalField<WrapperTypeInfo,
+ * kV8EmbedderWrapperTypeIndex>(wrapper);
+ * }
+ *
+ * class CustomEmbedderType {
+ * public:
+ * static constexpr const WrapperTypeInfo* GetWrapperTypeInfo() {
+ * return &custom_type_wrapper_type_info;
+ * }
+ * // Returns the raw C object from a wrapper JS object.
+ * static CustomEmbedderType* Unwrap(v8::Local<v8::Object> wrapper) {
+ * return GetInternalField<CustomEmbedderType,
+ * kV8EmbedderWrapperObjectIndex>(wrapper);
+ * }
+ * static void FastMethod(CustomEmbedderType* receiver, int param) {
+ * assert(receiver != nullptr);
+ * // Type checks are already done by the optimized code.
+ * // Then call some performance-critical method like:
+ * // receiver->Method(param);
+ * }
+ *
+ * static void SlowMethod(
+ * const v8::FunctionCallbackInfo<v8::Value>& info) {
+ * v8::Local<v8::Object> instance =
+ * v8::Local<v8::Object>::Cast(info.Holder());
+ * CustomEmbedderType* receiver = Unwrap(instance);
+ * // TODO: Do type checks and extract {param}.
+ * FastMethod(receiver, param);
+ * }
+ *
+ * private:
+ * static const WrapperTypeInfo custom_type_wrapper_type_info;
+ * };
+ *
+ * // Support for custom embedder types via specialization of WrapperTraits.
+ * namespace v8 {
+ * template <>
+ * class WrapperTraits<CustomEmbedderType> {
+ * public:
+ * static const void* GetTypeInfo() {
+ * // We use the already defined machinery for the custom type.
+ * return CustomEmbedderType::GetWrapperTypeInfo();
+ * }
+ * };
+ * } // namespace v8
+ *
+ * // The constants kV8EmbedderWrapperTypeIndex and
+ * // kV8EmbedderWrapperObjectIndex describe the offsets for the type info
+ * // struct (the one returned by WrapperTraits::GetTypeInfo) and the
+ * // native object, when expressed as internal field indices within a
+ * // JSObject. The existence of this helper function assumes that all
+ * // embedder objects have their JSObject-side type info at the same
+ * // offset, but this is not a limitation of the API itself. For a detailed
+ * // use case, see the third example.
+ * static constexpr int kV8EmbedderWrapperTypeIndex = 0;
+ * static constexpr int kV8EmbedderWrapperObjectIndex = 1;
+ *
+ * // The following setup function can be templatized based on
+ * // the {embedder_object} argument.
+ * void SetupCustomEmbedderObject(v8::Isolate* isolate,
+ * v8::Local<v8::Context> context,
+ * CustomEmbedderType* embedder_object) {
+ * isolate->set_embedder_wrapper_type_index(
+ * kV8EmbedderWrapperTypeIndex);
+ * isolate->set_embedder_wrapper_object_index(
+ * kV8EmbedderWrapperObjectIndex);
+ *
+ * v8::CFunction c_func =
+ * MakeV8CFunction(CustomEmbedderType::FastMethod);
+ *
+ * Local<v8::FunctionTemplate> method_template =
+ * v8::FunctionTemplate::New(
+ * isolate, CustomEmbedderType::SlowMethod, v8::Local<v8::Value>(),
+ * v8::Local<v8::Signature>(), 1, v8::ConstructorBehavior::kAllow,
+ * v8::SideEffectType::kHasSideEffect, &c_func);
+ *
+ * v8::Local<v8::ObjectTemplate> object_template =
+ * v8::ObjectTemplate::New(isolate);
+ * object_template->SetInternalFieldCount(
+ * kV8EmbedderWrapperObjectIndex + 1);
+ * object_template->Set(
+ *       v8::String::NewFromUtf8Literal(isolate, "method"), method_template);
+ *
+ * // Instantiate the wrapper JS object.
+ * v8::Local<v8::Object> object =
+ * object_template->NewInstance(context).ToLocalChecked();
+ * object->SetAlignedPointerInInternalField(
+ * kV8EmbedderWrapperObjectIndex,
+ * reinterpret_cast<void*>(embedder_object));
+ *
+ * // TODO: Expose {object} where it's necessary.
+ * }
+ * \endcode
+ *
+ * For instance if {object} is exposed via a global "obj" variable,
+ * one could write in JS:
+ * function hot_func() {
+ * obj.method(42);
+ * }
+ * and once {hot_func} gets optimized, CustomEmbedderType::FastMethod
+ * will be called instead of the slow version, with the following arguments:
+ * receiver := the {embedder_object} from above
+ * param := 42
+ *
+ * Currently only void return types are supported.
+ * Currently supported argument types:
+ * - pointer to an embedder type
+ * - bool
+ * - int32_t
+ * - uint32_t
+ * To be supported types:
+ * - int64_t
+ * - uint64_t
+ * - float32_t
+ * - float64_t
+ * - arrays of C types
+ * - arrays of embedder types
+ */
+
+#ifndef INCLUDE_V8_FAST_API_CALLS_H_
+#define INCLUDE_V8_FAST_API_CALLS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "v8config.h" // NOLINT(build/include)
+
+namespace v8 {
+
+class CTypeInfo {
+ public:
+ enum class Type : char {
+ kVoid,
+ kBool,
+ kInt32,
+ kUint32,
+ kInt64,
+ kUint64,
+ kFloat32,
+ kFloat64,
+ kUnwrappedApiObject,
+ };
+
+ enum ArgFlags : char {
+ None = 0,
+ IsArrayBit = 1 << 0, // This argument is first in an array of values.
+ };
+
+ static CTypeInfo FromWrapperType(const void* wrapper_type_info,
+ ArgFlags flags = ArgFlags::None) {
+ uintptr_t wrapper_type_info_ptr =
+ reinterpret_cast<uintptr_t>(wrapper_type_info);
+ // Check that the lower kIsWrapperTypeBit bits are 0's.
+ CHECK_EQ(
+ wrapper_type_info_ptr & ~(static_cast<uintptr_t>(~0)
+ << static_cast<uintptr_t>(kIsWrapperTypeBit)),
+ 0);
+ // TODO(mslekova): Refactor the manual bit manipulations to use
+ // PointerWithPayload instead.
+ return CTypeInfo(wrapper_type_info_ptr | flags | kIsWrapperTypeBit);
+ }
+
+ static constexpr CTypeInfo FromCType(Type ctype,
+ ArgFlags flags = ArgFlags::None) {
+ // ctype cannot be Type::kUnwrappedApiObject.
+ return CTypeInfo(
+ ((static_cast<uintptr_t>(ctype) << kTypeOffset) & kTypeMask) | flags);
+ }
+
+ const void* GetWrapperInfo() const;
+
+ constexpr Type GetType() const {
+ if (payload_ & kIsWrapperTypeBit) {
+ return Type::kUnwrappedApiObject;
+ }
+ return static_cast<Type>((payload_ & kTypeMask) >> kTypeOffset);
+ }
+
+ constexpr bool IsArray() const { return payload_ & ArgFlags::IsArrayBit; }
+
+ private:
+ explicit constexpr CTypeInfo(uintptr_t payload) : payload_(payload) {}
+
+ // This must be the bit immediately following the ArgFlags bits.
+ static constexpr uintptr_t kIsWrapperTypeBit = 1 << 1;
+ static constexpr uintptr_t kWrapperTypeInfoMask = static_cast<uintptr_t>(~0)
+ << 2;
+
+ static constexpr unsigned int kTypeOffset = kIsWrapperTypeBit;
+ static constexpr unsigned int kTypeSize = 8 - kTypeOffset;
+ static constexpr uintptr_t kTypeMask =
+ (~(static_cast<uintptr_t>(~0) << kTypeSize)) << kTypeOffset;
+
+ const uintptr_t payload_;
+};
+
+class CFunctionInfo {
+ public:
+ virtual const CTypeInfo& ReturnInfo() const = 0;
+ virtual unsigned int ArgumentCount() const = 0;
+ virtual const CTypeInfo& ArgumentInfo(unsigned int index) const = 0;
+};
+
+template <typename T>
+class WrapperTraits {
+ public:
+ static const void* GetTypeInfo() {
+ static_assert(sizeof(T) != sizeof(T),
+ "WrapperTraits must be specialized for this type.");
+ return nullptr;
+ }
+};
+
+namespace internal {
+
+template <typename T>
+struct GetCType {
+ static_assert(sizeof(T) != sizeof(T), "Unsupported CType");
+};
+
+#define SPECIALIZE_GET_C_TYPE_FOR(ctype, ctypeinfo) \
+ template <> \
+ struct GetCType<ctype> { \
+ static constexpr CTypeInfo Get() { \
+ return CTypeInfo::FromCType(CTypeInfo::Type::ctypeinfo); \
+ } \
+ };
+
+#define SUPPORTED_C_TYPES(V) \
+ V(void, kVoid) \
+ V(bool, kBool) \
+ V(int32_t, kInt32) \
+ V(uint32_t, kUint32) \
+ V(int64_t, kInt64) \
+ V(uint64_t, kUint64) \
+ V(float, kFloat32) \
+ V(double, kFloat64)
+
+SUPPORTED_C_TYPES(SPECIALIZE_GET_C_TYPE_FOR)
+
+template <typename T, typename = void>
+struct EnableIfHasWrapperTypeInfo {};
+
+template <>
+struct EnableIfHasWrapperTypeInfo<void> {};
+
+template <typename T>
+struct EnableIfHasWrapperTypeInfo<T, decltype(WrapperTraits<T>::GetTypeInfo(),
+ void())> {
+ typedef void type;
+};
+
+// T* where T is a primitive (array of primitives).
+template <typename T, typename = void>
+struct GetCTypePointerImpl {
+ static constexpr CTypeInfo Get() {
+ return CTypeInfo::FromCType(GetCType<T>::Get().GetType(),
+ CTypeInfo::IsArrayBit);
+ }
+};
+
+// T* where T is an API object.
+template <typename T>
+struct GetCTypePointerImpl<T, typename EnableIfHasWrapperTypeInfo<T>::type> {
+ static constexpr CTypeInfo Get() {
+ return CTypeInfo::FromWrapperType(WrapperTraits<T>::GetTypeInfo());
+ }
+};
+
+// T** where T is a primitive. Not allowed.
+template <typename T, typename = void>
+struct GetCTypePointerPointerImpl {
+ static_assert(sizeof(T**) != sizeof(T**), "Unsupported type");
+};
+
+// T** where T is an API object (array of API objects).
+template <typename T>
+struct GetCTypePointerPointerImpl<
+ T, typename EnableIfHasWrapperTypeInfo<T>::type> {
+ static constexpr CTypeInfo Get() {
+ return CTypeInfo::FromWrapperType(WrapperTraits<T>::GetTypeInfo(),
+ CTypeInfo::IsArrayBit);
+ }
+};
+
+template <typename T>
+struct GetCType<T**> : public GetCTypePointerPointerImpl<T> {};
+
+template <typename T>
+struct GetCType<T*> : public GetCTypePointerImpl<T> {};
+
+template <typename R, typename... Args>
+class CFunctionInfoImpl : public CFunctionInfo {
+ public:
+ CFunctionInfoImpl()
+ : return_info_(i::GetCType<R>::Get()),
+ arg_count_(sizeof...(Args)),
+ arg_info_{i::GetCType<Args>::Get()...} {
+ static_assert(i::GetCType<R>::Get().GetType() == CTypeInfo::Type::kVoid,
+ "Only void return types are currently supported.");
+ }
+
+ const CTypeInfo& ReturnInfo() const override { return return_info_; }
+ unsigned int ArgumentCount() const override { return arg_count_; }
+ const CTypeInfo& ArgumentInfo(unsigned int index) const override {
+ CHECK_LT(index, ArgumentCount());
+ return arg_info_[index];
+ }
+
+ private:
+ CTypeInfo return_info_;
+ const unsigned int arg_count_;
+ CTypeInfo arg_info_[sizeof...(Args)];
+};
+
+} // namespace internal
+
+class V8_EXPORT CFunction {
+ public:
+ const CTypeInfo& ReturnInfo() const { return type_info_->ReturnInfo(); }
+
+ const CTypeInfo& ArgumentInfo(unsigned int index) const {
+ return type_info_->ArgumentInfo(index);
+ }
+
+ unsigned int ArgumentCount() const { return type_info_->ArgumentCount(); }
+
+ const void* GetAddress() const { return address_; }
+ const CFunctionInfo* GetTypeInfo() const { return type_info_; }
+
+ template <typename F>
+ static CFunction Make(F* func) {
+ return ArgUnwrap<F*>::Make(func);
+ }
+
+ private:
+ const void* address_;
+ const CFunctionInfo* type_info_;
+
+ CFunction(const void* address, const CFunctionInfo* type_info);
+
+ template <typename R, typename... Args>
+ static CFunctionInfo* GetCFunctionInfo() {
+ static internal::CFunctionInfoImpl<R, Args...> instance;
+ return &instance;
+ }
+
+ template <typename F>
+ class ArgUnwrap {
+ static_assert(sizeof(F) != sizeof(F),
+ "CFunction must be created from a function pointer.");
+ };
+
+ template <typename R, typename... Args>
+ class ArgUnwrap<R (*)(Args...)> {
+ public:
+ static CFunction Make(R (*func)(Args...)) {
+ return CFunction(reinterpret_cast<const void*>(func),
+ GetCFunctionInfo<R, Args...>());
+ }
+ };
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_FAST_API_CALLS_H_
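A minimal sketch of wrapping a free function (FastAdd is illustrative; note
that only void return types are supported at this stage):

    void FastAdd(int32_t a, int32_t b) { /* hot path, no V8 API calls */ }

    void Register() {
      v8::CFunction c_func = v8::CFunction::Make(FastAdd);
      // c_func.ArgumentCount() == 2, both arguments typed kInt32, return
      // type kVoid. Pass &c_func as the last argument to
      // v8::FunctionTemplate::New together with the slow callback, as in
      // the header comment above.
    }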
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index b9a4866533..01274625c1 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -65,15 +65,15 @@ class V8_EXPORT StringView {
class V8_EXPORT StringBuffer {
public:
virtual ~StringBuffer() = default;
- virtual const StringView& string() = 0;
+ virtual StringView string() const = 0;
// This method copies contents.
- static std::unique_ptr<StringBuffer> create(const StringView&);
+ static std::unique_ptr<StringBuffer> create(StringView);
};
class V8_EXPORT V8ContextInfo {
public:
V8ContextInfo(v8::Local<v8::Context> context, int contextGroupId,
- const StringView& humanReadableName)
+ StringView humanReadableName)
: context(context),
contextGroupId(contextGroupId),
humanReadableName(humanReadableName),
@@ -132,37 +132,36 @@ class V8_EXPORT V8InspectorSession {
virtual void addInspectedObject(std::unique_ptr<Inspectable>) = 0;
// Dispatching protocol messages.
- static bool canDispatchMethod(const StringView& method);
- virtual void dispatchProtocolMessage(const StringView& message) = 0;
+ static bool canDispatchMethod(StringView method);
+ virtual void dispatchProtocolMessage(StringView message) = 0;
virtual std::vector<uint8_t> state() = 0;
virtual std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
supportedDomains() = 0;
// Debugger actions.
- virtual void schedulePauseOnNextStatement(const StringView& breakReason,
- const StringView& breakDetails) = 0;
+ virtual void schedulePauseOnNextStatement(StringView breakReason,
+ StringView breakDetails) = 0;
virtual void cancelPauseOnNextStatement() = 0;
- virtual void breakProgram(const StringView& breakReason,
- const StringView& breakDetails) = 0;
+ virtual void breakProgram(StringView breakReason,
+ StringView breakDetails) = 0;
virtual void setSkipAllPauses(bool) = 0;
- virtual void resume() = 0;
+ virtual void resume(bool setTerminateOnResume = false) = 0;
virtual void stepOver() = 0;
virtual std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
- searchInTextByLines(const StringView& text, const StringView& query,
- bool caseSensitive, bool isRegex) = 0;
+ searchInTextByLines(StringView text, StringView query, bool caseSensitive,
+ bool isRegex) = 0;
// Remote objects.
virtual std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
- v8::Local<v8::Context>, v8::Local<v8::Value>, const StringView& groupName,
+ v8::Local<v8::Context>, v8::Local<v8::Value>, StringView groupName,
bool generatePreview) = 0;
virtual bool unwrapObject(std::unique_ptr<StringBuffer>* error,
- const StringView& objectId, v8::Local<v8::Value>*,
+ StringView objectId, v8::Local<v8::Value>*,
v8::Local<v8::Context>*,
std::unique_ptr<StringBuffer>* objectGroup) = 0;
- virtual void releaseObjectGroup(const StringView&) = 0;
- virtual void triggerPreciseCoverageDeltaUpdate(
- const StringView& occassion) = 0;
+ virtual void releaseObjectGroup(StringView) = 0;
+ virtual void triggerPreciseCoverageDeltaUpdate(StringView occasion) = 0;
};
class V8_EXPORT V8InspectorClient {
@@ -240,7 +239,7 @@ struct V8_EXPORT V8StackTraceId {
V8StackTraceId(uintptr_t id, const std::pair<int64_t, int64_t> debugger_id);
V8StackTraceId(uintptr_t id, const std::pair<int64_t, int64_t> debugger_id,
bool should_pause);
- explicit V8StackTraceId(const StringView&);
+ explicit V8StackTraceId(StringView);
V8StackTraceId& operator=(const V8StackTraceId&) = default;
V8StackTraceId& operator=(V8StackTraceId&&) noexcept = default;
~V8StackTraceId() = default;
@@ -265,26 +264,26 @@ class V8_EXPORT V8Inspector {
virtual void idleFinished() = 0;
// Async stack traces instrumentation.
- virtual void asyncTaskScheduled(const StringView& taskName, void* task,
+ virtual void asyncTaskScheduled(StringView taskName, void* task,
bool recurring) = 0;
virtual void asyncTaskCanceled(void* task) = 0;
virtual void asyncTaskStarted(void* task) = 0;
virtual void asyncTaskFinished(void* task) = 0;
virtual void allAsyncTasksCanceled() = 0;
- virtual V8StackTraceId storeCurrentStackTrace(
- const StringView& description) = 0;
+ virtual V8StackTraceId storeCurrentStackTrace(StringView description) = 0;
virtual void externalAsyncTaskStarted(const V8StackTraceId& parent) = 0;
virtual void externalAsyncTaskFinished(const V8StackTraceId& parent) = 0;
// Exceptions instrumentation.
- virtual unsigned exceptionThrown(
- v8::Local<v8::Context>, const StringView& message,
- v8::Local<v8::Value> exception, const StringView& detailedMessage,
- const StringView& url, unsigned lineNumber, unsigned columnNumber,
- std::unique_ptr<V8StackTrace>, int scriptId) = 0;
+ virtual unsigned exceptionThrown(v8::Local<v8::Context>, StringView message,
+ v8::Local<v8::Value> exception,
+ StringView detailedMessage, StringView url,
+ unsigned lineNumber, unsigned columnNumber,
+ std::unique_ptr<V8StackTrace>,
+ int scriptId) = 0;
virtual void exceptionRevoked(v8::Local<v8::Context>, unsigned exceptionId,
- const StringView& message) = 0;
+ StringView message) = 0;
// Connection.
class V8_EXPORT Channel {
@@ -295,8 +294,9 @@ class V8_EXPORT V8Inspector {
virtual void sendNotification(std::unique_ptr<StringBuffer> message) = 0;
virtual void flushProtocolNotifications() = 0;
};
- virtual std::unique_ptr<V8InspectorSession> connect(
- int contextGroupId, Channel*, const StringView& state) = 0;
+ virtual std::unique_ptr<V8InspectorSession> connect(int contextGroupId,
+ Channel*,
+ StringView state) = 0;
// API methods.
virtual std::unique_ptr<V8StackTrace> createStackTrace(
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index e4d698e6ce..876408ebba 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -160,10 +160,10 @@ class Internals {
kNumIsolateDataSlots * kApiSystemPointerSize;
static const int kExternalMemoryLimitOffset =
kExternalMemoryOffset + kApiInt64Size;
- static const int kExternalMemoryAtLastMarkCompactOffset =
+ static const int kExternalMemoryLowSinceMarkCompactOffset =
kExternalMemoryLimitOffset + kApiInt64Size;
static const int kIsolateFastCCallCallerFpOffset =
- kExternalMemoryAtLastMarkCompactOffset + kApiInt64Size;
+ kExternalMemoryLowSinceMarkCompactOffset + kApiInt64Size;
static const int kIsolateFastCCallCallerPcOffset =
kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
static const int kIsolateStackGuardOffset =
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index f21a0b8dd0..64f1848665 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 8
-#define V8_MINOR_VERSION 1
-#define V8_BUILD_NUMBER 307
-#define V8_PATCH_LEVEL 31
+#define V8_MINOR_VERSION 3
+#define V8_BUILD_NUMBER 110
+#define V8_PATCH_LEVEL 9
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 54bc4f0835..9926b308b1 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -120,6 +120,8 @@ class EscapableHandleScope;
template<typename T> class ReturnValue;
namespace internal {
+enum class ArgumentsType;
+template <ArgumentsType>
class Arguments;
class DeferredHandles;
class Heap;
@@ -149,11 +151,6 @@ class ConsoleCallArguments;
// --- Handles ---
-#define TYPE_CHECK(T, S) \
- while (false) { \
- *(static_cast<T* volatile*>(0)) = static_cast<S*>(0); \
- }
-
/**
* An object reference managed by the v8 garbage collector.
*
@@ -197,7 +194,7 @@ class Local {
* handles. For example, converting from a Local<String> to a
* Local<Number>.
*/
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
/**
@@ -363,7 +360,7 @@ class MaybeLocal {
template <class S>
V8_INLINE MaybeLocal(Local<S> that)
: val_(reinterpret_cast<T*>(*that)) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
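The static_assert enforces the same upcast-only rule as the old TYPE_CHECK
macro, but with a readable diagnostic; a sketch of what compiles:

    void Sketch(v8::Local<v8::String> s) {
      v8::Local<v8::Value> v = s;  // OK: Value is a base of String
      // v8::Local<v8::Number> n = s;  // error: static_assert "type check"
      (void)v;
    }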
@@ -532,11 +529,16 @@ template <class T> class PersistentBase {
}
/**
- * Install a finalization callback on this object.
- * NOTE: There is no guarantee as to *when* or even *if* the callback is
- * invoked. The invocation is performed solely on a best effort basis.
- * As always, GC-based finalization should *not* be relied upon for any
- * critical form of resource management!
+ * Install a finalization callback on this object.
+ * NOTE: There is no guarantee as to *when* or even *if* the callback is
+ * invoked. The invocation is performed solely on a best effort basis.
+ * As always, GC-based finalization should *not* be relied upon for any
+ * critical form of resource management!
+ *
+ * The callback is supposed to reset the handle. No further V8 API may be
+ * called in this callback. In case additional work involving V8 needs to be
+ * done, a second callback can be scheduled using
+ * WeakCallbackInfo<void>::SetSecondPassCallback.
*/
template <typename P>
V8_INLINE void SetWeak(P* parameter,
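A sketch of the two-phase pattern the comment above prescribes (Wrapper is
illustrative):

    struct Wrapper { v8::Global<v8::Object> handle; };

    void SecondPass(const v8::WeakCallbackInfo<Wrapper>& info) {
      delete info.GetParameter();  // heavier work may run in the second pass
    }
    void FirstPass(const v8::WeakCallbackInfo<Wrapper>& info) {
      info.GetParameter()->handle.Reset();      // reset the handle, no V8 API
      info.SetSecondPassCallback(&SecondPass);  // defer everything else
    }
    // Installed via:
    //   wrapper->handle.SetWeak(wrapper, FirstPass,
    //                           v8::WeakCallbackType::kParameter);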
@@ -618,11 +620,8 @@ class NonCopyablePersistentTraits {
template<class S, class M>
V8_INLINE static void Copy(const Persistent<S, M>& source,
NonCopyablePersistent* dest) {
- Uncompilable<Object>();
- }
- // TODO(dcarney): come up with a good compile error here.
- template<class O> V8_INLINE static void Uncompilable() {
- TYPE_CHECK(O, Primitive);
+ static_assert(sizeof(S) < 0,
+ "NonCopyablePersistentTraits::Copy is not instantiable");
}
};
@@ -665,7 +664,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
template <class S>
V8_INLINE Persistent(Isolate* isolate, Local<S> that)
: PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
/**
* Construct a Persistent from a Persistent.
@@ -675,7 +674,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
template <class S, class M2>
V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
: PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
/**
* The copy constructors and assignment operator create a Persistent
@@ -760,7 +759,7 @@ class Global : public PersistentBase<T> {
template <class S>
V8_INLINE Global(Isolate* isolate, Local<S> that)
: PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
/**
@@ -771,7 +770,7 @@ class Global : public PersistentBase<T> {
template <class S>
V8_INLINE Global(Isolate* isolate, const PersistentBase<S>& that)
: PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
/**
@@ -951,7 +950,7 @@ class TracedGlobal : public TracedReferenceBase<T> {
TracedGlobal(Isolate* isolate, Local<S> that) : TracedReferenceBase<T>() {
this->val_ = this->New(isolate, that.val_, &this->val_,
TracedReferenceBase<T>::kWithDestructor);
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
/**
@@ -1074,7 +1073,7 @@ class TracedReference : public TracedReferenceBase<T> {
TracedReference(Isolate* isolate, Local<S> that) : TracedReferenceBase<T>() {
this->val_ = this->New(isolate, that.val_, &this->val_,
TracedReferenceBase<T>::kWithoutDestructor);
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
/**
@@ -1128,17 +1127,11 @@ class TracedReference : public TracedReferenceBase<T> {
/**
* Copy assignment operator initializing TracedGlobal from an existing one.
- *
- * Note: Prohibited when |other| has a finalization callback set through
- * |SetFinalizationCallback|.
*/
V8_INLINE TracedReference& operator=(const TracedReference& rhs);
/**
* Copy assignment operator initializing TracedGlobal from an existing one.
- *
- * Note: Prohibited when |other| has a finalization callback set through
- * |SetFinalizationCallback|.
*/
template <class S>
V8_INLINE TracedReference& operator=(const TracedReference<S>& rhs);
@@ -1155,20 +1148,6 @@ class TracedReference : public TracedReferenceBase<T> {
return reinterpret_cast<TracedReference<S>&>(
const_cast<TracedReference<T>&>(*this));
}
-
- /**
- * Adds a finalization callback to the handle. The type of this callback is
- * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
- * parameter and the first two internal fields of the object.
- *
- * The callback is then supposed to reset the handle in the callback. No
- * further V8 API may be called in this callback. In case additional work
- * involving V8 needs to be done, a second callback can be scheduled using
- * WeakCallbackInfo<void>::SetSecondPassCallback.
- */
- V8_DEPRECATED("Use TracedGlobal<> if callbacks are required.")
- V8_INLINE void SetFinalizationCallback(
- void* parameter, WeakCallbackInfo<void>::Callback callback);
};
/**
@@ -1773,11 +1752,9 @@ class V8_EXPORT ScriptCompiler {
public:
enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 };
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATE_SOON(
"This class takes ownership of source_stream, so use the constructor "
"taking a unique_ptr to make these semantics clearer")
-#endif
StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
Encoding encoding);
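A short sketch of the preferred overload, assuming a hypothetical MyStream
class derived from ScriptCompiler::ExternalSourceStream:

  auto stream = std::make_unique<MyStream>();  // hypothetical stream impl
  v8::ScriptCompiler::StreamedSource source(
      std::move(stream), v8::ScriptCompiler::StreamedSource::UTF8);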
@@ -2201,6 +2178,7 @@ enum StateTag {
COMPILER,
OTHER,
EXTERNAL,
+ ATOMICS_WAIT,
IDLE
};
@@ -2467,14 +2445,6 @@ class V8_EXPORT ValueDeserializer {
void SetSupportsLegacyWireFormat(bool supports_legacy_wire_format);
/**
- * Expect inline wasm in the data stream (rather than in-memory transfer)
- */
- V8_DEPRECATED(
- "Wasm module serialization is only supported via explicit methods, e.g. "
- "CompiledWasmModule::Serialize()")
- void SetExpectInlineWasm(bool allow_inline_wasm) {}
-
- /**
* Reads the underlying wire format version. Likely mostly to be useful to
* legacy code reading old wire format versions. Must be called after
* ReadHeader.
@@ -2813,9 +2783,6 @@ class V8_EXPORT Value : public Data {
*/
bool IsWasmModuleObject() const;
- V8_DEPRECATED("Use IsWasmModuleObject")
- bool IsWebAssemblyCompiledModule() const;
-
/**
* Returns true if the value is a Module Namespace Object.
*/
@@ -3206,6 +3173,23 @@ class V8_EXPORT String : public Name {
V8_INLINE static String* Cast(v8::Value* obj);
+ /**
+ * Allocates a new string from a UTF-8 literal. This is equivalent to calling
+ * String::NewFromUtf8(isolate, "...").ToLocalChecked(), but without the check
+ * overhead.
+ *
+ * When called on a string literal containing '\0', the inferred length is the
+ * length of the input array minus 1 (for the final '\0') and not the value
+ * returned by strlen.
+ **/
+ template <int N>
+ static V8_WARN_UNUSED_RESULT Local<String> NewFromUtf8Literal(
+ Isolate* isolate, const char (&literal)[N],
+ NewStringType type = NewStringType::kNormal) {
+ static_assert(N <= kMaxLength, "String is too long");
+ return NewFromUtf8Literal(isolate, literal, type, N - 1);
+ }
+
/** Allocates a new string from UTF-8 data. Only returns an empty value when
* length > kMaxLength. **/
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromUtf8(
@@ -3340,9 +3324,20 @@ class V8_EXPORT String : public Name {
ExternalStringResourceBase* GetExternalStringResourceBaseSlow(
String::Encoding* encoding_out) const;
+ static Local<v8::String> NewFromUtf8Literal(Isolate* isolate,
+ const char* literal,
+ NewStringType type, int length);
+
static void CheckCast(v8::Value* obj);
};
+// Zero-length string specialization (templated string size includes
+// terminator).
+template <>
+inline V8_WARN_UNUSED_RESULT Local<String> String::NewFromUtf8Literal(
+ Isolate* isolate, const char (&literal)[1], NewStringType type) {
+ return String::Empty(isolate);
+}
/**
* A JavaScript symbol (ECMA-262 edition 6)
@@ -4050,6 +4045,13 @@ class V8_EXPORT Object : public Value {
bool IsApiWrapper();
/**
+ * True if this object was created from an object template which was marked
+ * as undetectable. See v8::ObjectTemplate::MarkAsUndetectable for more
+ * information.
+ */
+ bool IsUndetectable();
+
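A sketch of how such an object is produced, assuming a hypothetical
CallHandler callback (V8 expects undetectable objects to also be callable):

  v8::Local<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
  tmpl->SetCallAsFunctionHandler(CallHandler);  // hypothetical callback
  tmpl->MarkAsUndetectable();
  v8::Local<v8::Object> obj = tmpl->NewInstance(context).ToLocalChecked();
  // obj->IsUndetectable() is now true.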
+ /**
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
@@ -4205,7 +4207,7 @@ class ReturnValue {
public:
template <class S> V8_INLINE ReturnValue(const ReturnValue<S>& that)
: value_(that.value_) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
}
// Local setters
template <typename S>
@@ -4785,27 +4787,9 @@ class V8_EXPORT WasmModuleObject : public Object {
*/
CompiledWasmModule GetCompiledModule();
- /**
- * If possible, deserialize the module, otherwise compile it from the provided
- * uncompiled bytes.
- */
- V8_DEPRECATED(
- "Use WasmStreaming for deserialization from cache or the "
- "CompiledWasmModule to transfer between isolates")
- static MaybeLocal<WasmModuleObject> DeserializeOrCompile(
- Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
- MemorySpan<const uint8_t> wire_bytes);
-
V8_INLINE static WasmModuleObject* Cast(Value* obj);
private:
- static MaybeLocal<WasmModuleObject> Deserialize(
- Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
- MemorySpan<const uint8_t> wire_bytes);
- static MaybeLocal<WasmModuleObject> Compile(Isolate* isolate,
- const uint8_t* start,
- size_t length);
-
static void CheckCast(Value* obj);
};
@@ -4978,6 +4962,34 @@ class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
*/
bool IsShared() const;
+ /**
+ * Wrapper around ArrayBuffer::Allocator::Reallocate that preserves IsShared.
+ * Assumes that the backing_store was allocated by the ArrayBuffer allocator
+ * of the given isolate.
+ */
+ static std::unique_ptr<BackingStore> Reallocate(
+ v8::Isolate* isolate, std::unique_ptr<BackingStore> backing_store,
+ size_t byte_length);
+
+ /**
+ * This callback is used only if the memory block for a BackingStore cannot be
+ * allocated with an ArrayBuffer::Allocator. In such cases the destructor of
+ * the BackingStore invokes the callback to free the memory block.
+ */
+ using DeleterCallback = void (*)(void* data, size_t length,
+ void* deleter_data);
+
+ /**
+ * If the memory block of a BackingStore is static or is managed manually,
+ * then this empty deleter along with nullptr deleter_data can be passed to
+ * ArrayBuffer::NewBackingStore to indicate that.
+ *
+ * The manually managed case should be used with caution and only when it
+ * is guaranteed that the memory block freeing happens after detaching its
+ * ArrayBuffer.
+ */
+ static void EmptyDeleter(void* data, size_t length, void* deleter_data);
+
private:
/**
* See [Shared]ArrayBuffer::GetBackingStore and
@@ -4986,14 +4998,13 @@ class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
BackingStore();
};
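Two short sketches of the additions above, assuming an isolate in scope:

  // Growing an allocator-backed store; the IsShared() flag is preserved.
  std::unique_ptr<v8::BackingStore> store =
      v8::ArrayBuffer::NewBackingStore(isolate, 1024);
  store = v8::BackingStore::Reallocate(isolate, std::move(store), 2048);

  // Wrapping a static buffer; the empty deleter tells V8 not to free it.
  static uint8_t buffer[512];
  std::unique_ptr<v8::BackingStore> wrapped = v8::ArrayBuffer::NewBackingStore(
      buffer, sizeof(buffer), v8::BackingStore::EmptyDeleter, nullptr);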
-/**
- * This callback is used only if the memory block for this backing store cannot
- * be allocated with an ArrayBuffer::Allocator. In such cases the destructor
- * of this backing store object invokes the callback to free the memory block.
- */
+#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
+// Use v8::BackingStore::DeleterCallback instead.
using BackingStoreDeleterCallback = void (*)(void* data, size_t length,
void* deleter_data);
+#endif
+
/**
* An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
*/
@@ -5019,13 +5030,13 @@ class V8_EXPORT ArrayBuffer : public Object {
virtual ~Allocator() = default;
/**
- * Allocate |length| bytes. Return NULL if allocation is not successful.
+ * Allocate |length| bytes. Return nullptr if allocation is not successful.
* Memory should be initialized to zeroes.
*/
virtual void* Allocate(size_t length) = 0;
/**
- * Allocate |length| bytes. Return NULL if allocation is not successful.
+ * Allocate |length| bytes. Return nullptr if allocation is not successful.
* Memory does not have to be initialized.
*/
virtual void* AllocateUninitialized(size_t length) = 0;
@@ -5037,6 +5048,20 @@ class V8_EXPORT ArrayBuffer : public Object {
virtual void Free(void* data, size_t length) = 0;
/**
+ * Reallocate the memory block of size |old_length| to a memory block of
+ * size |new_length| by expanding, contracting, or copying the existing
+ * memory block. If |new_length| > |old_length|, then the new part of
+ * the memory must be initialized to zeros. Return nullptr if reallocation
+ * is not successful.
+ *
+ * The caller guarantees that the memory block was previously allocated
+ * using Allocate or AllocateUninitialized.
+ *
+ * The default implementation allocates a new block and copies data.
+ */
+ virtual void* Reallocate(void* data, size_t old_length, size_t new_length);
+
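A sketch of an allocator that relies on the default copying behavior; only
Allocate, AllocateUninitialized, and Free are mandatory:

  class SimpleAllocator : public v8::ArrayBuffer::Allocator {
   public:
    void* Allocate(size_t length) override { return calloc(length, 1); }
    void* AllocateUninitialized(size_t length) override {
      return malloc(length);
    }
    void Free(void* data, size_t) override { free(data); }
    // Reallocate() is inherited; override it only when a cheaper
    // in-place path (e.g. realloc) is available.
  };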
+ /**
* ArrayBuffer allocation mode. kNormal is a malloc/free style allocation,
* while kReservation is for larger allocations with the ability to set
* access permissions.
@@ -5167,7 +5192,7 @@ class V8_EXPORT ArrayBuffer : public Object {
* to the buffer must not be passed again to any V8 API function.
*/
static std::unique_ptr<BackingStore> NewBackingStore(
- void* data, size_t byte_length, BackingStoreDeleterCallback deleter,
+ void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
void* deleter_data);
/**
@@ -5649,7 +5674,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
* to the buffer must not be passed again to any V8 functions.
*/
static std::unique_ptr<BackingStore> NewBackingStore(
- void* data, size_t byte_length, BackingStoreDeleterCallback deleter,
+ void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
void* deleter_data);
/**
@@ -5909,14 +5934,15 @@ class V8_EXPORT RegExp : public Object {
};
/**
- * An instance of the built-in FinalizationGroup constructor.
+ * An instance of the built-in FinalizationRegistry constructor.
*
- * This API is experimental and may change significantly.
+ * The C++ name is FinalizationGroup for backwards compatibility. This API is
+ * experimental and deprecated.
*/
class V8_EXPORT FinalizationGroup : public Object {
public:
/**
- * Runs the cleanup callback of the given FinalizationGroup.
+ * Runs the cleanup callback of the given FinalizationRegistry.
*
* V8 will inform the embedder that there are finalizer callbacks to be
* called through HostCleanupFinalizationGroupCallback.
@@ -5931,6 +5957,9 @@ class V8_EXPORT FinalizationGroup : public Object {
* occurred. Otherwise the result is |true| if the cleanup callback
* was called successfully. The result is never |false|.
*/
+ V8_DEPRECATED(
+ "FinalizationGroup cleanup is automatic if "
+ "HostCleanupFinalizationGroupCallback is not set")
static V8_WARN_UNUSED_RESULT Maybe<bool> Cleanup(
Local<FinalizationGroup> finalization_group);
};
@@ -5948,13 +5977,14 @@ class V8_EXPORT External : public Value {
static void CheckCast(v8::Value* obj);
};
-#define V8_INTRINSICS_LIST(F) \
- F(ArrayProto_entries, array_entries_iterator) \
- F(ArrayProto_forEach, array_for_each_iterator) \
- F(ArrayProto_keys, array_keys_iterator) \
- F(ArrayProto_values, array_values_iterator) \
- F(ErrorPrototype, initial_error_prototype) \
- F(IteratorPrototype, initial_iterator_prototype)
+#define V8_INTRINSICS_LIST(F) \
+ F(ArrayProto_entries, array_entries_iterator) \
+ F(ArrayProto_forEach, array_for_each_iterator) \
+ F(ArrayProto_keys, array_keys_iterator) \
+ F(ArrayProto_values, array_values_iterator) \
+ F(ErrorPrototype, initial_error_prototype) \
+ F(IteratorPrototype, initial_iterator_prototype) \
+ F(ObjProto_valueOf, object_value_of_function)
enum Intrinsic {
#define V8_DECL_INTRINSIC(name, iname) k##name,
@@ -6300,6 +6330,7 @@ typedef bool (*AccessCheckCallback)(Local<Context> accessing_context,
Local<Object> accessed_object,
Local<Value> data);
+class CFunction;
/**
* A FunctionTemplate is used to create functions at runtime. There
* can only be one function created from a FunctionTemplate in a
@@ -6335,11 +6366,12 @@ typedef bool (*AccessCheckCallback)(Local<Context> accessing_context,
* proto_t->Set(isolate, "proto_const", v8::Number::New(isolate, 2));
*
* v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
- * instance_t->SetAccessor(String::NewFromUtf8(isolate, "instance_accessor"),
- * InstanceAccessorCallback);
+ * instance_t->SetAccessor(
+ *     String::NewFromUtf8Literal(isolate, "instance_accessor"),
+ *     InstanceAccessorCallback);
* instance_t->SetHandler(
* NamedPropertyHandlerConfiguration(PropertyHandlerCallback));
- * instance_t->Set(String::NewFromUtf8(isolate, "instance_property"),
+ * instance_t->Set(String::NewFromUtf8Literal(isolate, "instance_property"),
* Number::New(isolate, 3));
*
* v8::Local<v8::Function> function = t->GetFunction();
@@ -6399,6 +6431,12 @@ typedef bool (*AccessCheckCallback)(Local<Context> accessing_context,
* child_instance.instance_accessor calls 'InstanceAccessorCallback'
* child_instance.instance_property == 3;
* \endcode
+ *
+ * The additional 'c_function' parameter refers to a fast API call, which
+ * must not trigger GC or JavaScript execution, or call into V8 in other
+ * ways. For more information on how to define them, see
+ * include/v8-fast-api-calls.h. Please note that this feature is still
+ * experimental.
*/
class V8_EXPORT FunctionTemplate : public Template {
public:
@@ -6408,7 +6446,8 @@ class V8_EXPORT FunctionTemplate : public Template {
Local<Value> data = Local<Value>(),
Local<Signature> signature = Local<Signature>(), int length = 0,
ConstructorBehavior behavior = ConstructorBehavior::kAllow,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const CFunction* c_function = nullptr);
/** Get a template included in the snapshot by index. */
V8_DEPRECATED("Use v8::Isolate::GetDataFromSnapshotOnce instead")
@@ -6440,11 +6479,13 @@ class V8_EXPORT FunctionTemplate : public Template {
/**
* Set the call-handler callback for a FunctionTemplate. This
* callback is called whenever the function created from this
- * FunctionTemplate is called.
+ * FunctionTemplate is called. The 'c_function' represents a fast
+ * API call, see the comment above the class declaration.
*/
void SetCallHandler(
FunctionCallback callback, Local<Value> data = Local<Value>(),
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const CFunction* c_function = nullptr);
/** Set the predefined length property for the FunctionTemplate. */
void SetLength(int length);
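A hedged sketch of wiring a fast call into a template, assuming the
CFunction::Make helper from include/v8-fast-api-calls.h and a hypothetical
SlowAdd fallback of type v8::FunctionCallback:

  void FastAdd(int32_t a, int32_t b) { /* no GC, no JS, no V8 calls */ }

  v8::CFunction c_func = v8::CFunction::Make(FastAdd);
  v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(
      isolate, SlowAdd, v8::Local<v8::Value>(), v8::Local<v8::Signature>(),
      2, v8::ConstructorBehavior::kThrow,
      v8::SideEffectType::kHasSideEffect, &c_func);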
@@ -7293,14 +7334,15 @@ class PromiseRejectMessage {
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtasks Callbacks ---
+V8_DEPRECATE_SOON("Use *WithData version.")
typedef void (*MicrotasksCompletedCallback)(Isolate*);
typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*);
typedef void (*MicrotaskCallback)(void* data);
-
/**
* Policy for running microtasks:
- * - explicit: microtasks are invoked with Isolate::RunMicrotasks() method;
+ * - explicit: microtasks are invoked with the
+ * Isolate::PerformMicrotaskCheckpoint() method;
* - scoped: microtasks invocation is controlled by MicrotasksScope objects;
* - auto: microtasks are invoked when the script call depth decrements
* to zero.
@@ -7391,7 +7433,7 @@ class V8_EXPORT MicrotaskQueue {
};
/**
- * This scope is used to control microtasks when kScopeMicrotasksInvocation
+ * This scope is used to control microtasks when MicrotasksPolicy::kScoped
* is used on Isolate. In this mode every non-primitive call to V8 should be
* done inside some MicrotasksScope.
* Microtasks are executed when topmost MicrotasksScope marked as kRunMicrotasks
@@ -7480,7 +7522,7 @@ typedef void (*WasmStreamingCallback)(const FunctionCallbackInfo<Value>&);
// --- Callback for checking if WebAssembly threads are enabled ---
typedef bool (*WasmThreadsEnabledCallback)(Local<Context> context);
-// --- Callback for loading source map file for WASM profiling support
+// --- Callback for loading source map file for Wasm profiling support
typedef Local<String> (*WasmLoadSourceMapCallback)(Isolate* isolate,
const char* name);
@@ -7576,6 +7618,8 @@ class V8_EXPORT HeapStatistics {
size_t total_heap_size_executable() { return total_heap_size_executable_; }
size_t total_physical_size() { return total_physical_size_; }
size_t total_available_size() { return total_available_size_; }
+ size_t total_global_handles_size() { return total_global_handles_size_; }
+ size_t used_global_handles_size() { return used_global_handles_size_; }
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
size_t malloced_memory() { return malloced_memory_; }
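The new counters are read through the existing Isolate::GetHeapStatistics
path, sketched here with an isolate in scope:

  v8::HeapStatistics stats;
  isolate->GetHeapStatistics(&stats);
  size_t used = stats.used_global_handles_size();    // bytes in use
  size_t total = stats.total_global_handles_size();  // bytes reserved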
@@ -7603,6 +7647,8 @@ class V8_EXPORT HeapStatistics {
bool does_zap_garbage_;
size_t number_of_native_contexts_;
size_t number_of_detached_contexts_;
+ size_t total_global_handles_size_;
+ size_t used_global_handles_size_;
friend class V8;
friend class Isolate;
@@ -8407,6 +8453,13 @@ class V8_EXPORT Isolate {
kRegExpReplaceCalledOnSlowRegExp = 80,
kDisplayNames = 81,
kSharedArrayBufferConstructed = 82,
+ kArrayPrototypeHasElements = 83,
+ kObjectPrototypeHasElements = 84,
+ kNumberFormatStyleUnit = 85,
+ kDateTimeFormatRange = 86,
+ kDateTimeFormatDateTimeStyle = 87,
+ kBreakIteratorTypeWord = 88,
+ kBreakIteratorTypeLine = 89,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@@ -8472,10 +8525,13 @@ class V8_EXPORT Isolate {
* objects are originally built when a WeakRef is created or
* successfully dereferenced.
*
- * The embedder is expected to call this when a synchronous sequence
- * of ECMAScript execution completes. It's the embedders
- * responsiblity to make this call at a time which does not
- * interrupt synchronous ECMAScript code execution.
+ * This is invoked automatically after microtasks are run. See
+ * MicrotasksPolicy for when microtasks are run.
+ *
+ * This needs to be manually invoked only if the embedder is manually running
+ * microtasks via a custom MicrotaskQueue class's PerformCheckpoint. In that
+ * case, it is the embedder's responsibility to make this call at a time which
+ * does not interrupt synchronous ECMAScript code execution.
*/
void ClearKeptObjects();
@@ -8493,10 +8549,13 @@ class V8_EXPORT Isolate {
AbortOnUncaughtExceptionCallback callback);
/**
- * This specifies the callback to be called when finalization groups
+ * This specifies the callback to be called when FinalizationRegistries
* are ready to be cleaned up and require FinalizationGroup::Cleanup()
* to be called in a future task.
*/
+ V8_DEPRECATED(
+ "FinalizationRegistry cleanup is automatic if "
+ "HostCleanupFinalizationGroupCallback is not set")
void SetHostCleanupFinalizationGroupCallback(
HostCleanupFinalizationGroupCallback callback);
@@ -9000,10 +9059,18 @@ class V8_EXPORT Isolate {
void SetPromiseRejectCallback(PromiseRejectCallback callback);
/**
- * Runs the default MicrotaskQueue until it gets empty.
- * Any exceptions thrown by microtask callbacks are swallowed.
+ * An alias for PerformMicrotaskCheckpoint.
+ */
+ V8_DEPRECATE_SOON("Use PerformMicrotaskCheckpoint.")
+ void RunMicrotasks() { PerformMicrotaskCheckpoint(); }
+
+ /**
+ * Runs the default MicrotaskQueue until it gets empty and performs other
+ * microtask checkpoint steps, such as calling ClearKeptObjects. Asserts that
+ * the MicrotasksPolicy is not kScoped. Any exceptions thrown by microtask
+ * callbacks are swallowed.
*/
- void RunMicrotasks();
+ void PerformMicrotaskCheckpoint();
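The typical embedder loop migrates mechanically, sketched here with a
platform and isolate assumed in scope:

  while (v8::platform::PumpMessageLoop(platform, isolate)) continue;
  isolate->PerformMicrotaskCheckpoint();  // formerly isolate->RunMicrotasks()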
/**
* Enqueues the callback to the default MicrotaskQueue
@@ -9101,10 +9168,10 @@ class V8_EXPORT Isolate {
void LowMemoryNotification();
/**
- * Optional notification that a context has been disposed. V8 uses
- * these notifications to guide the GC heuristic. Returns the number
- * of context disposals - including this one - since the last time
- * V8 had a chance to clean up.
+ * Optional notification that a context has been disposed. V8 uses these
+ * notifications to guide the GC heuristic and cancel FinalizationRegistry
+ * cleanup tasks. Returns the number of context disposals - including this one
+ * - since the last time V8 had a chance to clean up.
*
* The optional parameter |dependant_context| specifies whether the disposed
* context was depending on state from other contexts or not.
@@ -9218,7 +9285,7 @@ class V8_EXPORT Isolate {
* Returns the UnwindState necessary for use with the Unwinder API.
*/
// TODO(petermarshall): Remove this API.
- V8_DEPRECATE_SOON("Use entry_stubs + code_pages version.")
+ V8_DEPRECATED("Use entry_stubs + code_pages version.")
UnwindState GetUnwindState();
/**
@@ -10347,6 +10414,18 @@ class V8_EXPORT Context {
void SetAbortScriptExecution(AbortScriptExecutionCallback callback);
/**
+ * Returns the value that was set or restored by
+ * SetContinuationPreservedEmbedderData(), if any.
+ */
+ Local<Value> GetContinuationPreservedEmbedderData() const;
+
+ /**
+ * Sets a value that will be stored on continuations and reset while the
+ * continuation runs.
+ */
+ void SetContinuationPreservedEmbedderData(Local<Value> context);
+
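A brief sketch of the round trip, assuming a context and isolate in scope:

  context->SetContinuationPreservedEmbedderData(
      v8::String::NewFromUtf8Literal(isolate, "trace-id"));
  v8::Local<v8::Value> data = context->GetContinuationPreservedEmbedderData();
  // The value travels with continuations (e.g. promise reactions); passing
  // an empty Local resets it to undefined.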
+ /**
* Stack-allocated class which sets the execution context for all
* operations executed within a local scope.
*/
@@ -10563,7 +10642,7 @@ class V8_EXPORT Unwinder {
* \return True on success.
*/
// TODO(petermarshall): Remove this API
- V8_DEPRECATE_SOON("Use entry_stubs + code_pages version.")
+ V8_DEPRECATED("Use entry_stubs + code_pages version.")
static bool TryUnwindV8Frames(const UnwindState& unwind_state,
RegisterState* register_state,
const void* stack_base);
@@ -10591,7 +10670,7 @@ class V8_EXPORT Unwinder {
* (but not necessarily) be successful.
*/
// TODO(petermarshall): Remove this API
- V8_DEPRECATE_SOON("Use code_pages version.")
+ V8_DEPRECATED("Use code_pages version.")
static bool PCIsInV8(const UnwindState& unwind_state, void* pc);
/**
@@ -10632,7 +10711,7 @@ Local<T> Local<T>::New(Isolate* isolate, T* that) {
template<class T>
template<class S>
void Eternal<T>::Set(Isolate* isolate, Local<S> handle) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
val_ = reinterpret_cast<T*>(
V8::Eternalize(isolate, reinterpret_cast<Value*>(*handle)));
}
@@ -10676,7 +10755,7 @@ T* PersistentBase<T>::New(Isolate* isolate, T* that) {
template <class T, class M>
template <class S, class M2>
void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
this->Reset();
if (that.IsEmpty()) return;
internal::Address* p = reinterpret_cast<internal::Address*>(that.val_);
@@ -10704,7 +10783,7 @@ void PersistentBase<T>::Reset() {
template <class T>
template <class S>
void PersistentBase<T>::Reset(Isolate* isolate, const Local<S>& other) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
Reset();
if (other.IsEmpty()) return;
this->val_ = New(isolate, other.val_);
@@ -10715,7 +10794,7 @@ template <class T>
template <class S>
void PersistentBase<T>::Reset(Isolate* isolate,
const PersistentBase<S>& other) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
Reset();
if (other.IsEmpty()) return;
this->val_ = New(isolate, other.val_);
@@ -10781,7 +10860,7 @@ Global<T>::Global(Global&& other) : PersistentBase<T>(other.val_) {
template <class T>
template <class S>
Global<T>& Global<T>::operator=(Global<S>&& rhs) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
if (this != &rhs) {
this->Reset();
if (rhs.val_ != nullptr) {
@@ -10816,7 +10895,7 @@ void TracedReferenceBase<T>::Reset() {
template <class T>
template <class S>
void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
Reset();
if (other.IsEmpty()) return;
this->val_ = this->New(isolate, other.val_, &this->val_,
@@ -10826,7 +10905,7 @@ void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
template <class T>
template <class S>
TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
*this = std::move(rhs.template As<T>());
return *this;
}
@@ -10834,7 +10913,7 @@ TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
template <class T>
template <class S>
TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
*this = rhs.template As<T>();
return *this;
}
@@ -10865,7 +10944,7 @@ TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
template <class T>
template <class S>
void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
Reset();
if (other.IsEmpty()) return;
this->val_ = this->New(isolate, other.val_, &this->val_,
@@ -10875,7 +10954,7 @@ void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
template <class T>
template <class S>
TracedReference<T>& TracedReference<T>::operator=(TracedReference<S>&& rhs) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
*this = std::move(rhs.template As<T>());
return *this;
}
@@ -10884,7 +10963,7 @@ template <class T>
template <class S>
TracedReference<T>& TracedReference<T>::operator=(
const TracedReference<S>& rhs) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
*this = rhs.template As<T>();
return *this;
}
@@ -10937,20 +11016,13 @@ void TracedGlobal<T>::SetFinalizationCallback(
reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
}
-template <class T>
-void TracedReference<T>::SetFinalizationCallback(
- void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
- V8::SetFinalizationCallbackTraced(
- reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
-}
-
template <typename T>
ReturnValue<T>::ReturnValue(internal::Address* slot) : value_(slot) {}
template <typename T>
template <typename S>
void ReturnValue<T>::Set(const Global<S>& handle) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
@@ -10961,7 +11033,7 @@ void ReturnValue<T>::Set(const Global<S>& handle) {
template <typename T>
template <typename S>
void ReturnValue<T>::Set(const TracedReferenceBase<S>& handle) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_base_of<T, S>::value, "type check");
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
@@ -10972,7 +11044,8 @@ void ReturnValue<T>::Set(const TracedReferenceBase<S>& handle) {
template <typename T>
template <typename S>
void ReturnValue<T>::Set(const Local<S> handle) {
- TYPE_CHECK(T, S);
+ static_assert(std::is_void<T>::value || std::is_base_of<T, S>::value,
+ "type check");
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
} else {
@@ -10982,13 +11055,13 @@ void ReturnValue<T>::Set(const Local<S> handle) {
template<typename T>
void ReturnValue<T>::Set(double i) {
- TYPE_CHECK(T, Number);
+ static_assert(std::is_base_of<T, Number>::value, "type check");
Set(Number::New(GetIsolate(), i));
}
template<typename T>
void ReturnValue<T>::Set(int32_t i) {
- TYPE_CHECK(T, Integer);
+ static_assert(std::is_base_of<T, Integer>::value, "type check");
typedef internal::Internals I;
if (V8_LIKELY(I::IsValidSmi(i))) {
*value_ = I::IntToSmi(i);
@@ -10999,7 +11072,7 @@ void ReturnValue<T>::Set(int32_t i) {
template<typename T>
void ReturnValue<T>::Set(uint32_t i) {
- TYPE_CHECK(T, Integer);
+ static_assert(std::is_base_of<T, Integer>::value, "type check");
// Can't simply use INT32_MAX here for whatever reason.
bool fits_into_int32_t = (i & (1U << 31)) == 0;
if (V8_LIKELY(fits_into_int32_t)) {
@@ -11011,7 +11084,7 @@ void ReturnValue<T>::Set(uint32_t i) {
template<typename T>
void ReturnValue<T>::Set(bool value) {
- TYPE_CHECK(T, Boolean);
+ static_assert(std::is_base_of<T, Boolean>::value, "type check");
typedef internal::Internals I;
int root_index;
if (value) {
@@ -11024,21 +11097,21 @@ void ReturnValue<T>::Set(bool value) {
template<typename T>
void ReturnValue<T>::SetNull() {
- TYPE_CHECK(T, Primitive);
+ static_assert(std::is_base_of<T, Primitive>::value, "type check");
typedef internal::Internals I;
*value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
}
template<typename T>
void ReturnValue<T>::SetUndefined() {
- TYPE_CHECK(T, Primitive);
+ static_assert(std::is_base_of<T, Primitive>::value, "type check");
typedef internal::Internals I;
*value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
}
template<typename T>
void ReturnValue<T>::SetEmptyString() {
- TYPE_CHECK(T, String);
+ static_assert(std::is_base_of<T, String>::value, "type check");
typedef internal::Internals I;
*value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
}
@@ -11060,8 +11133,7 @@ Local<Value> ReturnValue<T>::Get() const {
template <typename T>
template <typename S>
void ReturnValue<T>::Set(S* whatever) {
- // Uncompilable to prevent inadvertent misuse.
- TYPE_CHECK(S*, Primitive);
+ static_assert(sizeof(S) < 0, "incompilable to prevent inadvertent misuse");
}
template <typename T>
@@ -11078,14 +11150,24 @@ FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Address* implicit_args,
template<typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
+ // values_ points to the first argument (not the receiver).
if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
+#ifdef V8_REVERSE_JSARGS
+ return Local<Value>(reinterpret_cast<Value*>(values_ + i));
+#else
return Local<Value>(reinterpret_cast<Value*>(values_ - i));
+#endif
}
template<typename T>
Local<Object> FunctionCallbackInfo<T>::This() const {
+ // values_ points to the first argument (not the receiver).
+#ifdef V8_REVERSE_JSARGS
+ return Local<Object>(reinterpret_cast<Object*>(values_ - 1));
+#else
return Local<Object>(reinterpret_cast<Object*>(values_ + 1));
+#endif
}
@@ -11854,9 +11936,9 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
reinterpret_cast<uint8_t*>(this) + I::kExternalMemoryOffset);
int64_t* external_memory_limit = reinterpret_cast<int64_t*>(
reinterpret_cast<uint8_t*>(this) + I::kExternalMemoryLimitOffset);
- int64_t* external_memory_at_last_mc =
+ int64_t* external_memory_low_since_mc =
reinterpret_cast<int64_t*>(reinterpret_cast<uint8_t*>(this) +
- I::kExternalMemoryAtLastMarkCompactOffset);
+ I::kExternalMemoryLowSinceMarkCompactOffset);
// Embedders are weird: we see both over- and underflows here. Perform the
// addition with unsigned types to avoid undefined behavior.
@@ -11865,23 +11947,22 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
static_cast<uint64_t>(*external_memory));
*external_memory = amount;
- int64_t allocation_diff_since_last_mc =
- static_cast<int64_t>(static_cast<uint64_t>(*external_memory) -
- static_cast<uint64_t>(*external_memory_at_last_mc));
+ if (amount < *external_memory_low_since_mc) {
+ *external_memory_low_since_mc = amount;
+ *external_memory_limit = amount + I::kExternalAllocationSoftLimit;
+ }
+
+ if (change_in_bytes <= 0) return *external_memory;
+
+ int64_t allocation_diff_since_last_mc = static_cast<int64_t>(
+ static_cast<uint64_t>(*external_memory) -
+ static_cast<uint64_t>(*external_memory_low_since_mc));
// Only check memory pressure and potentially trigger GC if the amount of
// external memory increased.
if (allocation_diff_since_last_mc > kMemoryReducerActivationLimit) {
CheckMemoryPressure();
}
-
- if (change_in_bytes < 0) {
- const int64_t lower_limit =
- static_cast<int64_t>(static_cast<uint64_t>(*external_memory_limit) +
- static_cast<uint64_t>(change_in_bytes));
- if (lower_limit > I::kExternalAllocationSoftLimit) {
- *external_memory_limit = lower_limit;
- }
- } else if (change_in_bytes > 0 && amount > *external_memory_limit) {
+ if (amount > *external_memory_limit) {
ReportExternalAllocationLimitReached();
}
return *external_memory;
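From the embedder's side the usage is unchanged: balanced adjustments around
the lifetime of each external allocation, sketched here with a hypothetical
size variable:

  isolate->AdjustAmountOfExternalAllocatedMemory(
      static_cast<int64_t>(size));   // after allocating size bytes
  // ...
  isolate->AdjustAmountOfExternalAllocatedMemory(
      -static_cast<int64_t>(size));  // after freeing them again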
@@ -11963,8 +12044,4 @@ size_t SnapshotCreator::AddData(Local<T> object) {
} // namespace v8
-
-#undef TYPE_CHECK
-
-
#endif // INCLUDE_V8_H_
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index b26f1a1f08..6c3379b5cb 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -288,7 +288,8 @@
{'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
- {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
+ {'name': 'v8testing', 'variant': 'minor_mc'},
+ {'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
],
},
@@ -346,6 +347,7 @@
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'assert_types'},
{'name': 'v8testing', 'variant': 'extra'},
+ {'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
],
},
@@ -381,6 +383,7 @@
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'variant': 'minor_mc'},
+ {'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
],
@@ -849,7 +852,8 @@
{'name': 'v8testing'},
{'name': 'v8testing', 'variant': 'assert_types'},
{'name': 'v8testing', 'variant': 'extra'},
- {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
+ {'name': 'v8testing', 'variant': 'minor_mc'},
+ {'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Noavx.
{
@@ -906,8 +910,9 @@
{'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
- {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
- {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+ {'name': 'v8testing', 'variant': 'minor_mc'},
+ {'name': 'v8testing', 'variant': 'no_lfa'},
+ {'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Noavx.
{
@@ -1417,7 +1422,7 @@
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 3},
- {'name': 'v8testing', 'variant': 'extra'},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'variant': 'trusted'},
],
},
diff --git a/deps/v8/samples/hello-world.cc b/deps/v8/samples/hello-world.cc
index d75dcb3c3c..6e506475e4 100644
--- a/deps/v8/samples/hello-world.cc
+++ b/deps/v8/samples/hello-world.cc
@@ -37,9 +37,7 @@ int main(int argc, char* argv[]) {
{
// Create a string containing the JavaScript source code.
v8::Local<v8::String> source =
- v8::String::NewFromUtf8(isolate, "'Hello' + ', World!'",
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(isolate, "'Hello' + ', World!'");
// Compile the source code.
v8::Local<v8::Script> script =
@@ -63,7 +61,7 @@ int main(int argc, char* argv[]) {
// get_local 1
// i32.add)
//
- const char* csource = R"(
+ const char csource[] = R"(
let bytes = new Uint8Array([
0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00, 0x01, 0x07, 0x01,
0x60, 0x02, 0x7f, 0x7f, 0x01, 0x7f, 0x03, 0x02, 0x01, 0x00, 0x07,
@@ -77,8 +75,7 @@ int main(int argc, char* argv[]) {
// Create a string containing the JavaScript source code.
v8::Local<v8::String> source =
- v8::String::NewFromUtf8(isolate, csource, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(isolate, csource);
// Compile the source code.
v8::Local<v8::Script> script =
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index e4f6fd9cee..b23329dc10 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -183,8 +183,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// Create a template for the global object where we set the
// built-in global functions.
Local<ObjectTemplate> global = ObjectTemplate::New(GetIsolate());
- global->Set(String::NewFromUtf8(GetIsolate(), "log", NewStringType::kNormal)
- .ToLocalChecked(),
+ global->Set(String::NewFromUtf8Literal(GetIsolate(), "log"),
FunctionTemplate::New(GetIsolate(), LogCallback));
// Each processor gets its own context so different processors don't
@@ -210,8 +209,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// The script compiled and ran correctly. Now we fetch out the
// Process function from the global object.
Local<String> process_name =
- String::NewFromUtf8(GetIsolate(), "Process", NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8Literal(GetIsolate(), "Process");
Local<Value> process_val;
// If there is no Process function, or if it is not a function,
// bail out
@@ -276,17 +274,13 @@ bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
// Set the options object as a property on the global object.
context->Global()
- ->Set(context,
- String::NewFromUtf8(GetIsolate(), "options", NewStringType::kNormal)
- .ToLocalChecked(),
+ ->Set(context, String::NewFromUtf8Literal(GetIsolate(), "options"),
opts_obj)
.FromJust();
Local<Object> output_obj = WrapMap(output);
context->Global()
- ->Set(context,
- String::NewFromUtf8(GetIsolate(), "output", NewStringType::kNormal)
- .ToLocalChecked(),
+ ->Set(context, String::NewFromUtf8Literal(GetIsolate(), "output"),
output_obj)
.FromJust();
@@ -563,21 +557,17 @@ Local<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
// Add accessors for each of the fields of the request.
result->SetAccessor(
- String::NewFromUtf8(isolate, "path", NewStringType::kInternalized)
- .ToLocalChecked(),
+ String::NewFromUtf8Literal(isolate, "path", NewStringType::kInternalized),
GetPath);
+ result->SetAccessor(String::NewFromUtf8Literal(isolate, "referrer",
+ NewStringType::kInternalized),
+ GetReferrer);
result->SetAccessor(
- String::NewFromUtf8(isolate, "referrer", NewStringType::kInternalized)
- .ToLocalChecked(),
- GetReferrer);
- result->SetAccessor(
- String::NewFromUtf8(isolate, "host", NewStringType::kInternalized)
- .ToLocalChecked(),
+ String::NewFromUtf8Literal(isolate, "host", NewStringType::kInternalized),
GetHost);
- result->SetAccessor(
- String::NewFromUtf8(isolate, "userAgent", NewStringType::kInternalized)
- .ToLocalChecked(),
- GetUserAgent);
+ result->SetAccessor(String::NewFromUtf8Literal(isolate, "userAgent",
+ NewStringType::kInternalized),
+ GetUserAgent);
// Again, return the result through the current handle scope.
return handle_scope.Escape(result);
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 81b028720c..aed050cecc 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -108,27 +108,20 @@ v8::Local<v8::Context> CreateShellContext(v8::Isolate* isolate) {
// Create a template for the global object.
v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
// Bind the global 'print' function to the C++ Print callback.
- global->Set(
- v8::String::NewFromUtf8(isolate, "print", v8::NewStringType::kNormal)
- .ToLocalChecked(),
- v8::FunctionTemplate::New(isolate, Print));
+ global->Set(v8::String::NewFromUtf8Literal(isolate, "print"),
+ v8::FunctionTemplate::New(isolate, Print));
// Bind the global 'read' function to the C++ Read callback.
- global->Set(v8::String::NewFromUtf8(
- isolate, "read", v8::NewStringType::kNormal).ToLocalChecked(),
+ global->Set(v8::String::NewFromUtf8Literal(isolate, "read"),
v8::FunctionTemplate::New(isolate, Read));
// Bind the global 'load' function to the C++ Load callback.
- global->Set(v8::String::NewFromUtf8(
- isolate, "load", v8::NewStringType::kNormal).ToLocalChecked(),
+ global->Set(v8::String::NewFromUtf8Literal(isolate, "load"),
v8::FunctionTemplate::New(isolate, Load));
// Bind the 'quit' function
- global->Set(v8::String::NewFromUtf8(
- isolate, "quit", v8::NewStringType::kNormal).ToLocalChecked(),
+ global->Set(v8::String::NewFromUtf8Literal(isolate, "quit"),
v8::FunctionTemplate::New(isolate, Quit));
// Bind the 'version' function
- global->Set(
- v8::String::NewFromUtf8(isolate, "version", v8::NewStringType::kNormal)
- .ToLocalChecked(),
- v8::FunctionTemplate::New(isolate, Version));
+ global->Set(v8::String::NewFromUtf8Literal(isolate, "version"),
+ v8::FunctionTemplate::New(isolate, Version));
return v8::Context::New(isolate, NULL, global);
}
@@ -161,22 +154,19 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(), "Bad parameters",
- v8::NewStringType::kNormal).ToLocalChecked());
+ v8::String::NewFromUtf8Literal(args.GetIsolate(), "Bad parameters"));
return;
}
v8::String::Utf8Value file(args.GetIsolate(), args[0]);
if (*file == NULL) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
- v8::NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(), "Error loading file"));
return;
}
v8::Local<v8::String> source;
if (!ReadFile(args.GetIsolate(), *file).ToLocal(&source)) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
- v8::NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(), "Error loading file"));
return;
}
@@ -191,22 +181,19 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args.GetIsolate(), args[i]);
if (*file == NULL) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
- v8::NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(), "Error loading file"));
return;
}
v8::Local<v8::String> source;
if (!ReadFile(args.GetIsolate(), *file).ToLocal(&source)) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
- v8::NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(), "Error loading file"));
return;
}
if (!ExecuteString(args.GetIsolate(), source, args[i], false, false)) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(), "Error executing file",
- v8::NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(), "Error executing file"));
return;
}
}
@@ -228,8 +215,8 @@ void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(
- v8::String::NewFromUtf8(args.GetIsolate(), v8::V8::GetVersion(),
- v8::NewStringType::kNormal).ToLocalChecked());
+ v8::String::NewFromUtf8(args.GetIsolate(), v8::V8::GetVersion())
+ .ToLocalChecked());
}
@@ -276,12 +263,9 @@ int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc,
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly.
v8::Local<v8::String> file_name =
- v8::String::NewFromUtf8(isolate, "unnamed",
- v8::NewStringType::kNormal).ToLocalChecked();
+ v8::String::NewFromUtf8Literal(isolate, "unnamed");
v8::Local<v8::String> source;
- if (!v8::String::NewFromUtf8(isolate, argv[++i],
- v8::NewStringType::kNormal)
- .ToLocal(&source)) {
+ if (!v8::String::NewFromUtf8(isolate, argv[++i]).ToLocal(&source)) {
return 1;
}
bool success = ExecuteString(isolate, source, file_name, false, true);
@@ -290,8 +274,7 @@ int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc,
} else {
// Use all other arguments as names of files to load and run.
v8::Local<v8::String> file_name =
- v8::String::NewFromUtf8(isolate, str, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8(isolate, str).ToLocalChecked();
v8::Local<v8::String> source;
if (!ReadFile(isolate, str).ToLocal(&source)) {
fprintf(stderr, "Error reading '%s'\n", str);
@@ -313,8 +296,7 @@ void RunShell(v8::Local<v8::Context> context, v8::Platform* platform) {
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
v8::Local<v8::String> name(
- v8::String::NewFromUtf8(context->GetIsolate(), "(shell)",
- v8::NewStringType::kNormal).ToLocalChecked());
+ v8::String::NewFromUtf8Literal(context->GetIsolate(), "(shell)"));
while (true) {
char buffer[kBufferSize];
fprintf(stderr, "> ");
@@ -323,8 +305,7 @@ void RunShell(v8::Local<v8::Context> context, v8::Platform* platform) {
v8::HandleScope handle_scope(context->GetIsolate());
ExecuteString(
context->GetIsolate(),
- v8::String::NewFromUtf8(context->GetIsolate(), str,
- v8::NewStringType::kNormal).ToLocalChecked(),
+ v8::String::NewFromUtf8(context->GetIsolate(), str).ToLocalChecked(),
name, true, true);
while (v8::platform::PumpMessageLoop(platform, context->GetIsolate()))
continue;
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 3e802dac97..772ad53b32 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -16,6 +16,7 @@ include_rules = [
"+src/heap/heap-inl.h",
"+src/heap/heap-write-barrier-inl.h",
"+src/heap/heap-write-barrier.h",
+ "+src/heap/local-heap.h",
"+src/heap/off-thread-factory-inl.h",
"+src/heap/off-thread-factory.h",
"+src/heap/read-only-heap-inl.h",
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index 217d21222d..0d2ad2f8a0 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -86,7 +86,7 @@ MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
-MAKE_TO_LOCAL(ToLocal, JSFinalizationGroup, FinalizationGroup)
+MAKE_TO_LOCAL(ToLocal, JSFinalizationRegistry, FinalizationGroup)
TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 7fe974de24..b2d6db3661 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -13,6 +13,7 @@
#include "src/api/api-inl.h"
+#include "include/v8-fast-api-calls.h"
#include "include/v8-profiler.h"
#include "include/v8-util.h"
#include "src/api/api-natives.h"
@@ -309,6 +310,7 @@ class CallDepthScope {
#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
if (do_callback) CheckMicrotasksScopesConsistency(microtask_queue);
#endif
+ DCHECK(CheckKeptObjectsClearedAfterMicrotaskCheckpoint(microtask_queue));
isolate_->set_next_v8_call_is_safe_for_termination(safe_for_termination_);
}
@@ -323,6 +325,15 @@ class CallDepthScope {
}
private:
+ bool CheckKeptObjectsClearedAfterMicrotaskCheckpoint(
+ i::MicrotaskQueue* microtask_queue) {
+ bool did_perform_microtask_checkpoint =
+ do_callback && microtask_queue &&
+ microtask_queue->microtasks_policy() == MicrotasksPolicy::kAuto;
+ return !did_perform_microtask_checkpoint ||
+ isolate_->heap()->weak_refs_keep_during_job().IsUndefined(isolate_);
+ }
+
i::Isolate* const isolate_;
Local<Context> context_;
bool escaped_;
@@ -453,11 +464,13 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
// BUG(1718): Don't use the take_snapshot since we don't support
// HeapObjectIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
- char* first_newline = strchr(last_few_messages, '\n');
- if (first_newline == nullptr || first_newline[1] == '\0')
- first_newline = last_few_messages;
- PrintF("\n<--- Last few GCs --->\n%s\n", first_newline);
- PrintF("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
+ if (!FLAG_correctness_fuzzer_suppressions) {
+ char* first_newline = strchr(last_few_messages, '\n');
+ if (first_newline == nullptr || first_newline[1] == '\0')
+ first_newline = last_few_messages;
+ base::OS::PrintError("\n<--- Last few GCs --->\n%s\n", first_newline);
+ base::OS::PrintError("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
+ }
}
Utils::ReportOOMFailure(isolate, location, is_heap_oom);
// If the fatal error handler returns, we stop execution.
@@ -542,6 +555,21 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
}
void Free(void* data, size_t) override { free(data); }
+
+ void* Reallocate(void* data, size_t old_length, size_t new_length) override {
+#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
+ // Work around for GCC bug on AIX
+ // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
+ void* new_data = __linux_realloc(data, new_length);
+#else
+ void* new_data = realloc(data, new_length);
+#endif
+ if (new_length > old_length) {
+ memset(reinterpret_cast<uint8_t*>(new_data) + old_length, 0,
+ new_length - old_length);
+ }
+ return new_data;
+ }
};
struct SnapshotCreatorData {
@@ -867,6 +895,8 @@ StartupData SnapshotCreator::CreateBlob(
startup_serializer.SerializeWeakReferencesAndDeferred();
can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
+ startup_serializer.CheckNoDirtyFinalizationRegistries();
+
read_only_serializer.FinalizeSerialization();
can_be_rehashed = can_be_rehashed && read_only_serializer.can_be_rehashed();
@@ -985,7 +1015,7 @@ void ResourceConstraints::ConfigureDefaultsFromHeapSize(
set_initial_young_generation_size_in_bytes(young_generation);
set_initial_old_generation_size_in_bytes(old_generation);
}
- if (i::kRequiresCodeRange) {
+ if (i::kPlatformRequiresCodeRange) {
set_code_range_size_in_bytes(
i::Min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes));
}
@@ -1000,7 +1030,7 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
set_max_young_generation_size_in_bytes(young_generation);
set_max_old_generation_size_in_bytes(old_generation);
- if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
+ if (virtual_memory_limit > 0 && i::kPlatformRequiresCodeRange) {
set_code_range_size_in_bytes(
i::Min(i::kMaximalCodeRangeSize,
static_cast<size_t>(virtual_memory_limit / 8)));
@@ -1442,7 +1472,8 @@ static Local<FunctionTemplate> FunctionTemplateNew(
i::Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
v8::Local<Signature> signature, int length, bool do_not_cache,
v8::Local<Private> cached_property_name = v8::Local<Private>(),
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect) {
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const CFunction* c_function = nullptr) {
i::Handle<i::Struct> struct_obj = isolate->factory()->NewStruct(
i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld);
i::Handle<i::FunctionTemplateInfo> obj =
@@ -1460,7 +1491,8 @@ static Local<FunctionTemplate> FunctionTemplateNew(
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
}
if (callback != nullptr) {
- Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type);
+ Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type,
+ c_function);
}
obj->set_undetectable(false);
obj->set_needs_access_check(false);
@@ -1478,14 +1510,15 @@ static Local<FunctionTemplate> FunctionTemplateNew(
Local<FunctionTemplate> FunctionTemplate::New(
Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
v8::Local<Signature> signature, int length, ConstructorBehavior behavior,
- SideEffectType side_effect_type) {
+ SideEffectType side_effect_type, const CFunction* c_function) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Changes to the environment cannot be captured in the snapshot. Expect no
// function templates when the isolate is created for serialization.
LOG_API(i_isolate, FunctionTemplate, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- auto templ = FunctionTemplateNew(i_isolate, callback, data, signature, length,
- false, Local<Private>(), side_effect_type);
+ auto templ =
+ FunctionTemplateNew(i_isolate, callback, data, signature, length, false,
+ Local<Private>(), side_effect_type, c_function);
if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
return templ;
}
@@ -1534,7 +1567,8 @@ Local<AccessorSignature> AccessorSignature::New(
void FunctionTemplate::SetCallHandler(FunctionCallback callback,
v8::Local<Value> data,
- SideEffectType side_effect_type) {
+ SideEffectType side_effect_type,
+ const CFunction* c_function) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetCallHandler");
i::Isolate* isolate = info->GetIsolate();
@@ -1548,6 +1582,15 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
obj->set_data(*Utils::OpenHandle(*data));
+ if (c_function != nullptr) {
+ DCHECK_NOT_NULL(c_function->GetAddress());
+ i::FunctionTemplateInfo::SetCFunction(
+ isolate, info,
+ i::handle(*FromCData(isolate, c_function->GetAddress()), isolate));
+ i::FunctionTemplateInfo::SetCSignature(
+ isolate, info,
+ i::handle(*FromCData(isolate, c_function->GetTypeInfo()), isolate));
+ }
info->set_call_code(*obj);
}
@@ -3462,7 +3505,6 @@ VALUE_IS_SPECIFIC_TYPE(Set, JSSet)
VALUE_IS_SPECIFIC_TYPE(WasmModuleObject, WasmModuleObject)
VALUE_IS_SPECIFIC_TYPE(WeakMap, JSWeakMap)
VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
-VALUE_IS_SPECIFIC_TYPE(WebAssemblyCompiledModule, WasmModuleObject)
#undef VALUE_IS_SPECIFIC_TYPE
@@ -3784,6 +3826,28 @@ bool v8::BackingStore::IsShared() const {
return reinterpret_cast<const i::BackingStore*>(this)->is_shared();
}
+// static
+std::unique_ptr<v8::BackingStore> v8::BackingStore::Reallocate(
+ v8::Isolate* isolate, std::unique_ptr<v8::BackingStore> backing_store,
+ size_t byte_length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, ArrayBuffer, BackingStore_Reallocate);
+ CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::BackingStore* i_backing_store =
+ reinterpret_cast<i::BackingStore*>(backing_store.get());
+ if (!i_backing_store->Reallocate(i_isolate, byte_length)) {
+ i::FatalProcessOutOfMemory(i_isolate, "v8::BackingStore::Reallocate");
+ }
+ return backing_store;
+}
+
+// static
+void v8::BackingStore::EmptyDeleter(void* data, size_t length,
+ void* deleter_data) {
+ DCHECK_NULL(deleter_data);
+}
+
std::shared_ptr<v8::BackingStore> v8::ArrayBuffer::GetBackingStore() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
@@ -4800,6 +4864,11 @@ bool v8::Object::IsApiWrapper() {
return self->IsApiWrapper();
}
+bool v8::Object::IsUndetectable() {
+ auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ return self->IsUndetectable();
+}
+
MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
Local<Value> recv, int argc,
Local<Value> argv[]) {
@@ -6085,6 +6154,24 @@ void Context::SetAbortScriptExecution(
}
}
+Local<Value> Context::GetContinuationPreservedEmbedderData() const {
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
+ i::Handle<i::Object> data(
+ context->native_context().continuation_preserved_embedder_data(),
+ isolate);
+ return ToApiHandle<Object>(data);
+}
+
+void Context::SetContinuationPreservedEmbedderData(Local<Value> data) {
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
+ if (data.IsEmpty())
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ context->native_context().set_continuation_preserved_embedder_data(
+ *i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*data)));
+}
+
namespace {
i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate,
i::FixedArray list, size_t index) {
@@ -6290,6 +6377,19 @@ STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength);
result = Utils::ToLocal(handle_result); \
}
+Local<String> String::NewFromUtf8Literal(Isolate* isolate, const char* literal,
+ NewStringType type, int length) {
+ DCHECK_LE(length, i::String::kMaxLength);
+ i::Isolate* i_isolate = reinterpret_cast<internal::Isolate*>(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ LOG_API(i_isolate, String, NewFromUtf8Literal);
+ i::Handle<i::String> handle_result =
+ NewString(i_isolate->factory(), type,
+ i::Vector<const char>(literal, length))
+ .ToHandleChecked();
+ return Utils::ToLocal(handle_result);
+}
+
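Since the length is supplied by the caller and checked against kMaxLength up front, this overload can return Local<String> directly instead of MaybeLocal<String>. The v8.h side is presumably a template that deduces the length from the literal; a hedged call sketch:

    // Assumes the public template wrapper forwards the literal's size.
    v8::Local<v8::String> key =
        v8::String::NewFromUtf8Literal(isolate, "version");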
MaybeLocal<String> String::NewFromUtf8(Isolate* isolate, const char* data,
NewStringType type, int length) {
NEW_STRING(isolate, String, NewFromUtf8, char, data, type, length);
@@ -7197,57 +7297,6 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule(
Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
}
-MaybeLocal<WasmModuleObject> WasmModuleObject::Deserialize(
- Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
- MemorySpan<const uint8_t> wire_bytes) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::MaybeHandle<i::WasmModuleObject> maybe_module_object =
- i::wasm::DeserializeNativeModule(
- i_isolate, {serialized_module.data(), serialized_module.size()},
- {wire_bytes.data(), wire_bytes.size()}, {});
- i::Handle<i::WasmModuleObject> module_object;
- if (!maybe_module_object.ToHandle(&module_object)) {
- return MaybeLocal<WasmModuleObject>();
- }
- return Local<WasmModuleObject>::Cast(
- Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
-}
-
-MaybeLocal<WasmModuleObject> WasmModuleObject::DeserializeOrCompile(
- Isolate* isolate, MemorySpan<const uint8_t> serialized_module,
- MemorySpan<const uint8_t> wire_bytes) {
- MaybeLocal<WasmModuleObject> ret =
- Deserialize(isolate, serialized_module, wire_bytes);
- if (!ret.IsEmpty()) {
- return ret;
- }
- return Compile(isolate, wire_bytes.data(), wire_bytes.size());
-}
-
-MaybeLocal<WasmModuleObject> WasmModuleObject::Compile(Isolate* isolate,
- const uint8_t* start,
- size_t length) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
- return MaybeLocal<WasmModuleObject>();
- }
- i::MaybeHandle<i::JSObject> maybe_compiled;
- {
- i::wasm::ErrorThrower thrower(i_isolate, "WasmModuleObject::Compile()");
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- maybe_compiled = i_isolate->wasm_engine()->SyncCompile(
- i_isolate, enabled_features, &thrower,
- i::wasm::ModuleWireBytes(start, start + length));
- }
- CHECK_EQ(maybe_compiled.is_null(), i_isolate->has_pending_exception());
- if (maybe_compiled.is_null()) {
- i_isolate->OptionalRescheduleException(false);
- return MaybeLocal<WasmModuleObject>();
- }
- return Local<WasmModuleObject>::Cast(
- Utils::ToLocal(maybe_compiled.ToHandleChecked()));
-}
-
WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
Isolate* isolate) {
USE(isolate_);
@@ -7262,6 +7311,21 @@ void WasmModuleObjectBuilderStreaming::Finish() {}
void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {}
+void* v8::ArrayBuffer::Allocator::Reallocate(void* data, size_t old_length,
+ size_t new_length) {
+ if (old_length == new_length) return data;
+ uint8_t* new_data =
+ reinterpret_cast<uint8_t*>(AllocateUninitialized(new_length));
+ if (new_data == nullptr) return nullptr;
+ size_t bytes_to_copy = std::min(old_length, new_length);
+ memcpy(new_data, data, bytes_to_copy);
+ if (new_length > bytes_to_copy) {
+ memset(new_data + bytes_to_copy, 0, new_length - bytes_to_copy);
+ }
+ Free(data, old_length);
+ return new_data;
+}
+
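This default emulates realloc on top of AllocateUninitialized and Free, zero-filling any growth so the result matches Allocate's zeroed contents. An embedder backed by the C allocator could override it while keeping that guarantee; a sketch, not part of this patch (needs <cstdlib> and <cstring>):

    class ReallocatingAllocator : public v8::ArrayBuffer::Allocator {
     public:
      void* Allocate(size_t length) override { return calloc(length, 1); }
      void* AllocateUninitialized(size_t length) override {
        return malloc(length);
      }
      void Free(void* data, size_t) override { free(data); }
      void* Reallocate(void* data, size_t old_length,
                       size_t new_length) override {
        uint8_t* p = static_cast<uint8_t*>(realloc(data, new_length));
        // Preserve the default's zero-fill-on-growth behavior.
        if (p != nullptr && new_length > old_length) {
          memset(p + old_length, 0, new_length - old_length);
        }
        return p;
      }
    };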
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
@@ -7519,7 +7583,7 @@ std::unique_ptr<v8::BackingStore> v8::ArrayBuffer::NewBackingStore(
}
std::unique_ptr<v8::BackingStore> v8::ArrayBuffer::NewBackingStore(
- void* data, size_t byte_length, BackingStoreDeleterCallback deleter,
+ void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
void* deleter_data) {
CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
std::unique_ptr<i::BackingStoreBase> backing_store =
@@ -7848,7 +7912,7 @@ std::unique_ptr<v8::BackingStore> v8::SharedArrayBuffer::NewBackingStore(
}
std::unique_ptr<v8::BackingStore> v8::SharedArrayBuffer::NewBackingStore(
- void* data, size_t byte_length, BackingStoreDeleterCallback deleter,
+ void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
void* deleter_data) {
CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
std::unique_ptr<i::BackingStoreBase> backing_store =
@@ -8328,16 +8392,17 @@ void Isolate::SetHostCleanupFinalizationGroupCallback(
Maybe<bool> FinalizationGroup::Cleanup(
Local<FinalizationGroup> finalization_group) {
- i::Handle<i::JSFinalizationGroup> fg = Utils::OpenHandle(*finalization_group);
- i::Isolate* isolate = fg->native_context().GetIsolate();
- i::Handle<i::Context> i_context(fg->native_context(), isolate);
+ i::Handle<i::JSFinalizationRegistry> fr =
+ Utils::OpenHandle(*finalization_group);
+ i::Isolate* isolate = fr->native_context().GetIsolate();
+ i::Handle<i::Context> i_context(fr->native_context(), isolate);
Local<Context> context = Utils::ToLocal(i_context);
ENTER_V8(isolate, context, FinalizationGroup, Cleanup, Nothing<bool>(),
i::HandleScope);
- i::Handle<i::Object> callback(fg->cleanup(), isolate);
- fg->set_scheduled_for_cleanup(false);
+ i::Handle<i::Object> callback(fr->cleanup(), isolate);
+ fr->set_scheduled_for_cleanup(false);
has_pending_exception =
- i::JSFinalizationGroup::Cleanup(isolate, fg, callback).IsNothing();
+ i::JSFinalizationRegistry::Cleanup(isolate, fr, callback).IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -8455,6 +8520,8 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_physical_size_ = heap->CommittedPhysicalMemory();
heap_statistics->total_available_size_ = heap->Available();
heap_statistics->used_heap_size_ = heap->SizeOfObjects();
+ heap_statistics->total_global_handles_size_ = heap->TotalGlobalHandlesSize();
+ heap_statistics->used_global_handles_size_ = heap->UsedGlobalHandlesSize();
#ifndef V8_SHARED_RO_HEAP
i::ReadOnlySpace* ro_space = heap->read_only_space();
@@ -8655,10 +8722,10 @@ void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
isolate->SetPromiseRejectCallback(callback);
}
-void Isolate::RunMicrotasks() {
+void Isolate::PerformMicrotaskCheckpoint() {
DCHECK_NE(MicrotasksPolicy::kScoped, GetMicrotasksPolicy());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->default_microtask_queue()->RunMicrotasks(isolate);
+ isolate->default_microtask_queue()->PerformCheckpoint(this);
}
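The rename makes clear that this is a policy-respecting checkpoint (hence the DCHECK against kScoped) rather than an unconditional drain. A typical embedder pump loop with the new name, sketched under the usual v8::platform setup:

    while (v8::platform::PumpMessageLoop(platform, isolate)) {
      // PumpMessageLoop ran one task; keep pumping until the queue is empty.
    }
    isolate->PerformMicrotaskCheckpoint();  // formerly RunMicrotasks()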
void Isolate::EnqueueMicrotask(Local<Function> v8_function) {
@@ -9265,6 +9332,34 @@ MaybeLocal<Array> debug::GetInternalProperties(Isolate* v8_isolate,
return Utils::ToLocal(result);
}
+namespace {
+void CollectPrivateMethodsAndAccessorsFromContext(
+ i::Isolate* isolate, i::Handle<i::Context> context,
+ i::IsStaticFlag is_static_flag, std::vector<Local<Value>>* names_out,
+ std::vector<Local<Value>>* values_out) {
+ i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ int local_count = scope_info->ContextLocalCount();
+ for (int j = 0; j < local_count; ++j) {
+ i::VariableMode mode = scope_info->ContextLocalMode(j);
+ i::IsStaticFlag flag = scope_info->ContextLocalIsStaticFlag(j);
+ if (!i::IsPrivateMethodOrAccessorVariableMode(mode) ||
+ flag != is_static_flag) {
+ continue;
+ }
+
+ i::Handle<i::String> name(scope_info->ContextLocalName(j), isolate);
+ int context_index = scope_info->ContextHeaderLength() + j;
+ i::Handle<i::Object> slot_value(context->get(context_index), isolate);
+ DCHECK_IMPLIES(mode == i::VariableMode::kPrivateMethod,
+ slot_value->IsJSFunction());
+ DCHECK_IMPLIES(mode != i::VariableMode::kPrivateMethod,
+ slot_value->IsAccessorPair());
+ names_out->push_back(Utils::ToLocal(name));
+ values_out->push_back(Utils::ToLocal(slot_value));
+ }
+}
+} // anonymous namespace
+
bool debug::GetPrivateMembers(Local<Context> context, Local<Object> value,
std::vector<Local<Value>>* names_out,
std::vector<Local<Value>>* values_out) {
@@ -9285,7 +9380,7 @@ bool debug::GetPrivateMembers(Local<Context> context, Local<Object> value,
i::GetKeysConversion::kConvertToString),
false);
- // Estimate number of private entries to return in the FixedArray.
+ // Estimate number of private fields and private instance methods/accessors.
int private_entries_count = 0;
for (int i = 0; i < keys->length(); ++i) {
// Exclude the private brand symbols.
@@ -9305,10 +9400,42 @@ bool debug::GetPrivateMembers(Local<Context> context, Local<Object> value,
}
}
+ // Estimate number of static private methods/accessors for classes.
+ bool has_static_private_methods_or_accessors = false;
+ if (receiver->IsJSFunction()) {
+ i::Handle<i::JSFunction> func(i::JSFunction::cast(*receiver), isolate);
+ i::Handle<i::SharedFunctionInfo> shared(func->shared(), isolate);
+ if (shared->is_class_constructor() &&
+ shared->has_static_private_methods_or_accessors()) {
+ has_static_private_methods_or_accessors = true;
+ i::Handle<i::Context> context(func->context(), isolate);
+ i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ int local_count = scope_info->ContextLocalCount();
+ for (int j = 0; j < local_count; ++j) {
+ i::VariableMode mode = scope_info->ContextLocalMode(j);
+ i::IsStaticFlag is_static_flag =
+ scope_info->ContextLocalIsStaticFlag(j);
+ if (i::IsPrivateMethodOrAccessorVariableMode(mode) &&
+ is_static_flag == i::IsStaticFlag::kStatic) {
+ private_entries_count += local_count;
+ break;
+ }
+ }
+ }
+ }
+
DCHECK(names_out->empty());
names_out->reserve(private_entries_count);
DCHECK(values_out->empty());
values_out->reserve(private_entries_count);
+
+ if (has_static_private_methods_or_accessors) {
+ i::Handle<i::Context> context(i::JSFunction::cast(*receiver).context(),
+ isolate);
+ CollectPrivateMethodsAndAccessorsFromContext(
+ isolate, context, i::IsStaticFlag::kStatic, names_out, values_out);
+ }
+
for (int i = 0; i < keys->length(); ++i) {
i::Handle<i::Object> obj_key(keys->get(i), isolate);
i::Handle<i::Symbol> key(i::Symbol::cast(*obj_key), isolate);
@@ -9320,25 +9447,8 @@ bool debug::GetPrivateMembers(Local<Context> context, Local<Object> value,
if (key->is_private_brand()) {
DCHECK(value->IsContext());
i::Handle<i::Context> context(i::Context::cast(*value), isolate);
- i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
- int local_count = scope_info->ContextLocalCount();
-
- for (int j = 0; j < local_count; ++j) {
- i::VariableMode mode = scope_info->ContextLocalMode(j);
- if (!i::IsPrivateMethodOrAccessorVariableMode(mode)) {
- continue;
- }
-
- i::Handle<i::String> name(scope_info->ContextLocalName(j), isolate);
- int context_index = scope_info->ContextHeaderLength() + j;
- i::Handle<i::Object> slot_value(context->get(context_index), isolate);
- DCHECK_IMPLIES(mode == i::VariableMode::kPrivateMethod,
- slot_value->IsJSFunction());
- DCHECK_IMPLIES(mode != i::VariableMode::kPrivateMethod,
- slot_value->IsAccessorPair());
- names_out->push_back(Utils::ToLocal(name));
- values_out->push_back(Utils::ToLocal(slot_value));
- }
+ CollectPrivateMethodsAndAccessorsFromContext(
+ isolate, context, i::IsStaticFlag::kNotStatic, names_out, values_out);
} else { // Private fields
i::Handle<i::String> name(
i::String::cast(i::Symbol::cast(*key).description()), isolate);
@@ -9397,6 +9507,12 @@ void debug::BreakRightNow(Isolate* v8_isolate) {
isolate->debug()->HandleDebugBreak(i::kIgnoreIfAllFramesBlackboxed);
}
+void debug::SetTerminateOnResume(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ isolate->debug()->SetTerminateOnResume();
+}
+
bool debug::AllFramesOnStackAreBlackboxed(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_DO_NOT_USE(isolate);
@@ -9438,7 +9554,7 @@ std::vector<int> debug::Script::LineEnds() const {
i::Isolate* isolate = script->GetIsolate();
i::HandleScope scope(isolate);
- i::Script::InitLineEnds(script);
+ i::Script::InitLineEnds(isolate, script);
CHECK(script->line_ends().IsFixedArray());
i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()),
isolate);
@@ -9530,9 +9646,9 @@ bool debug::Script::GetPossibleBreakpoints(
locations);
}
- i::Script::InitLineEnds(script);
- CHECK(script->line_ends().IsFixedArray());
i::Isolate* isolate = script->GetIsolate();
+ i::Script::InitLineEnds(isolate, script);
+ CHECK(script->line_ends().IsFixedArray());
i::Handle<i::FixedArray> line_ends =
i::Handle<i::FixedArray>::cast(i::handle(script->line_ends(), isolate));
CHECK(line_ends->length());
@@ -9585,7 +9701,7 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const {
column = std::max(0, column - script->column_offset());
}
- i::Script::InitLineEnds(script);
+ i::Script::InitLineEnds(script->GetIsolate(), script);
CHECK(script->line_ends().IsFixedArray());
i::Handle<i::FixedArray> line_ends = i::Handle<i::FixedArray>::cast(
i::handle(script->line_ends(), script->GetIsolate()));
@@ -9732,6 +9848,15 @@ uint32_t debug::WasmScript::GetFunctionHash(int function_index) {
function_bytes.length(), 0);
}
+int debug::WasmScript::CodeOffset() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+
+ return module->code.offset();
+}
+
debug::Location::Location(int line_number, int column_number)
: line_number_(line_number),
column_number_(column_number),
@@ -9797,6 +9922,16 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
}
+void debug::TierDownAllModulesPerIsolate(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->wasm_engine()->TierDownAllModulesPerIsolate(isolate);
+}
+
+void debug::TierUpAllModulesPerIsolate(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->wasm_engine()->TierUpAllModulesPerIsolate(isolate);
+}
+
void debug::SetDebugDelegate(Isolate* v8_isolate,
debug::DebugDelegate* delegate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -9911,7 +10046,7 @@ debug::ConsoleCallArguments::ConsoleCallArguments(
: v8::FunctionCallbackInfo<v8::Value>(
nullptr,
// Drop the first argument (receiver, i.e. the "console" object).
- args.address_of_arg_at(args.length() > 1 ? 1 : 0),
+ args.length() > 1 ? args.address_of_first_argument() : nullptr,
args.length() - 1) {}
int debug::GetStackFrameId(v8::Local<v8::StackFrame> frame) {
@@ -10826,6 +10961,34 @@ void EmbedderHeapTracer::ResetHandleInNonTracingGC(
UNREACHABLE();
}
+const void* CTypeInfo::GetWrapperInfo() const {
+ DCHECK(payload_ & kWrapperTypeInfoMask);
+ return reinterpret_cast<const void*>(payload_ & kWrapperTypeInfoMask);
+}
+
+CFunction::CFunction(const void* address, const CFunctionInfo* type_info)
+ : address_(address), type_info_(type_info) {
+ CHECK_NOT_NULL(address_);
+ CHECK_NOT_NULL(type_info_);
+ for (unsigned int i = 0; i < type_info_->ArgumentCount(); ++i) {
+ if (type_info_->ArgumentInfo(i).IsArray()) {
+      // Array arguments require their length to be passed as an integer in
+      // the next argument.
+ DCHECK_LT(i + 1, type_info_->ArgumentCount());
+ switch (type_info_->ArgumentInfo(i + 1).GetType()) {
+ case CTypeInfo::Type::kInt32:
+ case CTypeInfo::Type::kUint32:
+ case CTypeInfo::Type::kInt64:
+ case CTypeInfo::Type::kUint64:
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
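The switch above enforces the fast-call convention that an array-typed argument must be immediately followed by an integral length argument. A hypothetical native target that satisfies the check:

    // Hypothetical fast-call target (illustration only): the int32_t* array
    // argument is immediately followed by its uint32_t length.
    static int32_t SumInts(int32_t* values, uint32_t count) {
      int32_t sum = 0;
      for (uint32_t i = 0; i < count; ++i) sum += values[i];
      return sum;
    }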
namespace internal {
const size_t HandleScopeImplementer::kEnteredContextsOffset =
@@ -11007,6 +11170,27 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
callback(info);
}
+void InvokeFinalizationRegistryCleanupFromTask(
+ Handle<Context> context,
+ Handle<JSFinalizationRegistry> finalization_registry,
+ Handle<Object> callback) {
+ Isolate* isolate = finalization_registry->native_context().GetIsolate();
+ RuntimeCallTimerScope timer(
+ isolate, RuntimeCallCounterId::kFinalizationRegistryCleanupFromTask);
+ // Do not use ENTER_V8 because this is always called from a running
+ // FinalizationRegistryCleanupTask within V8 and we should not log it as an
+ // API call. This method is implemented here to avoid duplication of the
+ // exception handling and microtask running logic in CallDepthScope.
+ if (IsExecutionTerminatingCheck(isolate)) return;
+ Local<v8::Context> api_context = Utils::ToLocal(context);
+ CallDepthScope<true> call_depth_scope(isolate, api_context);
+ VMState<OTHER> state(isolate);
+ if (JSFinalizationRegistry::Cleanup(isolate, finalization_registry, callback)
+ .IsNothing()) {
+ call_depth_scope.Escape();
+ }
+}
+
// Undefine macros for jumbo build.
#undef LOG_API
#undef ENTER_V8_DO_NOT_USE
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 004740099c..4c383d3e43 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -26,6 +26,7 @@ namespace v8 {
namespace internal {
class JSArrayBufferView;
+class JSFinalizationRegistry;
} // namespace internal
namespace debug {
@@ -92,7 +93,7 @@ class RegisteredExtension {
V(Data, Object) \
V(RegExp, JSRegExp) \
V(Object, JSReceiver) \
- V(FinalizationGroup, JSFinalizationGroup) \
+ V(FinalizationGroup, JSFinalizationRegistry) \
V(Array, JSArray) \
V(Map, JSMap) \
V(Set, JSSet) \
@@ -205,7 +206,7 @@ class Utils {
static inline Local<BigUint64Array> ToLocalBigUint64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<FinalizationGroup> ToLocal(
- v8::internal::Handle<v8::internal::JSFinalizationGroup> obj);
+ v8::internal::Handle<v8::internal::JSFinalizationRegistry> obj);
static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
@@ -561,6 +562,11 @@ void InvokeAccessorGetterCallback(
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback);
+void InvokeFinalizationRegistryCleanupFromTask(
+ Handle<Context> context,
+ Handle<JSFinalizationRegistry> finalization_registry,
+ Handle<Object> callback);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 033efc45b3..5a6846c33f 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -132,13 +132,11 @@ void Report(Handle<Script> script, int position, Vector<const char> text,
// Hook to report successful execution of {AsmJs::CompileAsmViaWasm} phase.
void ReportCompilationSuccess(Handle<Script> script, int position,
- double translate_time, double compile_time,
- size_t module_size) {
+ double compile_time, size_t module_size) {
if (FLAG_suppress_asm_messages || !FLAG_trace_asm_time) return;
EmbeddedVector<char, 100> text;
- int length = SNPrintF(
- text, "success, asm->wasm: %0.3f ms, compile: %0.3f ms, %zu bytes",
- translate_time, compile_time, module_size);
+ int length = SNPrintF(text, "success, compile time %0.3f ms, %zu bytes",
+ compile_time, module_size);
CHECK_NE(-1, length);
text.Truncate(length);
Report(script, position, text, MessageTemplate::kAsmJsCompiled,
@@ -195,16 +193,17 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob {
compilation_info_(&zone_, parse_info, literal),
module_(nullptr),
asm_offsets_(nullptr),
- translate_time_(0),
compile_time_(0),
- module_source_size_(0),
- translate_time_micro_(0),
- translate_zone_size_(0) {}
+ module_source_size_(0) {}
protected:
Status ExecuteJobImpl() final;
Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
Isolate* isolate) final;
+ Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ OffThreadIsolate* isolate) final {
+ UNREACHABLE();
+ }
private:
void RecordHistograms(Isolate* isolate);
@@ -216,21 +215,14 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob {
wasm::ZoneBuffer* asm_offsets_;
wasm::AsmJsParser::StdlibSet stdlib_uses_;
- double translate_time_; // Time (milliseconds) taken to execute step [1].
double compile_time_; // Time (milliseconds) taken to execute step [2].
int module_source_size_; // Module source size in bytes.
- int64_t translate_time_micro_; // Time (microseconds) taken to translate.
- size_t translate_zone_size_;
DISALLOW_COPY_AND_ASSIGN(AsmJsCompilationJob);
};
UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
// Step 1: Translate asm.js module to WebAssembly module.
- size_t compile_zone_start = compilation_info()->zone()->allocation_size();
- base::ElapsedTimer translate_timer;
- translate_timer.Start();
-
Zone* compile_zone = compilation_info()->zone();
Zone translate_zone(allocator_, ZONE_NAME);
@@ -254,19 +246,8 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
parser.module_builder()->WriteAsmJsOffsetTable(asm_offsets_);
stdlib_uses_ = *parser.stdlib_uses();
- size_t compile_zone_size =
- compilation_info()->zone()->allocation_size() - compile_zone_start;
- translate_zone_size_ = translate_zone.allocation_size();
- translate_time_ = translate_timer.Elapsed().InMillisecondsF();
- translate_time_micro_ = translate_timer.Elapsed().InMicroseconds();
module_source_size_ = compilation_info()->literal()->end_position() -
compilation_info()->literal()->start_position();
- if (FLAG_trace_asm_parser) {
- PrintF(
- "[asm.js translation successful: time=%0.3fms, "
- "translate_zone=%zuKB, compile_zone+=%zuKB]\n",
- translate_time_, translate_zone_size_ / KB, compile_zone_size / KB);
- }
return SUCCEEDED;
}
@@ -297,26 +278,12 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(
RecordHistograms(isolate);
ReportCompilationSuccess(handle(Script::cast(shared_info->script()), isolate),
compilation_info()->literal()->position(),
- translate_time_, compile_time_, module_->size());
+ compile_time_, module_->size());
return SUCCEEDED;
}
void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) {
- Counters* counters = isolate->counters();
- counters->asm_wasm_translation_time()->AddSample(
- static_cast<int>(translate_time_micro_));
- counters->asm_wasm_translation_peak_memory_bytes()->AddSample(
- static_cast<int>(translate_zone_size_));
- counters->asm_module_size_bytes()->AddSample(module_source_size_);
- // translation_throughput is not exact (assumes MB == 1000000). But that is ok
- // since the metric is stored in buckets that lose some precision anyways.
- int translation_throughput =
- translate_time_micro_ != 0
- ? static_cast<int>(static_cast<int64_t>(module_source_size_) /
- translate_time_micro_)
- : 0;
- counters->asm_wasm_translation_throughput()->AddSample(
- translation_throughput);
+ isolate->counters()->asm_module_size_bytes()->AddSample(module_source_size_);
}
std::unique_ptr<UnoptimizedCompilationJob> AsmJs::NewCompilationJob(
@@ -330,7 +297,7 @@ inline bool IsValidAsmjsMemorySize(size_t size) {
// Enforce asm.js spec minimum size.
if (size < (1u << 12u)) return false;
// Enforce engine-limited and flag-limited maximum allocation size.
- if (size > wasm::max_mem_pages() * uint64_t{wasm::kWasmPageSize}) {
+ if (size > wasm::max_initial_mem_pages() * uint64_t{wasm::kWasmPageSize}) {
return false;
}
// Enforce power-of-2 sizes for 2^12 - 2^24.
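Only the first checks of IsValidAsmjsMemorySize are visible in this hunk; they boil down to the following sketch, with max_pages standing in for wasm::max_initial_mem_pages() and 65536 for wasm::kWasmPageSize:

    bool IsValidAsmjsMemorySizeSketch(uint64_t size, uint64_t max_pages) {
      if (size < (uint64_t{1} << 12)) return false;          // spec minimum
      if (size > max_pages * uint64_t{65536}) return false;  // engine maximum
      if (size <= (uint64_t{1} << 24)) {
        return (size & (size - 1)) == 0;  // power of two in 2^12 .. 2^24
      }
      return true;  // larger sizes are governed by checks not shown here
    }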
@@ -441,9 +408,12 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
return single_function;
}
- Handle<String> exports_name =
- isolate->factory()->InternalizeUtf8String("exports");
- return Object::GetProperty(isolate, instance, exports_name);
+ // Here we rely on the fact that the exports object is eagerly created.
+ // The following check is a weak indicator for that. If this ever changes,
+ // then we'll have to call the "exports" getter, and be careful about
+ // handling possible stack overflow exceptions.
+ DCHECK(instance->exports_object().IsJSObject());
+ return handle(instance->exports_object(), isolate);
}
} // namespace internal
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index af4b49c6c3..8a1f8b2f16 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -295,13 +295,6 @@ void AstTraversalVisitor<Subclass>::VisitNativeFunctionLiteral(
}
template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitDoExpression(DoExpression* expr) {
- PROCESS_EXPRESSION(expr);
- RECURSE(VisitBlock(expr->block()));
- RECURSE(VisitVariableProxy(expr->result()));
-}
-
-template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitConditional(Conditional* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->condition()));
@@ -519,15 +512,6 @@ void AstTraversalVisitor<Subclass>::VisitSpread(Spread* expr) {
}
template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitStoreInArrayLiteral(
- StoreInArrayLiteral* expr) {
- PROCESS_EXPRESSION(expr);
- RECURSE_EXPRESSION(Visit(expr->array()));
- RECURSE_EXPRESSION(Visit(expr->index()));
- RECURSE_EXPRESSION(Visit(expr->value()));
-}
-
-template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitEmptyParentheses(
EmptyParentheses* expr) {
PROCESS_EXPRESSION(expr);
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index fd61cd6808..7e1be44da1 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -28,6 +28,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/common/globals.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/heap/factory-inl.h"
#include "src/heap/off-thread-factory-inl.h"
#include "src/objects/objects-inl.h"
@@ -57,24 +58,24 @@ class OneByteStringStream {
} // namespace
-void AstRawString::Internalize(Factory* factory) {
+void AstRawString::Internalize(Isolate* isolate) {
DCHECK(!has_string_);
if (literal_bytes_.length() == 0) {
- set_string(factory->empty_string());
+ set_string(isolate->factory()->empty_string());
} else if (is_one_byte()) {
OneByteStringKey key(hash_field_, literal_bytes_);
- set_string(factory->InternalizeStringWithKey(&key));
+ set_string(isolate->factory()->InternalizeStringWithKey(&key));
} else {
TwoByteStringKey key(hash_field_,
Vector<const uint16_t>::cast(literal_bytes_));
- set_string(factory->InternalizeStringWithKey(&key));
+ set_string(isolate->factory()->InternalizeStringWithKey(&key));
}
}
-void AstRawString::Internalize(OffThreadFactory* factory) {
+void AstRawString::Internalize(OffThreadIsolate* isolate) {
DCHECK(!has_string_);
if (literal_bytes_.length() == 0) {
- set_string(factory->empty_string());
+ set_string(isolate->factory()->empty_string());
return;
}
@@ -82,28 +83,30 @@ void AstRawString::Internalize(OffThreadFactory* factory) {
// construction and don't have access to the main thread string table yet, so
// we just unconditionally create strings and will internalize them properly
// during merging.
- OffThreadHandle<SeqString> string;
+ Handle<SeqString> string;
if (is_one_byte()) {
- string = factory->NewOneByteInternalizedString(
+ string = isolate->factory()->NewOneByteInternalizedString(
Vector<const uint8_t>::cast(literal_bytes_), hash_field());
} else {
- string = factory->NewTwoByteInternalizedString(
+ string = isolate->factory()->NewTwoByteInternalizedString(
Vector<const uc16>::cast(literal_bytes_), hash_field());
}
set_string(string);
}
bool AstRawString::AsArrayIndex(uint32_t* index) const {
- // The StringHasher will set up the hash in such a way that we can use it to
- // figure out whether the string is convertible to an array index.
- if ((hash_field_ & Name::kIsNotArrayIndexMask) != 0) return false;
+  // The StringHasher will set up the hash. Bail out early if we know it
+  // can't be converted to an array index.
+ if ((hash_field_ & Name::kIsNotIntegerIndexMask) != 0) return false;
if (length() <= Name::kMaxCachedArrayIndexLength) {
*index = Name::ArrayIndexValueBits::decode(hash_field_);
- } else {
- OneByteStringStream stream(literal_bytes_);
- CHECK(StringToIndex(&stream, index));
+ return true;
}
- return true;
+  // Might be an index, but too big to cache. Do the slow conversion. This
+  // might fail if the string is outside the uint32_t (but within "safe
+  // integer") range.
+ OneByteStringStream stream(literal_bytes_);
+ return StringToIndex(&stream, index);
}
bool AstRawString::IsIntegerIndex() const {
@@ -159,37 +162,91 @@ bool AstRawString::Compare(void* a, void* b) {
}
}
-template <typename Factory>
-void AstConsString::Internalize(Factory* factory) {
+template <typename LocalIsolate>
+Handle<String> AstConsString::Allocate(LocalIsolate* isolate) const {
+ DCHECK(string_.is_null());
+
if (IsEmpty()) {
- set_string(factory->empty_string());
- return;
+ return isolate->factory()->empty_string();
}
- // AstRawStrings are internalized before AstConsStrings, so
+ // AstRawStrings are internalized before AstConsStrings are allocated, so
// AstRawString::string() will just work.
- FactoryHandle<Factory, String> tmp(segment_.string->string().get<Factory>());
+ Handle<String> tmp = segment_.string->string();
for (AstConsString::Segment* current = segment_.next; current != nullptr;
current = current->next) {
- tmp =
- factory
- ->NewConsString(
- current->string->string().get<Factory>(), tmp,
- // TODO(leszeks): This is to avoid memory regressions while this
- // path is under development -- the off-thread factory doesn't
- // support young allocations. Figure out a way to avoid memory
- // regressions related to ConsStrings in the off-thread path.
- std::is_same<Factory, OffThreadFactory>::value
- ? AllocationType::kOld
- : AllocationType::kYoung)
+ tmp = isolate->factory()
+ ->NewConsString(current->string->string(), tmp,
+ AllocationType::kOld)
+ .ToHandleChecked();
+ }
+ return tmp;
+}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<String> AstConsString::Allocate<Isolate>(Isolate* isolate) const;
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<String> AstConsString::Allocate<OffThreadIsolate>(
+ OffThreadIsolate* isolate) const;
+
+template <typename LocalIsolate>
+Handle<String> AstConsString::AllocateFlat(LocalIsolate* isolate) const {
+ if (IsEmpty()) {
+ return isolate->factory()->empty_string();
+ }
+ if (!segment_.next) {
+ return segment_.string->string();
+ }
+
+ int result_length = 0;
+ bool is_one_byte = true;
+ for (const AstConsString::Segment* current = &segment_; current != nullptr;
+ current = current->next) {
+ result_length += current->string->length();
+ is_one_byte = is_one_byte && current->string->is_one_byte();
+ }
+
+ if (is_one_byte) {
+ Handle<SeqOneByteString> result =
+ isolate->factory()
+ ->NewRawOneByteString(result_length, AllocationType::kOld)
.ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ uint8_t* dest = result->GetChars(no_gc) + result_length;
+ for (const AstConsString::Segment* current = &segment_; current != nullptr;
+ current = current->next) {
+ int length = current->string->length();
+ dest -= length;
+ CopyChars(dest, current->string->raw_data(), length);
+ }
+ DCHECK_EQ(dest, result->GetChars(no_gc));
+ return result;
+ }
+
+ Handle<SeqTwoByteString> result =
+ isolate->factory()
+ ->NewRawTwoByteString(result_length, AllocationType::kOld)
+ .ToHandleChecked();
+ DisallowHeapAllocation no_gc;
+ uint16_t* dest = result->GetChars(no_gc) + result_length;
+ for (const AstConsString::Segment* current = &segment_; current != nullptr;
+ current = current->next) {
+ int length = current->string->length();
+ dest -= length;
+ if (current->string->is_one_byte()) {
+ CopyChars(dest, current->string->raw_data(), length);
+ } else {
+ CopyChars(dest,
+ reinterpret_cast<const uint16_t*>(current->string->raw_data()),
+ length);
+ }
}
- set_string(tmp);
+ DCHECK_EQ(dest, result->GetChars(no_gc));
+ return result;
}
-template EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) void AstConsString::Internalize<Factory>(Factory*
- factory);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void AstConsString::
- Internalize<OffThreadFactory>(OffThreadFactory* factory);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<String> AstConsString::AllocateFlat<Isolate>(Isolate* isolate) const;
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<String> AstConsString::AllocateFlat<OffThreadIsolate>(
+ OffThreadIsolate* isolate) const;
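AllocateFlat fills the result back to front: AddString prepends segments, so walking the newest-first list while decrementing dest restores source order in one pass. The same idea in self-contained form, using standard C++ and a hypothetical helper name:

    #include <cstring>
    #include <string>
    #include <vector>

    // Segments arrive newest-first, as in AstConsString; copying toward the
    // front of the output restores the original order without a reversal.
    std::string FlattenNewestFirst(const std::vector<std::string>& segments) {
      size_t total = 0;
      for (const auto& s : segments) total += s.size();
      std::string out(total, '\0');
      char* dest = &out[0] + total;
      for (const auto& s : segments) {
        dest -= s.size();
        std::memcpy(dest, s.data(), s.size());
      }
      return out;
    }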
std::forward_list<const AstRawString*> AstConsString::ToRawStrings() const {
std::forward_list<const AstRawString*> result;
@@ -275,10 +332,7 @@ const AstRawString* AstValueFactory::CloneFromOtherFactory(
}
AstConsString* AstValueFactory::NewConsString() {
- AstConsString* new_string = new (zone_) AstConsString;
- DCHECK_NOT_NULL(new_string);
- AddConsString(new_string);
- return new_string;
+ return new (zone_) AstConsString;
}
AstConsString* AstValueFactory::NewConsString(const AstRawString* str) {
@@ -290,30 +344,23 @@ AstConsString* AstValueFactory::NewConsString(const AstRawString* str1,
return NewConsString()->AddString(zone_, str1)->AddString(zone_, str2);
}
-template <typename Factory>
-void AstValueFactory::Internalize(Factory* factory) {
+template <typename LocalIsolate>
+void AstValueFactory::Internalize(LocalIsolate* isolate) {
// Strings need to be internalized before values, because values refer to
// strings.
for (AstRawString* current = strings_; current != nullptr;) {
AstRawString* next = current->next();
- current->Internalize(factory);
- current = next;
- }
-
- // AstConsStrings refer to AstRawStrings.
- for (AstConsString* current = cons_strings_; current != nullptr;) {
- AstConsString* next = current->next();
- current->Internalize(factory);
+ current->Internalize(isolate);
current = next;
}
ResetStrings();
}
template EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) void AstValueFactory::Internalize<Factory>(Factory*
- factory);
+ V8_EXPORT_PRIVATE) void AstValueFactory::Internalize<Isolate>(Isolate*
+ isolate);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void AstValueFactory::
- Internalize<OffThreadFactory>(OffThreadFactory* factory);
+ Internalize<OffThreadIsolate>(OffThreadIsolate* isolate);
AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte,
Vector<const byte> literal_bytes) {
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 9c53f0a8f8..dce9de4069 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -32,9 +32,7 @@
#include "src/base/hashmap.h"
#include "src/common/globals.h"
-#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/heap/off-thread-factory.h"
#include "src/numbers/conversions.h"
// Ast(Raw|Cons)String and AstValueFactory are for storing strings and
@@ -45,6 +43,9 @@
namespace v8 {
namespace internal {
+class Isolate;
+class OffThreadIsolate;
+
class AstRawString final : public ZoneObject {
public:
bool IsEmpty() const { return literal_bytes_.length() == 0; }
@@ -57,8 +58,8 @@ class AstRawString final : public ZoneObject {
V8_EXPORT_PRIVATE bool IsOneByteEqualTo(const char* data) const;
uint16_t FirstCharacter() const;
- void Internalize(Factory* factory);
- void Internalize(OffThreadFactory* factory);
+ void Internalize(Isolate* isolate);
+ void Internalize(OffThreadIsolate* isolate);
// Access the physical representation:
bool is_one_byte() const { return is_one_byte_; }
@@ -70,7 +71,7 @@ class AstRawString final : public ZoneObject {
uint32_t Hash() const { return hash_field_ >> Name::kHashShift; }
// This function can be called after internalizing.
- V8_INLINE HandleOrOffThreadHandle<String> string() const {
+ V8_INLINE Handle<String> string() const {
DCHECK(has_string_);
return string_;
}
@@ -97,7 +98,7 @@ class AstRawString final : public ZoneObject {
return &next_;
}
- void set_string(HandleOrOffThreadHandle<String> string) {
+ void set_string(Handle<String> string) {
DCHECK(!string.is_null());
DCHECK(!has_string_);
string_ = string;
@@ -108,7 +109,7 @@ class AstRawString final : public ZoneObject {
union {
AstRawString* next_;
- HandleOrOffThreadHandle<String> string_;
+ Handle<String> string_;
};
Vector<const byte> literal_bytes_; // Memory owned by Zone.
@@ -143,53 +144,42 @@ class AstConsString final : public ZoneObject {
return segment_.string == nullptr;
}
- template <typename Factory>
- void Internalize(Factory* factory);
-
- // This function can be called after internalizing.
- V8_INLINE HandleOrOffThreadHandle<String> string() const {
- DCHECK(has_string_);
+ template <typename LocalIsolate>
+ Handle<String> GetString(LocalIsolate* isolate) {
+ if (string_.is_null()) {
+ string_ = Allocate(isolate);
+ }
return string_;
}
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<String> AllocateFlat(LocalIsolate* isolate) const;
+
std::forward_list<const AstRawString*> ToRawStrings() const;
private:
friend class AstValueFactory;
- AstConsString() : next_(nullptr), segment_({nullptr, nullptr}) {}
-
- AstConsString* next() const { return next_; }
- AstConsString** next_location() { return &next_; }
+ AstConsString() : string_(), segment_({nullptr, nullptr}) {}
- void set_string(HandleOrOffThreadHandle<String> string) {
- DCHECK(!string.is_null());
- DCHECK(!has_string_);
- string_ = string;
-#ifdef DEBUG
- has_string_ = true;
-#endif
- }
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<String> Allocate(LocalIsolate* isolate) const;
- // {string_} is stored as Address* instead of a Handle<String> so it can be
- // stored in a union with {next_}.
- union {
- AstConsString* next_;
- HandleOrOffThreadHandle<String> string_;
- };
+ Handle<String> string_;
+ // A linked list of AstRawStrings of the contents of this AstConsString.
+ // This list has several properties:
+ //
+  // * For empty strings the string pointer is null.
+  // * Appended raw strings are added to the head of the list, so they are in
+  //   reverse order.
struct Segment {
const AstRawString* string;
AstConsString::Segment* next;
};
Segment segment_;
-
-#ifdef DEBUG
- // (Debug-only:) Verify the object life-cylce: Some functions may only be
- // called after internalization (that is, after a v8::internal::String has
- // been set); some only before.
- bool has_string_ = false;
-#endif
};
enum class AstSymbol : uint8_t { kHomeObjectSymbol };
@@ -294,8 +284,6 @@ class AstValueFactory {
: string_table_(string_constants->string_table()),
strings_(nullptr),
strings_end_(&strings_),
- cons_strings_(nullptr),
- cons_strings_end_(&cons_strings_),
string_constants_(string_constants),
empty_cons_string_(nullptr),
zone_(zone),
@@ -329,8 +317,8 @@ class AstValueFactory {
V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str1,
const AstRawString* str2);
- template <typename Factory>
- void Internalize(Factory* factory);
+ template <typename LocalIsolate>
+ void Internalize(LocalIsolate* isolate);
#define F(name, str) \
const AstRawString* name##_string() const { \
@@ -338,7 +326,7 @@ class AstValueFactory {
}
AST_STRING_CONSTANTS(F)
#undef F
- const AstConsString* empty_cons_string() const { return empty_cons_string_; }
+ AstConsString* empty_cons_string() const { return empty_cons_string_; }
private:
AstRawString* AddString(AstRawString* string) {
@@ -346,16 +334,9 @@ class AstValueFactory {
strings_end_ = string->next_location();
return string;
}
- AstConsString* AddConsString(AstConsString* string) {
- *cons_strings_end_ = string;
- cons_strings_end_ = string->next_location();
- return string;
- }
void ResetStrings() {
strings_ = nullptr;
strings_end_ = &strings_;
- cons_strings_ = nullptr;
- cons_strings_end_ = &cons_strings_;
}
V8_EXPORT_PRIVATE AstRawString* GetOneByteStringInternal(
Vector<const uint8_t> literal);
@@ -366,16 +347,13 @@ class AstValueFactory {
// All strings are copied here, one after another (no zeroes in between).
base::CustomMatcherHashMap string_table_;
- // We need to keep track of strings_ in order since cons strings require their
- // members to be internalized first.
AstRawString* strings_;
AstRawString** strings_end_;
- AstConsString* cons_strings_;
- AstConsString** cons_strings_end_;
// Holds constant string values which are shared across the isolate.
const AstStringConstants* string_constants_;
- const AstConsString* empty_cons_string_;
+
+ AstConsString* empty_cons_string_;
// Caches one character lowercase strings (for minified code).
static const int kMaxOneCharStringValue = 128;
@@ -387,12 +365,12 @@ class AstValueFactory {
};
extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void AstValueFactory::Internalize<Factory>(Factory*
- factory);
+ V8_EXPORT_PRIVATE) void AstValueFactory::Internalize<Isolate>(Isolate*
+ isolate);
extern template EXPORT_TEMPLATE_DECLARE(
V8_EXPORT_PRIVATE) void AstValueFactory::
- Internalize<OffThreadFactory>(OffThreadFactory* factory);
+ Internalize<OffThreadIsolate>(OffThreadIsolate* isolate);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 12c3aa981f..651508b677 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -10,9 +10,12 @@
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
+#include "src/base/logging.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins.h"
#include "src/common/assert-scope.h"
+#include "src/execution/off-thread-isolate.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/numbers/conversions-inl.h"
#include "src/numbers/double.h"
#include "src/objects/contexts.h"
@@ -57,7 +60,6 @@ void AstNode::Print(Isolate* isolate) {
AstPrinter::PrintOut(isolate, this);
}
-
#endif // DEBUG
#define RETURN_NODE(Node) \
@@ -202,8 +204,7 @@ void FunctionLiteral::set_inferred_name(Handle<String> inferred_name) {
scope()->set_has_inferred_function_name(true);
}
-void FunctionLiteral::set_raw_inferred_name(
- const AstConsString* raw_inferred_name) {
+void FunctionLiteral::set_raw_inferred_name(AstConsString* raw_inferred_name) {
DCHECK_NOT_NULL(raw_inferred_name);
raw_inferred_name_ = raw_inferred_name;
DCHECK(inferred_name_.is_null());
@@ -233,11 +234,7 @@ int FunctionLiteral::start_position() const {
return scope()->start_position();
}
-
-int FunctionLiteral::end_position() const {
- return scope()->end_position();
-}
-
+int FunctionLiteral::end_position() const { return scope()->end_position(); }
LanguageMode FunctionLiteral::language_mode() const {
return scope()->language_mode();
@@ -281,17 +278,6 @@ std::unique_ptr<char[]> FunctionLiteral::GetDebugName() const {
return result;
}
-bool FunctionLiteral::requires_brand_initialization() const {
- Scope* outer = scope_->outer_scope();
-
- // If there are no variables declared in the outer scope other than
- // the class name variable, the outer class scope may be elided when
- // the function is deserialized after preparsing.
- if (!outer->is_class_scope()) return false;
-
- return outer->AsClassScope()->brand() != nullptr;
-}
-
bool FunctionLiteral::private_name_lookup_skips_outer_class() const {
return scope()->private_name_lookup_skips_outer_class();
}
@@ -339,7 +325,6 @@ bool ObjectLiteral::Property::IsCompileTimeValue() const {
(kind_ == MATERIALIZED_LITERAL && value_->IsCompileTimeValue());
}
-
void ObjectLiteral::Property::set_emit_store(bool emit_store) {
emit_store_ = emit_store;
}
@@ -473,7 +458,8 @@ int ObjectLiteral::InitDepthAndFlags() {
return depth_acc;
}
-void ObjectLiteral::BuildBoilerplateDescription(Isolate* isolate) {
+template <typename LocalIsolate>
+void ObjectLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) {
if (!boilerplate_description_.is_null()) return;
int index_keys = 0;
@@ -518,9 +504,10 @@ void ObjectLiteral::BuildBoilerplateDescription(Isolate* isolate) {
uint32_t element_index = 0;
Handle<Object> key =
key_literal->AsArrayIndex(&element_index)
- ? isolate->factory()->NewNumberFromUint(element_index)
- : Handle<Object>::cast(
- key_literal->AsRawPropertyName()->string().get<Factory>());
+ ? isolate->factory()
+ ->template NewNumberFromUint<AllocationType::kOld>(
+ element_index)
+ : Handle<Object>::cast(key_literal->AsRawPropertyName()->string());
Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
@@ -532,6 +519,10 @@ void ObjectLiteral::BuildBoilerplateDescription(Isolate* isolate) {
boilerplate_description_ = boilerplate_description;
}
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ObjectLiteral::
+ BuildBoilerplateDescription(Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ObjectLiteral::
+ BuildBoilerplateDescription(OffThreadIsolate* isolate);
bool ObjectLiteral::IsFastCloningSupported() const {
  // The CreateShallowObjectLiteral builtin doesn't copy elements, and object
@@ -622,7 +613,8 @@ int ArrayLiteral::InitDepthAndFlags() {
return depth_acc;
}
-void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
+template <typename LocalIsolate>
+void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) {
if (!boilerplate_description_.is_null()) return;
int constants_length =
@@ -632,9 +624,11 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
Handle<FixedArrayBase> elements;
if (use_doubles) {
- elements = isolate->factory()->NewFixedDoubleArray(constants_length);
+ elements = isolate->factory()->NewFixedDoubleArray(constants_length,
+ AllocationType::kOld);
} else {
- elements = isolate->factory()->NewFixedArrayWithHoles(constants_length);
+ elements = isolate->factory()->NewFixedArrayWithHoles(constants_length,
+ AllocationType::kOld);
}
// Fill in the literals.
@@ -664,7 +658,7 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
}
// New handle scope here; needs to be after BuildConstants().
- HandleScope scope(isolate);
+ typename LocalIsolate::HandleScopeType scope(isolate);
Object boilerplate_value = *GetBoilerplateValue(element, isolate);
// We shouldn't allocate after creating the boilerplate value.
@@ -679,14 +673,15 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
boilerplate_value = Smi::zero();
}
- DCHECK_EQ(boilerplate_descriptor_kind(),
- GetMoreGeneralElementsKind(
- boilerplate_descriptor_kind(),
- boilerplate_value.OptimalElementsKind(isolate)));
+ DCHECK_EQ(
+ boilerplate_descriptor_kind(),
+ GetMoreGeneralElementsKind(boilerplate_descriptor_kind(),
+ boilerplate_value.OptimalElementsKind(
+ GetIsolateForPtrCompr(*elements))));
- Handle<FixedArray>::cast(elements)->set(array_index, boilerplate_value);
+ FixedArray::cast(*elements).set(array_index, boilerplate_value);
}
- }
+  }
// Simple and shallow arrays can be lazily copied; we transform the
// elements array to a copy-on-write array.
@@ -698,6 +693,11 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
boilerplate_description_ =
isolate->factory()->NewArrayBoilerplateDescription(kind, elements);
}
+template EXPORT_TEMPLATE_DEFINE(
+ V8_BASE_EXPORT) void ArrayLiteral::BuildBoilerplateDescription(Isolate*
+ isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ArrayLiteral::
+ BuildBoilerplateDescription(OffThreadIsolate* isolate);
bool ArrayLiteral::IsFastCloningSupported() const {
return depth() <= 1 &&
@@ -712,8 +712,9 @@ bool MaterializedLiteral::IsSimple() const {
return false;
}
+template <typename LocalIsolate>
Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
- Isolate* isolate) {
+ LocalIsolate* isolate) {
if (expression->IsLiteral()) {
return expression->AsLiteral()->BuildValue(isolate);
}
@@ -731,6 +732,12 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
}
return isolate->factory()->uninitialized_value();
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Object> MaterializedLiteral::GetBoilerplateValue(
+ Expression* expression, Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Object> MaterializedLiteral::GetBoilerplateValue(
+ Expression* expression, OffThreadIsolate* isolate);
int MaterializedLiteral::InitDepthAndFlags() {
if (IsArrayLiteral()) return AsArrayLiteral()->InitDepthAndFlags();
@@ -750,7 +757,8 @@ bool MaterializedLiteral::NeedsInitialAllocationSite() {
return false;
}
-void MaterializedLiteral::BuildConstants(Isolate* isolate) {
+template <typename LocalIsolate>
+void MaterializedLiteral::BuildConstants(LocalIsolate* isolate) {
if (IsArrayLiteral()) {
AsArrayLiteral()->BuildBoilerplateDescription(isolate);
return;
@@ -761,19 +769,30 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
}
DCHECK(IsRegExpLiteral());
}
+template EXPORT_TEMPLATE_DEFINE(
+ V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(
+ V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(OffThreadIsolate*
+ isolate);
+template <typename LocalIsolate>
Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
- Isolate* isolate) {
+ LocalIsolate* isolate) {
Handle<FixedArray> raw_strings = isolate->factory()->NewFixedArray(
this->raw_strings()->length(), AllocationType::kOld);
bool raw_and_cooked_match = true;
for (int i = 0; i < raw_strings->length(); ++i) {
- if (this->cooked_strings()->at(i) == nullptr ||
- *this->raw_strings()->at(i)->string().get<Factory>() !=
- *this->cooked_strings()->at(i)->string().get<Factory>()) {
+ if (this->raw_strings()->at(i) != this->cooked_strings()->at(i)) {
+ // If the AstRawStrings don't match, then neither should the allocated
+ // Strings, since the AstValueFactory should have deduplicated them
+ // already.
+ DCHECK_IMPLIES(this->cooked_strings()->at(i) != nullptr,
+ *this->cooked_strings()->at(i)->string() !=
+ *this->raw_strings()->at(i)->string());
+
raw_and_cooked_match = false;
}
- raw_strings->set(i, *this->raw_strings()->at(i)->string().get<Factory>());
+ raw_strings->set(i, *this->raw_strings()->at(i)->string());
}
Handle<FixedArray> cooked_strings = raw_strings;
if (!raw_and_cooked_match) {
@@ -781,8 +800,7 @@ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
this->cooked_strings()->length(), AllocationType::kOld);
for (int i = 0; i < cooked_strings->length(); ++i) {
if (this->cooked_strings()->at(i) != nullptr) {
- cooked_strings->set(
- i, *this->cooked_strings()->at(i)->string().get<Factory>());
+ cooked_strings->set(i, *this->cooked_strings()->at(i)->string());
} else {
cooked_strings->set(i, ReadOnlyRoots(isolate).undefined_value());
}
@@ -791,6 +809,12 @@ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
return isolate->factory()->NewTemplateObjectDescription(raw_strings,
cooked_strings);
}
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
+ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
+ Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT)
+ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
+ OffThreadIsolate* isolate);
static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) {
// Add is not commutative due to potential for string addition.
@@ -839,20 +863,16 @@ bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
MatchLiteralCompareTypeof(right_, op(), left_, expr, literal);
}
-
static bool IsVoidOfLiteral(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != nullptr && maybe_unary->op() == Token::VOID &&
maybe_unary->expression()->IsLiteral();
}
-
// Check for the pattern: void <literal> equals <expression> or
// undefined equals <expression>
-static bool MatchLiteralCompareUndefined(Expression* left,
- Token::Value op,
- Expression* right,
- Expression** expr) {
+static bool MatchLiteralCompareUndefined(Expression* left, Token::Value op,
+ Expression* right, Expression** expr) {
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
@@ -870,10 +890,8 @@ bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
}
// Check for the pattern: null equals <expression>
-static bool MatchLiteralCompareNull(Expression* left,
- Token::Value op,
- Expression* right,
- Expression** expr) {
+static bool MatchLiteralCompareNull(Expression* left, Token::Value op,
+ Expression* right, Expression** expr) {
if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
*expr = right;
return true;
@@ -960,14 +978,16 @@ bool Literal::AsArrayIndex(uint32_t* value) const {
return ToUint32(value) && *value != kMaxUInt32;
}
-Handle<Object> Literal::BuildValue(Isolate* isolate) const {
+template <typename LocalIsolate>
+Handle<Object> Literal::BuildValue(LocalIsolate* isolate) const {
switch (type()) {
case kSmi:
return handle(Smi::FromInt(smi_), isolate);
case kHeapNumber:
- return isolate->factory()->NewNumber<AllocationType::kOld>(number_);
+ return isolate->factory()->template NewNumber<AllocationType::kOld>(
+ number_);
case kString:
- return string_->string().get<Factory>();
+ return string_->string();
case kSymbol:
return isolate->factory()->home_object_symbol();
case kBoolean:
@@ -985,6 +1005,10 @@ Handle<Object> Literal::BuildValue(Isolate* isolate) const {
}
UNREACHABLE();
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Object> Literal::BuildValue(Isolate* isolate) const;
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Object> Literal::BuildValue(OffThreadIsolate* isolate) const;
bool Literal::ToBooleanIsTrue() const {
switch (type()) {
@@ -1024,7 +1048,6 @@ uint32_t Literal::Hash() {
: ComputeLongHash(double_to_uint64(AsNumber()));
}
-
// static
bool Literal::Match(void* a, void* b) {
Literal* x = static_cast<Literal*>(a);
@@ -1051,20 +1074,5 @@ const char* CallRuntime::debug_name() {
#endif // DEBUG
}
-#define RETURN_LABELS(NodeType) \
- case k##NodeType: \
- return static_cast<const NodeType*>(this)->labels();
-
-ZonePtrList<const AstRawString>* BreakableStatement::labels() const {
- switch (node_type()) {
- BREAKABLE_NODE_LIST(RETURN_LABELS)
- ITERATION_NODE_LIST(RETURN_LABELS)
- default:
- UNREACHABLE();
- }
-}
-
-#undef RETURN_LABELS
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 06af719d55..5bf2d7e192 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -15,6 +15,7 @@
#include "src/codegen/label.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/heap/factory.h"
#include "src/objects/elements-kind.h"
#include "src/objects/function-syntax-kind.h"
@@ -89,7 +90,6 @@ namespace internal {
V(CompoundAssignment) \
V(Conditional) \
V(CountOperation) \
- V(DoExpression) \
V(EmptyParentheses) \
V(FunctionLiteral) \
V(GetTemplateObject) \
@@ -99,7 +99,6 @@ namespace internal {
V(OptionalChain) \
V(Property) \
V(Spread) \
- V(StoreInArrayLiteral) \
V(SuperCallReference) \
V(SuperPropertyReference) \
V(TemplateLiteral) \
@@ -295,62 +294,28 @@ class FailureExpression : public Expression {
// V8's notion of BreakableStatement does not correspond to the notion of
// BreakableStatement in ECMAScript. In V8, the idea is that a
// BreakableStatement is a statement that can be the target of a break
-// statement. The BreakableStatement AST node carries a list of labels, any of
-// which can be used as an argument to the break statement in order to target
-// it.
+// statement.
//
-// Since we don't want to attach a list of labels to all kinds of statements, we
+// Since we don't want to track a list of labels for all kinds of statements, we
// only declare switches, loops, and blocks as BreakableStatements. This means
// that we implement breaks targeting other statement forms as breaks targeting
// a substatement thereof. For instance, in "foo: if (b) { f(); break foo; }" we
// pretend that foo is the label of the inner block. That's okay because one
// can't observe the difference.
-//
-// This optimization makes it harder to detect invalid continue labels, see the
-// need for own_labels in IterationStatement.
-//
+// TODO(verwaest): Reconsider this optimization now that the tracking of labels
+// is done at runtime.
class BreakableStatement : public Statement {
- public:
- enum BreakableType {
- TARGET_FOR_ANONYMOUS,
- TARGET_FOR_NAMED_ONLY
- };
-
- // A list of all labels declared on the path up to the previous
- // BreakableStatement (if any).
- //
- // Example: "l1: for (;;) l2: l3: { l4: if (b) l5: { s } }"
- // labels() of the ForStatement will be l1.
- // labels() of the Block { l4: ... } will be l2, l3.
- // labels() of the Block { s } will be l4, l5.
- ZonePtrList<const AstRawString>* labels() const;
-
- // Testers.
- bool is_target_for_anonymous() const {
- return BreakableTypeField::decode(bit_field_) == TARGET_FOR_ANONYMOUS;
- }
-
- private:
- using BreakableTypeField = Statement::NextBitField<BreakableType, 1>;
-
protected:
- BreakableStatement(BreakableType breakable_type, int position, NodeType type)
- : Statement(position, type) {
- bit_field_ |= BreakableTypeField::encode(breakable_type);
- }
-
- template <class T, int size>
- using NextBitField = BreakableTypeField::Next<T, size>;
+ BreakableStatement(int position, NodeType type) : Statement(position, type) {}
};
-class Block : public BreakableStatement {
+class Block final : public BreakableStatement {
public:
ZonePtrList<Statement>* statements() { return &statements_; }
bool ignore_completion_value() const {
return IgnoreCompletionField::decode(bit_field_);
}
-
- inline ZonePtrList<const AstRawString>* labels() const;
+ bool is_breakable() const { return IsBreakableField::decode(bit_field_); }
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
@@ -368,68 +333,22 @@ class Block : public BreakableStatement {
Scope* scope_;
using IgnoreCompletionField = BreakableStatement::NextBitField<bool, 1>;
- using IsLabeledField = IgnoreCompletionField::Next<bool, 1>;
+ using IsBreakableField = IgnoreCompletionField::Next<bool, 1>;
protected:
- Block(Zone* zone, ZonePtrList<const AstRawString>* labels, int capacity,
- bool ignore_completion_value)
- : BreakableStatement(TARGET_FOR_NAMED_ONLY, kNoSourcePosition, kBlock),
+ Block(Zone* zone, int capacity, bool ignore_completion_value,
+ bool is_breakable)
+ : BreakableStatement(kNoSourcePosition, kBlock),
statements_(capacity, zone),
scope_(nullptr) {
bit_field_ |= IgnoreCompletionField::encode(ignore_completion_value) |
- IsLabeledField::encode(labels != nullptr);
- }
-
- Block(ZonePtrList<const AstRawString>* labels, bool ignore_completion_value)
- : Block(nullptr, labels, 0, ignore_completion_value) {}
-};
-
-class LabeledBlock final : public Block {
- private:
- friend class AstNodeFactory;
- friend class Block;
-
- LabeledBlock(Zone* zone, ZonePtrList<const AstRawString>* labels,
- int capacity, bool ignore_completion_value)
- : Block(zone, labels, capacity, ignore_completion_value),
- labels_(labels) {
- DCHECK_NOT_NULL(labels);
- DCHECK_GT(labels->length(), 0);
+ IsBreakableField::encode(is_breakable);
}
- LabeledBlock(ZonePtrList<const AstRawString>* labels,
- bool ignore_completion_value)
- : LabeledBlock(nullptr, labels, 0, ignore_completion_value) {}
-
- ZonePtrList<const AstRawString>* labels_;
+ Block(bool ignore_completion_value, bool is_breakable)
+ : Block(nullptr, 0, ignore_completion_value, is_breakable) {}
};
-inline ZonePtrList<const AstRawString>* Block::labels() const {
- if (IsLabeledField::decode(bit_field_)) {
- return static_cast<const LabeledBlock*>(this)->labels_;
- }
- return nullptr;
-}
-
-class DoExpression final : public Expression {
- public:
- Block* block() { return block_; }
- VariableProxy* result() { return result_; }
-
- private:
- friend class AstNodeFactory;
-
- DoExpression(Block* block, VariableProxy* result, int pos)
- : Expression(pos, kDoExpression), block_(block), result_(result) {
- DCHECK_NOT_NULL(block_);
- DCHECK_NOT_NULL(result_);
- }
-
- Block* block_;
- VariableProxy* result_;
-};
-
-
class Declaration : public AstNode {
public:
using List = base::ThreadedList<Declaration>;
@@ -510,31 +429,12 @@ class IterationStatement : public BreakableStatement {
Statement* body() const { return body_; }
void set_body(Statement* s) { body_ = s; }
- ZonePtrList<const AstRawString>* labels() const { return labels_; }
-
- // A list of all labels that the iteration statement is directly prefixed
- // with, i.e. all the labels that a continue statement in the body can use to
- // continue this iteration statement. This is always a subset of {labels}.
- //
- // Example: "l1: { l2: if (b) l3: l4: for (;;) s }"
- // labels() of the Block will be l1.
- // labels() of the ForStatement will be l2, l3, l4.
- // own_labels() of the ForStatement will be l3, l4.
- ZonePtrList<const AstRawString>* own_labels() const { return own_labels_; }
-
protected:
- IterationStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos,
- NodeType type)
- : BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
- labels_(labels),
- own_labels_(own_labels),
- body_(nullptr) {}
+ IterationStatement(int pos, NodeType type)
+ : BreakableStatement(pos, type), body_(nullptr) {}
void Initialize(Statement* body) { body_ = body; }
private:
- ZonePtrList<const AstRawString>* labels_;
- ZonePtrList<const AstRawString>* own_labels_;
Statement* body_;
};
@@ -551,10 +451,8 @@ class DoWhileStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- DoWhileStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos)
- : IterationStatement(labels, own_labels, pos, kDoWhileStatement),
- cond_(nullptr) {}
+ explicit DoWhileStatement(int pos)
+ : IterationStatement(pos, kDoWhileStatement), cond_(nullptr) {}
Expression* cond_;
};
@@ -572,10 +470,8 @@ class WhileStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- WhileStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos)
- : IterationStatement(labels, own_labels, pos, kWhileStatement),
- cond_(nullptr) {}
+ explicit WhileStatement(int pos)
+ : IterationStatement(pos, kWhileStatement), cond_(nullptr) {}
Expression* cond_;
};
@@ -598,9 +494,8 @@ class ForStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- ForStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos)
- : IterationStatement(labels, own_labels, pos, kForStatement),
+ explicit ForStatement(int pos)
+ : IterationStatement(pos, kForStatement),
init_(nullptr),
cond_(nullptr),
next_(nullptr) {}
@@ -636,12 +531,8 @@ class ForEachStatement : public IterationStatement {
protected:
friend class AstNodeFactory;
- ForEachStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos,
- NodeType type)
- : IterationStatement(labels, own_labels, pos, type),
- each_(nullptr),
- subject_(nullptr) {}
+ ForEachStatement(int pos, NodeType type)
+ : IterationStatement(pos, type), each_(nullptr), subject_(nullptr) {}
Expression* each_;
Expression* subject_;
@@ -651,9 +542,7 @@ class ForInStatement final : public ForEachStatement {
private:
friend class AstNodeFactory;
- ForInStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos)
- : ForEachStatement(labels, own_labels, pos, kForInStatement) {}
+ explicit ForInStatement(int pos) : ForEachStatement(pos, kForInStatement) {}
};
enum class IteratorType { kNormal, kAsync };
@@ -664,11 +553,8 @@ class ForOfStatement final : public ForEachStatement {
private:
friend class AstNodeFactory;
- ForOfStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos,
- IteratorType type)
- : ForEachStatement(labels, own_labels, pos, kForOfStatement),
- type_(type) {}
+ ForOfStatement(int pos, IteratorType type)
+ : ForEachStatement(pos, kForOfStatement), type_(type) {}
IteratorType type_;
};
@@ -796,8 +682,6 @@ class CaseClause final : public ZoneObject {
class SwitchStatement final : public BreakableStatement {
public:
- ZonePtrList<const AstRawString>* labels() const { return labels_; }
-
Expression* tag() const { return tag_; }
void set_tag(Expression* t) { tag_ = t; }
@@ -806,14 +690,9 @@ class SwitchStatement final : public BreakableStatement {
private:
friend class AstNodeFactory;
- SwitchStatement(Zone* zone, ZonePtrList<const AstRawString>* labels,
- Expression* tag, int pos)
- : BreakableStatement(TARGET_FOR_ANONYMOUS, pos, kSwitchStatement),
- labels_(labels),
- tag_(tag),
- cases_(4, zone) {}
+ SwitchStatement(Zone* zone, Expression* tag, int pos)
+ : BreakableStatement(pos, kSwitchStatement), tag_(tag), cases_(4, zone) {}
- ZonePtrList<const AstRawString>* labels_;
Expression* tag_;
ZonePtrList<CaseClause> cases_;
};
@@ -1088,7 +967,8 @@ class Literal final : public Expression {
// Returns an appropriate Object representing this Literal, allocating
// a heap object if needed.
- Handle<Object> BuildValue(Isolate* isolate) const;
+ template <typename LocalIsolate>
+ Handle<Object> BuildValue(LocalIsolate* isolate) const;
// Support for using Literal as a HashMap key. NOTE: Currently, this works
// only for string and number literals!
@@ -1164,20 +1044,23 @@ class MaterializedLiteral : public Expression {
bool NeedsInitialAllocationSite();
// Populate the constant properties/elements fixed array.
- void BuildConstants(Isolate* isolate);
+ template <typename LocalIsolate>
+ void BuildConstants(LocalIsolate* isolate);
// If the expression is a literal, return the literal value;
// if the expression is a materialized literal and is_simple
// then return an Array or Object Boilerplate Description.
// Otherwise, return an undefined literal as the placeholder
// in the object literal boilerplate.
- Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
+ template <typename LocalIsolate>
+ Handle<Object> GetBoilerplateValue(Expression* expression,
+ LocalIsolate* isolate);
};
// Node for capturing a regexp literal.
class RegExpLiteral final : public MaterializedLiteral {
public:
- Handle<String> pattern() const { return pattern_->string().get<Factory>(); }
+ Handle<String> pattern() const { return pattern_->string(); }
const AstRawString* raw_pattern() const { return pattern_; }
int flags() const { return flags_; }
@@ -1329,7 +1212,6 @@ class ObjectLiteralProperty final : public LiteralProperty {
bool emit_store_;
};
-
// An object literal has a boilerplate object that is used
// for minimizing the work when constructing it at runtime.
class ObjectLiteral final : public AggregateLiteral {
@@ -1365,8 +1247,9 @@ class ObjectLiteral final : public AggregateLiteral {
int InitDepthAndFlags();
// Get the boilerplate description, populating it if necessary.
+ template <typename LocalIsolate>
Handle<ObjectBoilerplateDescription> GetOrBuildBoilerplateDescription(
- Isolate* isolate) {
+ LocalIsolate* isolate) {
if (boilerplate_description_.is_null()) {
BuildBoilerplateDescription(isolate);
}
@@ -1374,7 +1257,8 @@ class ObjectLiteral final : public AggregateLiteral {
}
// Populate the boilerplate description.
- void BuildBoilerplateDescription(Isolate* isolate);
+ template <typename LocalIsolate>
+ void BuildBoilerplateDescription(LocalIsolate* isolate);
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@@ -1460,16 +1344,18 @@ class ArrayLiteral final : public AggregateLiteral {
int InitDepthAndFlags();
// Get the boilerplate description, populating it if necessary.
+ template <typename LocalIsolate>
Handle<ArrayBoilerplateDescription> GetOrBuildBoilerplateDescription(
- Isolate* isolate) {
+ LocalIsolate* isolate) {
if (boilerplate_description_.is_null()) {
BuildBoilerplateDescription(isolate);
}
- return boilerplate_description();
+ return boilerplate_description_;
}
// Populate the boilerplate description.
- void BuildBoilerplateDescription(Isolate* isolate);
+ template <typename LocalIsolate>
+ void BuildBoilerplateDescription(LocalIsolate* isolate);
// Determines whether the {CreateShallowArrayLiteral} builtin can be used.
bool IsFastCloningSupported() const;
@@ -1507,7 +1393,7 @@ class VariableProxy final : public Expression {
public:
bool IsValidReferenceExpression() const { return !is_new_target(); }
- Handle<String> name() const { return raw_name()->string().get<Factory>(); }
+ Handle<String> name() const { return raw_name()->string(); }
const AstRawString* raw_name() const {
return is_resolved() ? var_->raw_name() : raw_name_;
}
@@ -2039,30 +1925,6 @@ class Spread final : public Expression {
Expression* expression_;
};
-// The StoreInArrayLiteral node corresponds to the StaInArrayLiteral bytecode.
-// It is used in the rewriting of destructuring assignments that contain an
-// array rest pattern.
-class StoreInArrayLiteral final : public Expression {
- public:
- Expression* array() const { return array_; }
- Expression* index() const { return index_; }
- Expression* value() const { return value_; }
-
- private:
- friend class AstNodeFactory;
-
- StoreInArrayLiteral(Expression* array, Expression* index, Expression* value,
- int position)
- : Expression(position, kStoreInArrayLiteral),
- array_(array),
- index_(index),
- value_(value) {}
-
- Expression* array_;
- Expression* index_;
- Expression* value_;
-};
-
class Conditional final : public Expression {
public:
Expression* condition() const { return condition_; }
@@ -2218,9 +2080,9 @@ class FunctionLiteral final : public Expression {
// Empty handle means that the function does not have a shared name (i.e.
// the name will be set dynamically after creation of the function closure).
- MaybeHandle<String> name() const {
- return raw_name_ ? raw_name_->string().get<Factory>()
- : MaybeHandle<String>();
+ template <typename LocalIsolate>
+ MaybeHandle<String> GetName(LocalIsolate* isolate) const {
+ return raw_name_ ? raw_name_->AllocateFlat(isolate) : MaybeHandle<String>();
}
bool has_shared_name() const { return raw_name_ != nullptr; }
const AstConsString* raw_name() const { return raw_name_; }
@@ -2278,21 +2140,26 @@ class FunctionLiteral final : public Expression {
// Returns either name or inferred name as a cstring.
std::unique_ptr<char[]> GetDebugName() const;
- Handle<String> inferred_name() const {
+ Handle<String> GetInferredName(Isolate* isolate) {
if (!inferred_name_.is_null()) {
DCHECK_NULL(raw_inferred_name_);
return inferred_name_;
}
if (raw_inferred_name_ != nullptr) {
- return raw_inferred_name_->string().get<Factory>();
+ return raw_inferred_name_->GetString(isolate);
}
UNREACHABLE();
}
+ Handle<String> GetInferredName(OffThreadIsolate* isolate) const {
+ DCHECK(inferred_name_.is_null());
+ DCHECK_NOT_NULL(raw_inferred_name_);
+ return raw_inferred_name_->GetString(isolate);
+ }
const AstConsString* raw_inferred_name() { return raw_inferred_name_; }
// Only one of {set_inferred_name, set_raw_inferred_name} should be called.
void set_inferred_name(Handle<String> inferred_name);
- void set_raw_inferred_name(const AstConsString* raw_inferred_name);
+ void set_raw_inferred_name(AstConsString* raw_inferred_name);
bool pretenure() const { return Pretenure::decode(bit_field_); }
void set_pretenure() { bit_field_ = Pretenure::update(bit_field_, true); }
@@ -2351,7 +2218,20 @@ class FunctionLiteral final : public Expression {
return RequiresInstanceMembersInitializer::decode(bit_field_);
}
- bool requires_brand_initialization() const;
+ void set_has_static_private_methods_or_accessors(bool value) {
+ bit_field_ =
+ HasStaticPrivateMethodsOrAccessorsField::update(bit_field_, value);
+ }
+ bool has_static_private_methods_or_accessors() const {
+ return HasStaticPrivateMethodsOrAccessorsField::decode(bit_field_);
+ }
+
+ void set_class_scope_has_private_brand(bool value) {
+ bit_field_ = ClassScopeHasPrivateBrandField::update(bit_field_, value);
+ }
+ bool class_scope_has_private_brand() const {
+ return ClassScopeHasPrivateBrandField::decode(bit_field_);
+ }
bool private_name_lookup_skips_outer_class() const;
@@ -2403,7 +2283,11 @@ class FunctionLiteral final : public Expression {
HasDuplicateParameters::Next<BailoutReason, 8>;
using RequiresInstanceMembersInitializer =
DontOptimizeReasonField::Next<bool, 1>;
- using HasBracesField = RequiresInstanceMembersInitializer::Next<bool, 1>;
+ using ClassScopeHasPrivateBrandField =
+ RequiresInstanceMembersInitializer::Next<bool, 1>;
+ using HasStaticPrivateMethodsOrAccessorsField =
+ ClassScopeHasPrivateBrandField::Next<bool, 1>;
+ using HasBracesField = HasStaticPrivateMethodsOrAccessorsField::Next<bool, 1>;
using OneshotIIFEBit = HasBracesField::Next<bool, 1>;
// expected_property_count_ is the sum of instance fields and properties.
@@ -2418,7 +2302,7 @@ class FunctionLiteral final : public Expression {
const AstConsString* raw_name_;
DeclarationScope* scope_;
ZonePtrList<Statement> body_;
- const AstConsString* raw_inferred_name_;
+ AstConsString* raw_inferred_name_;
Handle<String> inferred_name_;
ProducedPreparseData* produced_preparse_data_;
};
@@ -2569,7 +2453,7 @@ class ClassLiteral final : public Expression {
class NativeFunctionLiteral final : public Expression {
public:
- Handle<String> name() const { return name_->string().get<Factory>(); }
+ Handle<String> name() const { return name_->string(); }
const AstRawString* raw_name() const { return name_; }
v8::Extension* extension() const { return extension_; }
@@ -2663,7 +2547,9 @@ class GetTemplateObject final : public Expression {
return raw_strings_;
}
- Handle<TemplateObjectDescription> GetOrBuildDescription(Isolate* isolate);
+ template <typename LocalIsolate>
+ Handle<TemplateObjectDescription> GetOrBuildDescription(
+ LocalIsolate* isolate);
private:
friend class AstNodeFactory;
@@ -2825,59 +2711,46 @@ class AstNodeFactory final {
}
Block* NewBlock(int capacity, bool ignore_completion_value) {
- return new (zone_) Block(zone_, nullptr, capacity, ignore_completion_value);
+ return new (zone_) Block(zone_, capacity, ignore_completion_value, false);
}
- Block* NewBlock(bool ignore_completion_value,
- ZonePtrList<const AstRawString>* labels) {
- return labels != nullptr
- ? new (zone_) LabeledBlock(labels, ignore_completion_value)
- : new (zone_) Block(labels, ignore_completion_value);
+ Block* NewBlock(bool ignore_completion_value, bool is_breakable) {
+ return new (zone_) Block(ignore_completion_value, is_breakable);
}
Block* NewBlock(bool ignore_completion_value,
const ScopedPtrList<Statement>& statements) {
- Block* result = NewBlock(ignore_completion_value, nullptr);
+ Block* result = NewBlock(ignore_completion_value, false);
result->InitializeStatements(statements, zone_);
return result;
}
-#define STATEMENT_WITH_LABELS(NodeType) \
- NodeType* New##NodeType(ZonePtrList<const AstRawString>* labels, \
- ZonePtrList<const AstRawString>* own_labels, \
- int pos) { \
- return new (zone_) NodeType(labels, own_labels, pos); \
- }
- STATEMENT_WITH_LABELS(DoWhileStatement)
- STATEMENT_WITH_LABELS(WhileStatement)
- STATEMENT_WITH_LABELS(ForStatement)
-#undef STATEMENT_WITH_LABELS
+#define STATEMENT_WITH_POSITION(NodeType) \
+ NodeType* New##NodeType(int pos) { return new (zone_) NodeType(pos); }
+ STATEMENT_WITH_POSITION(DoWhileStatement)
+ STATEMENT_WITH_POSITION(WhileStatement)
+ STATEMENT_WITH_POSITION(ForStatement)
+#undef STATEMENT_WITH_POSITION
- SwitchStatement* NewSwitchStatement(ZonePtrList<const AstRawString>* labels,
- Expression* tag, int pos) {
- return new (zone_) SwitchStatement(zone_, labels, tag, pos);
+ SwitchStatement* NewSwitchStatement(Expression* tag, int pos) {
+ return new (zone_) SwitchStatement(zone_, tag, pos);
}
- ForEachStatement* NewForEachStatement(
- ForEachStatement::VisitMode visit_mode,
- ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos) {
+ ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
+ int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
- return new (zone_) ForInStatement(labels, own_labels, pos);
+ return new (zone_) ForInStatement(pos);
}
case ForEachStatement::ITERATE: {
- return new (zone_)
- ForOfStatement(labels, own_labels, pos, IteratorType::kNormal);
+ return new (zone_) ForOfStatement(pos, IteratorType::kNormal);
}
}
UNREACHABLE();
}
- ForOfStatement* NewForOfStatement(ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels,
- int pos, IteratorType type) {
- return new (zone_) ForOfStatement(labels, own_labels, pos, type);
+ ForOfStatement* NewForOfStatement(int pos, IteratorType type) {
+ return new (zone_) ForOfStatement(pos, type);
}
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
@@ -3176,12 +3049,6 @@ class AstNodeFactory final {
return new (zone_) Spread(expression, pos, expr_pos);
}
- StoreInArrayLiteral* NewStoreInArrayLiteral(Expression* array,
- Expression* index,
- Expression* value, int pos) {
- return new (zone_) StoreInArrayLiteral(array, index, value, pos);
- }
-
Conditional* NewConditional(Expression* condition,
Expression* then_expression,
Expression* else_expression,
@@ -3292,11 +3159,6 @@ class AstNodeFactory final {
return new (zone_) NativeFunctionLiteral(name, extension, pos);
}
- DoExpression* NewDoExpression(Block* block, Variable* result_var, int pos) {
- VariableProxy* result = NewVariableProxy(result_var, pos);
- return new (zone_) DoExpression(block, result, pos);
- }
-
SuperPropertyReference* NewSuperPropertyReference(Expression* home_object,
int pos) {
return new (zone_) SuperPropertyReference(home_object, pos);
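The ast.h hunks above replace per-node label lists with a single breakability bit and a bare source position. A rough sketch of what that means for call sites (the parser itself is not part of this diff, so the surrounding code here is illustrative only):

    // Before: label lists were threaded through every factory call.
    Block* block = factory->NewBlock(ignore_completion, labels);
    ForStatement* loop = factory->NewForStatement(labels, own_labels, pos);

    // After: only a breakability bit and a position survive; label
    // bookkeeping moves to runtime.
    Block* block = factory->NewBlock(ignore_completion,
                                     /*is_breakable=*/labels != nullptr);
    ForStatement* loop = factory->NewForStatement(pos);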
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 533a11524c..99371306fc 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -5,6 +5,7 @@
#include "src/ast/modules.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
#include "src/parsing/pending-compilation-error-handler.h"
@@ -84,17 +85,17 @@ void SourceTextModuleDescriptor::AddStarExport(
}
namespace {
-Handle<PrimitiveHeapObject> ToStringOrUndefined(Isolate* isolate,
+template <typename LocalIsolate>
+Handle<PrimitiveHeapObject> ToStringOrUndefined(LocalIsolate* isolate,
const AstRawString* s) {
- return (s == nullptr)
- ? Handle<PrimitiveHeapObject>::cast(
- isolate->factory()->undefined_value())
- : Handle<PrimitiveHeapObject>::cast(s->string().get<Factory>());
+ if (s == nullptr) return isolate->factory()->undefined_value();
+ return s->string();
}
} // namespace
+template <typename LocalIsolate>
Handle<SourceTextModuleInfoEntry> SourceTextModuleDescriptor::Entry::Serialize(
- Isolate* isolate) const {
+ LocalIsolate* isolate) const {
CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier?
return SourceTextModuleInfoEntry::New(
isolate, ToStringOrUndefined(isolate, export_name),
@@ -102,9 +103,14 @@ Handle<SourceTextModuleInfoEntry> SourceTextModuleDescriptor::Entry::Serialize(
ToStringOrUndefined(isolate, import_name), module_request, cell_index,
location.beg_pos, location.end_pos);
}
+template Handle<SourceTextModuleInfoEntry>
+SourceTextModuleDescriptor::Entry::Serialize(Isolate* isolate) const;
+template Handle<SourceTextModuleInfoEntry>
+SourceTextModuleDescriptor::Entry::Serialize(OffThreadIsolate* isolate) const;
+template <typename LocalIsolate>
Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
- Isolate* isolate, Zone* zone) const {
+ LocalIsolate* isolate, Zone* zone) const {
// We serialize regular exports in a way that lets us later iterate over their
// local names and for each local name immediately access all its export
// names. (Regular exports have neither import name nor module request.)
@@ -127,7 +133,7 @@ Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(count);
data[index + SourceTextModuleInfo::kRegularExportLocalNameOffset] =
- it->second->local_name->string().get<Factory>();
+ it->second->local_name->string();
data[index + SourceTextModuleInfo::kRegularExportCellIndexOffset] =
handle(Smi::FromInt(it->second->cell_index), isolate);
data[index + SourceTextModuleInfo::kRegularExportExportNamesOffset] =
@@ -137,7 +143,7 @@ Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
// Collect the export names.
int i = 0;
for (; it != next; ++it) {
- export_names->set(i++, *it->second->export_name->string().get<Factory>());
+ export_names->set(i++, *it->second->export_name->string());
}
DCHECK_EQ(i, count);
@@ -155,6 +161,10 @@ Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
}
return result;
}
+template Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
+ Isolate* isolate, Zone* zone) const;
+template Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
+ OffThreadIsolate* isolate, Zone* zone) const;
void SourceTextModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
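Serialize and SerializeRegularExports are now templates over the isolate type while their definitions stay in the .cc file, which is why the explicit instantiations above are needed for both Isolate and OffThreadIsolate. A minimal, self-contained sketch of the same pattern (Serialize here is a hypothetical free function; only the shape matches the diff):

    // widget.h
    template <typename LocalIsolate>
    Handle<FixedArray> Serialize(LocalIsolate* isolate);

    // widget.cc: the definition is out of line, so every isolate type a
    // caller may use must be instantiated explicitly at the end of the file.
    template <typename LocalIsolate>
    Handle<FixedArray> Serialize(LocalIsolate* isolate) { /* ... */ }

    template Handle<FixedArray> Serialize<Isolate>(Isolate*);
    template Handle<FixedArray> Serialize<OffThreadIsolate>(OffThreadIsolate*);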
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index 4921d41932..b57387b25f 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -107,7 +107,8 @@ class SourceTextModuleDescriptor : public ZoneObject {
module_request(-1),
cell_index(0) {}
- Handle<SourceTextModuleInfoEntry> Serialize(Isolate* isolate) const;
+ template <typename LocalIsolate>
+ Handle<SourceTextModuleInfoEntry> Serialize(LocalIsolate* isolate) const;
};
enum CellIndexKind { kInvalid, kExport, kImport };
@@ -184,7 +185,8 @@ class SourceTextModuleDescriptor : public ZoneObject {
namespace_imports_.push_back(entry);
}
- Handle<FixedArray> SerializeRegularExports(Isolate* isolate,
+ template <typename LocalIsolate>
+ Handle<FixedArray> SerializeRegularExports(LocalIsolate* isolate,
Zone* zone) const;
private:
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 4d8bd006c5..20dca56cc4 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -17,7 +17,8 @@
namespace v8 {
namespace internal {
-CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
+CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js,
+ SpreadErrorInArgsHint error_in_spread_args)
: builder_(new IncrementalStringBuilder(isolate)) {
isolate_ = isolate;
position_ = 0;
@@ -30,6 +31,8 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
destructuring_prop_ = nullptr;
destructuring_assignment_ = nullptr;
is_user_js_ = is_user_js;
+ error_in_spread_args_ = error_in_spread_args;
+ spread_arg_ = nullptr;
function_kind_ = kNormalFunction;
InitializeAstVisitor(isolate);
}
@@ -235,9 +238,6 @@ void CallPrinter::VisitInitializeClassMembersStatement(
void CallPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {}
-void CallPrinter::VisitDoExpression(DoExpression* node) { Find(node->block()); }
-
-
void CallPrinter::VisitConditional(Conditional* node) {
Find(node->condition());
Find(node->then_expression());
@@ -404,9 +404,20 @@ void CallPrinter::VisitProperty(Property* node) {
void CallPrinter::VisitCall(Call* node) {
bool was_found = false;
if (node->position() == position_) {
+ if (error_in_spread_args_ == SpreadErrorInArgsHint::kErrorInArgs) {
+ found_ = true;
+ spread_arg_ = node->arguments()->last()->AsSpread()->expression();
+ Find(spread_arg_, true);
+
+ done_ = true;
+ found_ = false;
+ return;
+ }
+
is_call_error_ = true;
was_found = !found_;
}
+
if (was_found) {
// Bail out if the error is caused by a direct call to a variable in
// non-user JS code. The variable name is meaningless due to minification.
@@ -429,6 +440,16 @@ void CallPrinter::VisitCall(Call* node) {
void CallPrinter::VisitCallNew(CallNew* node) {
bool was_found = false;
if (node->position() == position_) {
+ if (error_in_spread_args_ == SpreadErrorInArgsHint::kErrorInArgs) {
+ found_ = true;
+ spread_arg_ = node->arguments()->last()->AsSpread()->expression();
+ Find(spread_arg_, true);
+
+ done_ = true;
+ found_ = false;
+ return;
+ }
+
is_call_error_ = true;
was_found = !found_;
}
@@ -515,12 +536,6 @@ void CallPrinter::VisitSpread(Spread* node) {
Print(")");
}
-void CallPrinter::VisitStoreInArrayLiteral(StoreInArrayLiteral* node) {
- Find(node->array());
- Find(node->index());
- Find(node->value());
-}
-
void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
UNREACHABLE();
}
@@ -587,7 +602,7 @@ void CallPrinter::PrintLiteral(Handle<Object> value, bool quote) {
void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
- PrintLiteral(value->string().get<Factory>(), quote);
+ PrintLiteral(value->string(), quote);
}
//-----------------------------------------------------------------------------
@@ -638,15 +653,6 @@ void AstPrinter::Print(const char* format, ...) {
}
}
-void AstPrinter::PrintLabels(ZonePtrList<const AstRawString>* labels) {
- if (labels != nullptr) {
- for (int i = 0; i < labels->length(); i++) {
- PrintLiteral(labels->at(i), false);
- Print(": ");
- }
- }
-}
-
void AstPrinter::PrintLiteral(Literal* literal, bool quote) {
switch (literal->type()) {
case Literal::kString:
@@ -798,16 +804,6 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info, Variable* var,
}
}
-void AstPrinter::PrintLabelsIndented(ZonePtrList<const AstRawString>* labels,
- const char* prefix) {
- if (labels == nullptr || labels->length() == 0) return;
- PrintIndented(prefix);
- Print("LABELS ");
- PrintLabels(labels);
- Print("\n");
-}
-
-
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
if (node != nullptr) {
IndentedScope indent(this, s, node->position());
@@ -832,6 +828,12 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
if (program->requires_instance_members_initializer()) {
Print(" REQUIRES INSTANCE FIELDS INITIALIZER\n");
}
+ if (program->class_scope_has_private_brand()) {
+ Print(" CLASS SCOPE HAS PRIVATE BRAND\n");
+ }
+ if (program->has_static_private_methods_or_accessors()) {
+ Print(" HAS STATIC PRIVATE METHODS\n");
+ }
PrintParameters(program->scope());
PrintDeclarations(program->scope()->declarations());
PrintStatements(program->body());
@@ -881,7 +883,6 @@ void AstPrinter::VisitBlock(Block* node) {
const char* block_txt =
node->ignore_completion_value() ? "BLOCK NOCOMPLETIONS" : "BLOCK";
IndentedScope indent(this, block_txt, node->position());
- PrintLabelsIndented(node->labels());
PrintStatements(node->statements());
}
@@ -932,13 +933,11 @@ void AstPrinter::VisitIfStatement(IfStatement* node) {
void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
IndentedScope indent(this, "CONTINUE", node->position());
- PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitBreakStatement(BreakStatement* node) {
IndentedScope indent(this, "BREAK", node->position());
- PrintLabelsIndented(node->target()->labels());
}
@@ -957,7 +956,6 @@ void AstPrinter::VisitWithStatement(WithStatement* node) {
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
IndentedScope indent(this, "SWITCH", node->position());
- PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (CaseClause* clause : *node->cases()) {
if (clause->is_default()) {
@@ -974,8 +972,6 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO", node->position());
- PrintLabelsIndented(node->labels());
- PrintLabelsIndented(node->own_labels(), "OWN ");
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
}
@@ -983,8 +979,6 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE", node->position());
- PrintLabelsIndented(node->labels());
- PrintLabelsIndented(node->own_labels(), "OWN ");
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
}
@@ -992,8 +986,6 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR", node->position());
- PrintLabelsIndented(node->labels());
- PrintLabelsIndented(node->own_labels(), "OWN ");
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -1003,8 +995,6 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
IndentedScope indent(this, "FOR IN", node->position());
- PrintLabelsIndented(node->labels());
- PrintLabelsIndented(node->own_labels(), "OWN ");
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->subject());
PrintIndentedVisit("BODY", node->body());
@@ -1013,8 +1003,6 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
IndentedScope indent(this, "FOR OF", node->position());
- PrintLabelsIndented(node->labels());
- PrintLabelsIndented(node->own_labels(), "OWN ");
const char* for_type;
switch (node->type()) {
case IteratorType::kNormal:
@@ -1155,12 +1143,6 @@ void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
}
-void AstPrinter::VisitDoExpression(DoExpression* node) {
- IndentedScope indent(this, "DO EXPRESSION", node->position());
- PrintStatements(node->block()->statements());
-}
-
-
void AstPrinter::VisitConditional(Conditional* node) {
IndentedScope indent(this, "CONDITIONAL", node->position());
PrintIndentedVisit("CONDITION", node->condition());
@@ -1428,13 +1410,6 @@ void AstPrinter::VisitSpread(Spread* node) {
Visit(node->expression());
}
-void AstPrinter::VisitStoreInArrayLiteral(StoreInArrayLiteral* node) {
- IndentedScope indent(this, "STORE IN ARRAY LITERAL", node->position());
- PrintIndentedVisit("ARRAY", node->array());
- PrintIndentedVisit("INDEX", node->index());
- PrintIndentedVisit("VALUE", node->value());
-}
-
void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
IndentedScope indent(this, "()", node->position());
}
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 795436d422..4b939c7d17 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -19,7 +19,11 @@ class IncrementalStringBuilder; // to avoid including string-builder-inl.h
class CallPrinter final : public AstVisitor<CallPrinter> {
public:
- explicit CallPrinter(Isolate* isolate, bool is_user_js);
+ enum class SpreadErrorInArgsHint { kErrorInArgs, kNoErrorInArgs };
+
+ explicit CallPrinter(Isolate* isolate, bool is_user_js,
+ SpreadErrorInArgsHint error_in_spread_args =
+ SpreadErrorInArgsHint::kNoErrorInArgs);
~CallPrinter();
// The following routine prints the node with position |position| into a
@@ -32,7 +36,9 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
kCallAndNormalIterator,
kCallAndAsyncIterator
};
+
ErrorHint GetErrorHint() const;
+ Expression* spread_arg() const { return spread_arg_; }
ObjectLiteralProperty* destructuring_prop() const {
return destructuring_prop_;
}
@@ -62,8 +68,10 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
bool is_iterator_error_;
bool is_async_iterator_error_;
bool is_call_error_;
+ SpreadErrorInArgsHint error_in_spread_args_;
ObjectLiteralProperty* destructuring_prop_;
Assignment* destructuring_assignment_;
+ Expression* spread_arg_;
FunctionKind function_kind_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
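A hedged sketch of how the new hint is meant to be used (the call site is illustrative; only CallPrinter's own names come from this diff): when a message formatter already knows the error originated in a call's spread arguments, it constructs the printer with kErrorInArgs and reads back the offending sub-expression afterwards:

    CallPrinter printer(isolate, /*is_user_js=*/true,
                        CallPrinter::SpreadErrorInArgsHint::kErrorInArgs);
    printer.Print(program, error_position);      // illustrative signature
    Expression* culprit = printer.spread_arg();  // the last spread's expression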
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index b422e56c4f..8c13556db9 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -11,6 +11,7 @@
#include "src/base/optional.h"
#include "src/builtins/accessors.h"
#include "src/common/message-template.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/module-inl.h"
@@ -572,10 +573,11 @@ bool DeclarationScope::Analyze(ParseInfo* info) {
DeclarationScope* scope = info->literal()->scope();
base::Optional<AllowHandleDereference> allow_deref;
- if (!info->maybe_outer_scope_info().is_null()) {
- // Allow dereferences to the scope info if there is one.
+#ifdef DEBUG
+ if (scope->outer_scope() && !scope->outer_scope()->scope_info_.is_null()) {
allow_deref.emplace();
}
+#endif
if (scope->is_eval_scope() && is_sloppy(scope->language_mode())) {
AstNodeFactory factory(info->ast_value_factory(), info->zone());
@@ -835,7 +837,8 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
DCHECK_NULL(cache->variables_.Lookup(name));
DisallowHeapAllocation no_gc;
- String name_handle = *name->string().get<Factory>();
+ String name_handle = *name->string();
+ ScopeInfo scope_info = *scope_info_;
// The Scope is backed by ScopeInfo. This means it cannot operate in a
// heap-independent mode, and all strings must be internalized immediately. So
// it's ok to get the Handle<String> here.
@@ -850,21 +853,21 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
{
location = VariableLocation::CONTEXT;
- index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag,
- &is_static_flag);
+ index =
+ ScopeInfo::ContextSlotIndex(scope_info, name_handle, &mode, &init_flag,
+ &maybe_assigned_flag, &is_static_flag);
found = index >= 0;
}
if (!found && is_module_scope()) {
location = VariableLocation::MODULE;
- index = scope_info_->ModuleIndex(name_handle, &mode, &init_flag,
- &maybe_assigned_flag);
+ index = scope_info.ModuleIndex(name_handle, &mode, &init_flag,
+ &maybe_assigned_flag);
found = index != 0;
}
if (!found) {
- index = scope_info_->FunctionContextSlotIndex(name_handle);
+ index = scope_info.FunctionContextSlotIndex(name_handle);
if (index < 0) return nullptr; // Nowhere found.
Variable* var = AsDeclarationScope()->DeclareFunctionVar(name, cache);
DCHECK_EQ(VariableMode::kConst, var->mode());
@@ -873,7 +876,7 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
}
if (!is_module_scope()) {
- DCHECK_NE(index, scope_info_->ReceiverContextSlotIndex());
+ DCHECK_NE(index, scope_info.ReceiverContextSlotIndex());
}
bool was_added;
@@ -1236,7 +1239,7 @@ bool DeclarationScope::AllocateVariables(ParseInfo* info) {
return false;
}
- if (!ResolveVariablesRecursively(info)) {
+ if (!ResolveVariablesRecursively(info->scope())) {
DCHECK(info->pending_error_handler()->has_pending_error());
return false;
}
@@ -1427,9 +1430,8 @@ bool Scope::IsOuterScopeOf(Scope* other) const {
}
void Scope::CollectNonLocals(DeclarationScope* max_outer_scope,
- Isolate* isolate, ParseInfo* info,
- Handle<StringSet>* non_locals) {
- this->ForEach([max_outer_scope, isolate, info, non_locals](Scope* scope) {
+ Isolate* isolate, Handle<StringSet>* non_locals) {
+ this->ForEach([max_outer_scope, isolate, non_locals](Scope* scope) {
// Module variables must be allocated before variable resolution
// to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (scope->is_module_scope()) {
@@ -1451,7 +1453,7 @@ void Scope::CollectNonLocals(DeclarationScope* max_outer_scope,
// In this case we need to leave scopes in a way that they can be
// allocated. If we resolved variables from lazy parsed scopes, we need
// to context allocate the var.
- scope->ResolveTo(info, proxy, var);
+ scope->ResolveTo(proxy, var);
if (!var->is_dynamic() && lookup != scope)
var->ForceContextAllocation();
}
@@ -1500,8 +1502,8 @@ void Scope::AnalyzePartially(DeclarationScope* max_outer_scope,
}
Handle<StringSet> DeclarationScope::CollectNonLocals(
- Isolate* isolate, ParseInfo* info, Handle<StringSet> non_locals) {
- Scope::CollectNonLocals(this, isolate, info, &non_locals);
+ Isolate* isolate, Handle<StringSet> non_locals) {
+ Scope::CollectNonLocals(this, isolate, &non_locals);
return non_locals;
}
@@ -2105,12 +2107,11 @@ Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope,
return var;
}
-void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
- DCHECK(info->script_scope()->is_script_scope());
+void Scope::ResolveVariable(VariableProxy* proxy) {
DCHECK(!proxy->is_resolved());
Variable* var = Lookup<kParsedScope>(proxy, this, nullptr);
DCHECK_NOT_NULL(var);
- ResolveTo(info, proxy, var);
+ ResolveTo(proxy, var);
}
namespace {
@@ -2173,7 +2174,7 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
} // anonymous namespace
-void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
+void Scope::ResolveTo(VariableProxy* proxy, Variable* var) {
DCHECK_NOT_NULL(var);
UpdateNeedsHoleCheck(var, proxy, this);
proxy->BindTo(var);
@@ -2195,14 +2196,12 @@ void Scope::ResolvePreparsedVariable(VariableProxy* proxy, Scope* scope,
}
}
-bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
- DCHECK(info->script_scope()->is_script_scope());
+bool Scope::ResolveVariablesRecursively(Scope* end) {
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
// scopes.
if (WasLazilyParsed(this)) {
DCHECK_EQ(variables_.occupancy(), 0);
- Scope* end = info->scope();
// Resolve in all parsed scopes except for the script scope.
if (!end->is_script_scope()) end = end->outer_scope();
@@ -2212,13 +2211,13 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
} else {
// Resolve unresolved variables for this scope.
for (VariableProxy* proxy : unresolved_list_) {
- ResolveVariable(info, proxy);
+ ResolveVariable(proxy);
}
// Resolve unresolved variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr;
scope = scope->sibling_) {
- if (!scope->ResolveVariablesRecursively(info)) return false;
+ if (!scope->ResolveVariablesRecursively(end)) return false;
}
}
return true;
@@ -2449,7 +2448,8 @@ void Scope::AllocateVariablesRecursively() {
});
}
-void Scope::AllocateScopeInfosRecursively(Isolate* isolate,
+template <typename LocalIsolate>
+void Scope::AllocateScopeInfosRecursively(LocalIsolate* isolate,
MaybeHandle<ScopeInfo> outer_scope) {
DCHECK(scope_info_.is_null());
MaybeHandle<ScopeInfo> next_outer_scope = outer_scope;
@@ -2470,6 +2470,13 @@ void Scope::AllocateScopeInfosRecursively(Isolate* isolate,
}
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void Scope::
+ AllocateScopeInfosRecursively<Isolate>(Isolate* isolate,
+ MaybeHandle<ScopeInfo> outer_scope);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void Scope::
+ AllocateScopeInfosRecursively<OffThreadIsolate>(
+ OffThreadIsolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
+
void DeclarationScope::RecalcPrivateNameContextChain() {
// The outermost scope in a class heritage expression is marked to skip the
// class scope during private name resolution. It is possible, however, that
@@ -2512,7 +2519,9 @@ void DeclarationScope::RecordNeedsPrivateNameContextChainRecalc() {
}
// static
-void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) {
+template <typename LocalIsolate>
+void DeclarationScope::AllocateScopeInfos(ParseInfo* info,
+ LocalIsolate* isolate) {
DeclarationScope* scope = info->literal()->scope();
// No one else should have allocated a scope info for this scope yet.
@@ -2520,6 +2529,7 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) {
MaybeHandle<ScopeInfo> outer_scope;
if (scope->outer_scope_ != nullptr) {
+ DCHECK((std::is_same<Isolate, v8::internal::Isolate>::value));
outer_scope = scope->outer_scope_->scope_info_;
}
@@ -2540,11 +2550,15 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) {
// Ensuring that the outer script scope has a scope info avoids having a
// special case for native contexts vs other contexts.
if (info->script_scope() && info->script_scope()->scope_info_.is_null()) {
- info->script_scope()->scope_info_ =
- handle(ScopeInfo::Empty(isolate), isolate);
+ info->script_scope()->scope_info_ = isolate->factory()->empty_scope_info();
}
}
+template V8_EXPORT_PRIVATE void DeclarationScope::AllocateScopeInfos<Isolate>(
+ ParseInfo* info, Isolate* isolate);
+template V8_EXPORT_PRIVATE void DeclarationScope::AllocateScopeInfos<
+ OffThreadIsolate>(ParseInfo* info, OffThreadIsolate* isolate);
+
int Scope::ContextLocalCount() const {
if (num_heap_slots() == 0) return 0;
Variable* function =
@@ -2655,7 +2669,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
DCHECK_NULL(LookupLocalPrivateName(name));
DisallowHeapAllocation no_gc;
- String name_handle = *name->string().get<Factory>();
+ String name_handle = *name->string();
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
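Scope-info allocation is likewise templated now, and ResolveVariablesRecursively takes the boundary scope directly instead of digging it out of ParseInfo. A sketch of the resulting call shapes (call sites are illustrative, not part of this diff):

    // Main-thread and off-thread finalization can both allocate scope infos:
    DeclarationScope::AllocateScopeInfos(info, isolate);             // Isolate*
    DeclarationScope::AllocateScopeInfos(info, off_thread_isolate);  // OffThreadIsolate*

    // Resolution stops at an explicit end scope rather than at whatever
    // ParseInfo happened to carry:
    scope->ResolveVariablesRecursively(info->scope());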
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 4955c5902b..08bbc696d9 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -645,9 +645,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool force_context_allocation);
static void ResolvePreparsedVariable(VariableProxy* proxy, Scope* scope,
Scope* end);
- void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
- void ResolveVariable(ParseInfo* info, VariableProxy* proxy);
- V8_WARN_UNUSED_RESULT bool ResolveVariablesRecursively(ParseInfo* info);
+ void ResolveTo(VariableProxy* proxy, Variable* var);
+ void ResolveVariable(VariableProxy* proxy);
+ V8_WARN_UNUSED_RESULT bool ResolveVariablesRecursively(Scope* end);
// Finds free variables of this scope. This mutates the unresolved variables
// list along the way, so full resolution cannot be done afterwards.
@@ -656,7 +656,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
UnresolvedList* new_unresolved_list,
bool maybe_in_arrowhead);
void CollectNonLocals(DeclarationScope* max_outer_scope, Isolate* isolate,
- ParseInfo* info, Handle<StringSet>* non_locals);
+ Handle<StringSet>* non_locals);
// Predicates.
bool MustAllocate(Variable* var);
@@ -670,7 +670,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
V8_INLINE void AllocateNonParameterLocalsAndDeclaredGlobals();
void AllocateVariablesRecursively();
- void AllocateScopeInfosRecursively(Isolate* isolate,
+ template <typename LocalIsolate>
+ void AllocateScopeInfosRecursively(LocalIsolate* isolate,
MaybeHandle<ScopeInfo> outer_scope);
void AllocateDebuggerScopeInfos(Isolate* isolate,
@@ -691,6 +692,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void SetDefaults();
+ void set_scope_info(Handle<ScopeInfo> scope_info);
+
friend class DeclarationScope;
friend class ClassScope;
friend class ScopeTestHelper;
@@ -1098,9 +1101,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Allocate ScopeInfos for top scope and any inner scopes that need them.
// Does nothing if ScopeInfo is already allocated.
- static void AllocateScopeInfos(ParseInfo* info, Isolate* isolate);
+ template <typename LocalIsolate>
+ V8_EXPORT_PRIVATE static void AllocateScopeInfos(ParseInfo* info,
+ LocalIsolate* isolate);
- Handle<StringSet> CollectNonLocals(Isolate* isolate, ParseInfo* info,
+ Handle<StringSet> CollectNonLocals(Isolate* isolate,
Handle<StringSet> non_locals);
// Determine if we can use lazy compilation for this scope.
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 3a8ca8888f..a3a5199620 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -8,6 +8,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/base/threaded-list.h"
#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -57,7 +58,7 @@ class Variable final : public ZoneObject {
// parameter initializers.
void set_scope(Scope* scope) { scope_ = scope; }
- Handle<String> name() const { return name_->string().get<Factory>(); }
+ Handle<String> name() const { return name_->string(); }
const AstRawString* raw_name() const { return name_; }
VariableMode mode() const { return VariableModeField::decode(bit_field_); }
void set_mode(VariableMode mode) {
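This is the same mechanical change that recurs throughout the diff: AstRawString::string() now hands back a Handle<String> directly, so call sites drop the .get<Factory>() step:

    Handle<String> name = raw_name()->string();                     // new
    // Handle<String> name = raw_name()->string().get<Factory>();   // old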
diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h
index 0f4809f9e8..9ef6160d2a 100644
--- a/deps/v8/src/base/address-region.h
+++ b/deps/v8/src/base/address-region.h
@@ -66,6 +66,16 @@ class AddressRegion {
};
ASSERT_TRIVIALLY_COPYABLE(AddressRegion);
+// Construct an AddressRegion from anything providing a {data()} and {size()}
+// accessor.
+template <typename Container,
+ typename = decltype(std::declval<Container>().data()),
+ typename = decltype(std::declval<Container>().size())>
+inline constexpr AddressRegion AddressRegionOf(Container&& c) {
+ return AddressRegion{reinterpret_cast<AddressRegion::Address>(c.data()),
+ sizeof(*c.data()) * c.size()};
+}
+
inline std::ostream& operator<<(std::ostream& out, AddressRegion region) {
return out << "[" << reinterpret_cast<void*>(region.begin()) << "+"
<< region.size() << "]";
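Usage sketch for the new AddressRegionOf helper, which accepts anything exposing data() and size() accessors:

    std::vector<uint8_t> buffer(64);
    v8::base::AddressRegion region = v8::base::AddressRegionOf(buffer);
    // region spans [&buffer[0], &buffer[0] + 64), i.e.
    // sizeof(uint8_t) * 64 bytes starting at the vector's backing store.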
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 7b6bd85155..5d51510e3c 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -105,6 +105,16 @@ class AsAtomicImpl {
}
template <typename T>
+ static T Relaxed_CompareAndSwap(
+ T* addr, typename std::remove_reference<T>::type old_value,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
+ return cast_helper<T>::to_return_type(base::Relaxed_CompareAndSwap(
+ to_storage_addr(addr), cast_helper<T>::to_storage_type(old_value),
+ cast_helper<T>::to_storage_type(new_value)));
+ }
+
+ template <typename T>
static T AcquireRelease_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
@@ -120,13 +130,14 @@ class AsAtomicImpl {
static bool SetBits(T* addr, T bits, T mask) {
STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
DCHECK_EQ(bits & ~mask, static_cast<T>(0));
- T old_value;
- T new_value;
+ T old_value = Relaxed_Load(addr);
+ T new_value, old_value_before_cas;
do {
- old_value = Relaxed_Load(addr);
if ((old_value & mask) == bits) return false;
new_value = (old_value & ~mask) | bits;
- } while (Release_CompareAndSwap(addr, old_value, new_value) != old_value);
+ old_value_before_cas = old_value;
+ old_value = Release_CompareAndSwap(addr, old_value, new_value);
+ } while (old_value != old_value_before_cas);
return true;
}
@@ -192,6 +203,16 @@ inline void CheckedDecrement(std::atomic<T>* number, T amount) {
USE(old);
}
+template <typename T>
+V8_INLINE std::atomic<T>* AsAtomicPtr(T* t) {
+ return reinterpret_cast<std::atomic<T>*>(t);
+}
+
+template <typename T>
+V8_INLINE const std::atomic<T>* AsAtomicPtr(const T* t) {
+ return reinterpret_cast<const std::atomic<T>*>(t);
+}
+
} // namespace base
} // namespace v8
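The SetBits rewrite above leans on CompareAndSwap returning the previously observed value: instead of reloading the word at the top of every retry, the loop feeds the CAS result back in as the next expected value. The same pattern expressed with std::atomic, for illustration only:

    bool SetBits(std::atomic<uint32_t>* word, uint32_t bits, uint32_t mask) {
      uint32_t old_value = word->load(std::memory_order_relaxed);
      while (true) {
        if ((old_value & mask) == bits) return false;  // nothing to change
        uint32_t new_value = (old_value & ~mask) | bits;
        if (word->compare_exchange_weak(old_value, new_value,
                                        std::memory_order_release,
                                        std::memory_order_relaxed)) {
          return true;
        }
        // On failure, compare_exchange_weak has already refreshed old_value
        // with the word's current contents, so no separate reload is needed.
      }
    }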
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 35b2732949..11c41545ab 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -57,77 +57,66 @@ using Atomic64 = intptr_t;
using AtomicWord = intptr_t;
// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
+// result = *ptr;
+// if (result == old_value)
+// *ptr = new_value;
+// return result;
//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
+// I.e., replace |*ptr| with |new_value| if |*ptr| used to be |old_value|.
+// Always return the value of |*ptr| before the operation.
+// Acquire, Relaxed, and Release correspond to the standard C++ memory orders.
Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, Atomic16 old_value,
Atomic16 new_value);
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
+ Atomic32 new_value);
+Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value);
Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
+ Atomic32 new_value);
+#ifdef V8_HOST_ARCH_64_BIT
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value);
+Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
+ Atomic64 new_value);
+#endif // V8_HOST_ARCH_64_BIT
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
+// Atomically store new_value into |*ptr|, returning the previous value held in
+// |*ptr|.
Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+#ifdef V8_HOST_ARCH_64_BIT
+Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+#endif // V8_HOST_ARCH_64_BIT
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
+// Atomically increment |*ptr| by |increment|. Returns the new value of
+// |*ptr| with the increment applied.
Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
-Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment);
-
-// These following lower-level operations are typically useful only to people
-// implementing higher-level synchronization operations like spinlocks,
-// mutexes, and condition-variables. They combine CompareAndSwap(), a load,
-// or a store with appropriate memory-ordering instructions. "Acquire"
-// operations ensure that no later memory access can be reordered ahead of the
-// operation. "Release" operations ensure that no previous memory access can
-// be reordered after the operation. "Fence" operations have both "Acquire"
-// and "Release" semantics. A SeqCst_MemoryFence() has "Fence" semantics, but
-// does no memory access.
-Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
-Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
-Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value);
+#ifdef V8_HOST_ARCH_64_BIT
+Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+#endif // V8_HOST_ARCH_64_BIT
void SeqCst_MemoryFence();
+
void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value);
void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+#ifdef V8_HOST_ARCH_64_BIT
+void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+#endif // V8_HOST_ARCH_64_BIT
Atomic8 Relaxed_Load(volatile const Atomic8* ptr);
Atomic16 Relaxed_Load(volatile const Atomic16* ptr);
Atomic32 Relaxed_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-
-// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
-Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-
-Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value);
-void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
-void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
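The reworked header comment pins down the CAS contract: store |new_value| only if |*ptr| equals |old_value|, and in all cases return the value |*ptr| held before the operation. The same contract expressed against std::atomic (an illustration, not the implementation in this diff):

    Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
                                    Atomic32 new_value) {
      auto* a = reinterpret_cast<volatile std::atomic<Atomic32>*>(ptr);
      std::atomic_compare_exchange_strong_explicit(
          a, &old_value, new_value, std::memory_order_relaxed,
          std::memory_order_relaxed);
      // On success old_value is untouched (it matched); on failure it was
      // overwritten with the observed value. Either way it is the prior *ptr.
      return old_value;
    }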
diff --git a/deps/v8/src/base/atomicops_internals_atomicword_compat.h b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
index 7deacf2f45..5ed7d5594e 100644
--- a/deps/v8/src/base/atomicops_internals_atomicword_compat.h
+++ b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
@@ -42,12 +42,6 @@ inline AtomicWord Relaxed_AtomicIncrement(volatile AtomicWord* ptr,
increment);
}
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return Barrier_AtomicIncrement(
- reinterpret_cast<volatile Atomic32*>(ptr), increment);
-}
-
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
index 068c1e87ff..1f89f0a6b3 100644
--- a/deps/v8/src/base/atomicops_internals_portable.h
+++ b/deps/v8/src/base/atomicops_internals_portable.h
@@ -74,11 +74,6 @@ inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
-}
-
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
@@ -160,11 +155,6 @@ inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
-}
-
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
diff --git a/deps/v8/src/base/atomicops_internals_std.h b/deps/v8/src/base/atomicops_internals_std.h
index c04e64bb32..8ea1019202 100644
--- a/deps/v8/src/base/atomicops_internals_std.h
+++ b/deps/v8/src/base/atomicops_internals_std.h
@@ -57,13 +57,6 @@ inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
std::memory_order_relaxed);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
- increment,
- std::memory_order_seq_cst);
-}
-
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
atomic_compare_exchange_strong_explicit(
@@ -161,13 +154,6 @@ inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
std::memory_order_relaxed);
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
- increment,
- std::memory_order_seq_cst);
-}
-
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
std::atomic_compare_exchange_strong_explicit(
diff --git a/deps/v8/src/base/bits-iterator.h b/deps/v8/src/base/bits-iterator.h
new file mode 100644
index 0000000000..6ce656e6d7
--- /dev/null
+++ b/deps/v8/src/base/bits-iterator.h
@@ -0,0 +1,58 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BITS_ITERATOR_H_
+#define V8_BASE_BITS_ITERATOR_H_
+
+#include <type_traits>
+
+#include "src/base/bits.h"
+#include "src/base/iterator.h"
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+template <typename T, bool kMSBFirst = false>
+class BitsIterator : public iterator<std::forward_iterator_tag, int> {
+ STATIC_ASSERT(std::is_integral<T>::value);
+
+ public:
+ explicit BitsIterator(T bits) : bits_(bits) {}
+
+ int operator*() const {
+ return kMSBFirst ? 8 * sizeof(T) - 1 - CountLeadingZeros(bits_)
+ : CountTrailingZeros(bits_);
+ }
+
+ BitsIterator& operator++() {
+ bits_ &= ~(T{1} << **this);
+ return *this;
+ }
+
+ bool operator==(BitsIterator other) { return bits_ == other.bits_; }
+ bool operator!=(BitsIterator other) { return bits_ != other.bits_; }
+
+ private:
+ T bits_;
+};
+
+// Returns an iterable over the indices of the set bits in {bits}, LSB first.
+template <typename T>
+auto IterateBits(T bits) {
+ return make_iterator_range(BitsIterator<T>{bits}, BitsIterator<T>{0});
+}
+
+// Returns an iterable over the indices of the set bits in {bits}, MSB first.
+template <typename T>
+auto IterateBitsBackwards(T bits) {
+ return make_iterator_range(BitsIterator<T, true>{bits},
+ BitsIterator<T, true>{0});
+}
+
+} // namespace bits
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_BITS_ITERATOR_H_
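
To make the new header concrete, a small usage sketch (illustrative, not part of the patch): IterateBits yields the indices of the set bits from least to most significant, and IterateBitsBackwards reverses that order.

#include <cstdio>

#include "src/base/bits-iterator.h"

int main() {
  // 0b10110 has bits 1, 2 and 4 set.
  for (int index : v8::base::bits::IterateBits(0b10110u)) {
    std::printf("%d ", index);  // Prints: 1 2 4
  }
  std::printf("\n");
  for (int index : v8::base::bits::IterateBitsBackwards(0b10110u)) {
    std::printf("%d ", index);  // Prints: 4 2 1
  }
  std::printf("\n");
  return 0;
}
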
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index f4300824eb..8d142c456c 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -33,13 +33,12 @@
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__PPC64__) || defined(_ARCH_PPC64)
+#define V8_HOST_ARCH_PPC64 1
+#define V8_HOST_ARCH_64_BIT 1
#elif defined(__PPC__) || defined(_ARCH_PPC)
#define V8_HOST_ARCH_PPC 1
-#if defined(__PPC64__) || defined(_ARCH_PPC64)
-#define V8_HOST_ARCH_64_BIT 1
-#else
#define V8_HOST_ARCH_32_BIT 1
-#endif
#elif defined(__s390__) || defined(__s390x__)
#define V8_HOST_ARCH_S390 1
#if defined(__s390x__)
@@ -78,7 +77,7 @@
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
+ !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -91,6 +90,8 @@
#define V8_TARGET_ARCH_MIPS64 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
+#elif defined(_ARCH_PPC64)
+#define V8_TARGET_ARCH_PPC64 1
#elif defined(_ARCH_PPC)
#define V8_TARGET_ARCH_PPC 1
#else
@@ -118,11 +119,9 @@
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_PPC
-#if V8_TARGET_ARCH_PPC64
-#define V8_TARGET_ARCH_64_BIT 1
-#else
#define V8_TARGET_ARCH_32_BIT 1
-#endif
+#elif V8_TARGET_ARCH_PPC64
+#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_S390
#if V8_TARGET_ARCH_S390X
#define V8_TARGET_ARCH_64_BIT 1
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 4f4ac2b328..f1c48fa135 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -9,7 +9,6 @@
#endif
#if V8_OS_LINUX
#include <linux/auxvec.h> // AT_HWCAP
-extern "C" char** environ;
#endif
#if V8_GLIBC_PREREQ(2, 16)
#include <sys/auxv.h> // getauxval()
@@ -17,7 +16,7 @@ extern "C" char** environ;
#if V8_OS_QNX
#include <sys/syspage.h> // cpuinfo
#endif
-#if (V8_OS_LINUX && V8_HOST_ARCH_PPC) || V8_OS_ANDROID
+#if V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64)
#include <elf.h>
#endif
#if V8_OS_AIX
@@ -110,25 +109,31 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#define HWCAP_LPAE (1 << 20)
static uint32_t ReadELFHWCaps() {
+ uint32_t result = 0;
#if V8_GLIBC_PREREQ(2, 16)
- return static_cast<uint32_t>(getauxval(AT_HWCAP));
-#else
- char** head = environ;
- while (*head++ != nullptr) {
- }
-#ifdef __LP64__
- using elf_auxv_t = Elf64_auxv_t;
+ result = static_cast<uint32_t>(getauxval(AT_HWCAP));
#else
- using elf_auxv_t = Elf32_auxv_t;
-#endif
- for (elf_auxv_t* entry = reinterpret_cast<elf_auxv_t*>(head);
- entry->a_type != AT_NULL; ++entry) {
- if (entry->a_type == AT_HWCAP) {
- return entry->a_un.a_val;
+ // Read the ELF HWCAP flags by parsing /proc/self/auxv.
+ FILE* fp = fopen("/proc/self/auxv", "r");
+ if (fp != nullptr) {
+ struct {
+ uint32_t tag;
+ uint32_t value;
+ } entry;
+ for (;;) {
+ size_t n = fread(&entry, sizeof(entry), 1, fp);
+ if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
+ break;
+ }
+ if (entry.tag == AT_HWCAP) {
+ result = entry.value;
+ break;
+ }
}
+ fclose(fp);
}
- return 0u;
#endif
+ return result;
}
#endif // V8_HOST_ARCH_ARM
@@ -602,32 +607,37 @@ CPU::CPU()
#elif V8_HOST_ARCH_ARM64
// Implementer, variant and part are currently unused under ARM64.
-#elif V8_HOST_ARCH_PPC
+#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
#ifndef USE_SIMULATOR
#if V8_OS_LINUX
+ // Read processor info from /proc/self/auxv.
char* auxv_cpu_type = nullptr;
- char** head = environ;
- while (*head++ != nullptr) {
- }
+ FILE* fp = fopen("/proc/self/auxv", "r");
+ if (fp != nullptr) {
#if V8_TARGET_ARCH_PPC64
- using elf_auxv_t = Elf64_auxv_t;
+ Elf64_auxv_t entry;
#else
- using elf_auxv_t = Elf32_auxv_t;
+ Elf32_auxv_t entry;
#endif
- for (elf_auxv_t* entry = reinterpret_cast<elf_auxv_t*>(head);
- entry->a_type != AT_NULL; ++entry) {
- switch (entry->a_type) {
- case AT_PLATFORM:
- auxv_cpu_type = reinterpret_cast<char*>(entry->a_un.a_val);
- break;
- case AT_ICACHEBSIZE:
- icache_line_size_ = entry->a_un.a_val;
- break;
- case AT_DCACHEBSIZE:
- dcache_line_size_ = entry->a_un.a_val;
+ for (;;) {
+ size_t n = fread(&entry, sizeof(entry), 1, fp);
+ if (n == 0 || entry.a_type == AT_NULL) {
break;
+ }
+ switch (entry.a_type) {
+ case AT_PLATFORM:
+ auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
+ break;
+ case AT_ICACHEBSIZE:
+ icache_line_size_ = entry.a_un.a_val;
+ break;
+ case AT_DCACHEBSIZE:
+ dcache_line_size_ = entry.a_un.a_val;
+ break;
+ }
}
+ fclose(fp);
}
part_ = -1;
@@ -671,7 +681,7 @@ CPU::CPU()
}
#endif // V8_OS_AIX
#endif // !USE_SIMULATOR
-#endif // V8_HOST_ARCH_PPC
+#endif // V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
}
} // namespace base
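
The cpu.cc change above replaces the old environ-walking trick with a direct read of /proc/self/auxv. The same parsing pattern as a standalone, Linux-only sketch (64-bit auxv layout assumed; illustrative, not part of the patch):

#include <elf.h>

#include <cstdio>

// Read auxiliary-vector records until AT_NULL and return the AT_HWCAP
// value, or 0 if the file cannot be opened or the tag is absent.
unsigned long ReadHwcapFromAuxv() {
  unsigned long result = 0;
  FILE* fp = std::fopen("/proc/self/auxv", "r");
  if (fp == nullptr) return 0;
  Elf64_auxv_t entry;
  while (std::fread(&entry, sizeof(entry), 1, fp) == 1 &&
         entry.a_type != AT_NULL) {
    if (entry.a_type == AT_HWCAP) {
      result = entry.a_un.a_val;
      break;
    }
  }
  std::fclose(fp);
  return result;
}
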
diff --git a/deps/v8/src/base/enum-set.h b/deps/v8/src/base/enum-set.h
index ef7f0df9bd..927a8f87fe 100644
--- a/deps/v8/src/base/enum-set.h
+++ b/deps/v8/src/base/enum-set.h
@@ -29,32 +29,43 @@ class EnumSet {
bool empty() const { return bits_ == 0; }
bool contains(E element) const { return (bits_ & Mask(element)) != 0; }
- bool contains_any(const EnumSet& set) const {
- return (bits_ & set.bits_) != 0;
- }
+ bool contains_any(EnumSet set) const { return (bits_ & set.bits_) != 0; }
void Add(E element) { bits_ |= Mask(element); }
- void Add(const EnumSet& set) { bits_ |= set.bits_; }
+ void Add(EnumSet set) { bits_ |= set.bits_; }
void Remove(E element) { bits_ &= ~Mask(element); }
- void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
+ void Remove(EnumSet set) { bits_ &= ~set.bits_; }
void RemoveAll() { bits_ = 0; }
- void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
+ void Intersect(EnumSet set) { bits_ &= set.bits_; }
T ToIntegral() const { return bits_; }
- bool operator==(const EnumSet& set) const { return bits_ == set.bits_; }
- bool operator!=(const EnumSet& set) const { return bits_ != set.bits_; }
- EnumSet operator|(const EnumSet& set) const {
- return EnumSet(bits_ | set.bits_);
- }
- EnumSet operator&(const EnumSet& set) const {
- return EnumSet(bits_ & set.bits_);
- }
+
+ bool operator==(EnumSet set) const { return bits_ == set.bits_; }
+ bool operator!=(EnumSet set) const { return bits_ != set.bits_; }
+
+ EnumSet operator|(EnumSet set) const { return EnumSet(bits_ | set.bits_); }
+ EnumSet operator&(EnumSet set) const { return EnumSet(bits_ & set.bits_); }
+ EnumSet operator-(EnumSet set) const { return EnumSet(bits_ & ~set.bits_); }
+
+ EnumSet& operator|=(EnumSet set) { return *this = *this | set; }
+ EnumSet& operator&=(EnumSet set) { return *this = *this & set; }
+ EnumSet& operator-=(EnumSet set) { return *this = *this - set; }
+
+ EnumSet operator|(E element) const { return EnumSet(bits_ | Mask(element)); }
+ EnumSet operator&(E element) const { return EnumSet(bits_ & Mask(element)); }
+ EnumSet operator-(E element) const { return EnumSet(bits_ & ~Mask(element)); }
+
+ EnumSet& operator|=(E element) { return *this = *this | element; }
+ EnumSet& operator&=(E element) { return *this = *this & element; }
+ EnumSet& operator-=(E element) { return *this = *this - element; }
static constexpr EnumSet FromIntegral(T bits) { return EnumSet{bits}; }
private:
explicit constexpr EnumSet(T bits) : bits_(bits) {}
- static T Mask(E element) {
+ static constexpr T Mask(E element) {
+#if V8_HAS_CXX14_CONSTEXPR
DCHECK_GT(sizeof(T) * 8, static_cast<int>(element));
+#endif
return T{1} << static_cast<typename std::underlying_type<E>::type>(element);
}
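
The net effect of the enum-set.h change is pass-by-value semantics plus a full complement of set-level and element-level operators. A hedged usage sketch (the Permission enum is invented for illustration):

#include "src/base/enum-set.h"

enum class Permission { kRead, kWrite, kExecute };
using Permissions = v8::base::EnumSet<Permission>;

Permissions Example() {
  Permissions set;
  set |= Permission::kRead;    // Element-level operator|= added above.
  set |= Permission::kWrite;
  set -= Permission::kWrite;   // Element-level operator-= removes it again.
  Permissions other = set | Permission::kExecute;
  return other - set;          // Set difference: just kExecute.
}
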
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 5f52a9893e..e22dd00895 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -144,11 +144,11 @@ V8_INLINE Dest bit_cast(Source const& source) {
// Extract from 3.2.2 of C++11 spec:
// [...] A non-placement deallocation function for a class is
// odr-used by the definition of the destructor of that class, [...]
-#define DISALLOW_NEW_AND_DELETE() \
- void* operator new(size_t) { base::OS::Abort(); } \
- void* operator new[](size_t) { base::OS::Abort(); } \
- void operator delete(void*, size_t) { base::OS::Abort(); } \
- void operator delete[](void*, size_t) { base::OS::Abort(); }
+#define DISALLOW_NEW_AND_DELETE() \
+ void* operator new(size_t) { v8::base::OS::Abort(); } \
+ void* operator new[](size_t) { v8::base::OS::Abort(); } \
+ void operator delete(void*, size_t) { v8::base::OS::Abort(); } \
+ void operator delete[](void*, size_t) { v8::base::OS::Abort(); }
// Define V8_USE_ADDRESS_SANITIZER macro.
#if defined(__has_feature)
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 8d63dd2f84..3c0b2d1d1a 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -96,5 +96,23 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
+// static
+void* Stack::GetStackStart() {
+ pthread_attr_t attr;
+ int error;
+ pthread_attr_init(&attr);
+ error = pthread_attr_get_np(pthread_self(), &attr);
+ if (!error) {
+ void* base;
+ size_t size;
+ error = pthread_attr_getstack(&attr, &base, &size);
+ CHECK(!error);
+ pthread_attr_destroy(&attr);
+ return reinterpret_cast<uint8_t*>(base) + size;
+ }
+ pthread_attr_destroy(&attr);
+ return nullptr;
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 72a41c8491..bee6b30f7c 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -93,5 +93,10 @@ void OS::AdjustSchedulingParams() {
#endif
}
+// static
+void* Stack::GetStackStart() {
+ return pthread_get_stackaddr_np(pthread_self());
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 0424d4c135..1e600c7891 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -81,6 +81,10 @@ extern int madvise(caddr_t, size_t, int);
#define MADV_FREE MADV_DONTNEED
#endif
+#if defined(V8_LIBC_GLIBC)
+extern "C" void* __libc_stack_end; // NOLINT
+#endif
+
namespace v8 {
namespace base {
@@ -109,6 +113,8 @@ const int kMmapFd = -1;
const int kMmapFdOffset = 0;
+// TODO(v8:10026): Add the right permission flag to make executable pages
+// guarded.
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
@@ -464,7 +470,7 @@ void OS::DebugBreak() {
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
-#elif V8_HOST_ARCH_PPC
+#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
asm("int $3");
@@ -802,7 +808,7 @@ static void* ThreadEntry(void* arg) {
void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
+ strncpy(name_, name, sizeof(name_) - 1);
name_[sizeof(name_) - 1] = '\0';
}
@@ -961,6 +967,40 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
USE(result);
}
+// pthread_getattr_np, used below, is non-portable (hence the _np suffix). We
+// keep this version in the POSIX file because most Linux-compatible
+// derivatives support it; macOS and FreeBSD differ and provide their own
+// implementations.
+#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX)
+
+// static
+void* Stack::GetStackStart() {
+ pthread_attr_t attr;
+ int error = pthread_getattr_np(pthread_self(), &attr);
+ if (!error) {
+ void* base;
+ size_t size;
+ error = pthread_attr_getstack(&attr, &base, &size);
+ CHECK(!error);
+ pthread_attr_destroy(&attr);
+ return reinterpret_cast<uint8_t*>(base) + size;
+ }
+ pthread_attr_destroy(&attr);
+
+#if defined(V8_LIBC_GLIBC)
+  // pthread_getattr_np can fail for the main thread. In that case, like
+  // NaCl, we fall back to __libc_stack_end to obtain the start of the stack.
+  // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
+  return __libc_stack_end;
+#endif  // defined(V8_LIBC_GLIBC)
+ return nullptr;
+}
+
+#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX)
+
+// static
+void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); }
+
#undef LOG_TAG
#undef MAP_ANONYMOUS
#undef MADV_FREE
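
With GetStackStart and GetCurrentStackPosition in place, estimating the current thread's stack usage becomes a one-liner. A sketch under the assumption, which holds on all platforms handled here, that the stack grows downwards (illustrative, not part of the patch):

#include <cstddef>
#include <cstdint>

#include "src/base/platform/platform.h"

// Approximate number of stack bytes currently in use by this thread.
size_t ApproximateStackUsage() {
  uintptr_t start =
      reinterpret_cast<uintptr_t>(v8::base::Stack::GetStackStart());
  uintptr_t position =
      reinterpret_cast<uintptr_t>(v8::base::Stack::GetCurrentStackPosition());
  return static_cast<size_t>(start - position);
}
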
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 04ef8a30f2..5db3e34310 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -1394,5 +1394,31 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
void OS::AdjustSchedulingParams() {}
+// static
+void* Stack::GetStackStart() {
+#if defined(V8_TARGET_ARCH_X64)
+ return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase)));
+#elif defined(V8_TARGET_ARCH_32_BIT)
+ return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase)));
+#elif defined(V8_TARGET_ARCH_ARM64)
+ // Windows 8 and later, see
+ // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getcurrentthreadstacklimits
+ ULONG_PTR lowLimit, highLimit;
+ ::GetCurrentThreadStackLimits(&lowLimit, &highLimit);
+ return reinterpret_cast<void*>(highLimit);
+#else
+#error Unsupported GetStackStart.
+#endif
+}
+
+// static
+void* Stack::GetCurrentStackPosition() {
+#if V8_CC_MSVC
+ return _AddressOfReturnAddress();
+#else
+ return __builtin_frame_address(0);
+#endif
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index e1f84043eb..af55036a00 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -35,6 +35,10 @@
#include "src/base/qnx-math.h"
#endif
+#ifdef V8_USE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif // V8_USE_ADDRESS_SANITIZER
+
namespace v8 {
namespace base {
@@ -407,6 +411,38 @@ class V8_BASE_EXPORT Thread {
DISALLOW_COPY_AND_ASSIGN(Thread);
};
+// TODO(v8:10354): Make use of the stack utilities here in V8.
+class V8_BASE_EXPORT Stack {
+ public:
+ // Gets the start of the stack of the current thread.
+ static void* GetStackStart();
+
+  // Returns the current stack top. Works correctly with ASAN and SafeStack.
+  // GetCurrentStackPosition() must not be inlined, because it works on stack
+  // frames: if it were inlined into a function with a huge stack frame, it
+  // would return an address significantly above the actual current stack
+  // position.
+  static V8_NOINLINE void* GetCurrentStackPosition();
+
+ // Translates an ASAN-based slot to a real stack slot if necessary.
+ static void* GetStackSlot(void* slot) {
+#ifdef V8_USE_ADDRESS_SANITIZER
+ void* fake_stack = __asan_get_current_fake_stack();
+ if (fake_stack) {
+ void* fake_frame_start;
+ void* real_frame = __asan_addr_is_in_fake_stack(
+ fake_stack, slot, &fake_frame_start, nullptr);
+ if (real_frame) {
+ return reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(real_frame) +
+ (reinterpret_cast<uintptr_t>(slot) -
+ reinterpret_cast<uintptr_t>(fake_frame_start)));
+ }
+ }
+#endif // V8_USE_ADDRESS_SANITIZER
+ return slot;
+ }
+};
+
} // namespace base
} // namespace v8
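
The TODO above points at the intended consumer: conservative stack scanning. A hedged sketch of such a scanner, mapping each slot through GetStackSlot so that ASAN fake-stack frames resolve to their real counterparts (VisitPointer is a hypothetical callback, not part of the patch):

#include "src/base/platform/platform.h"

// Hypothetical visitor supplied by the embedder of this sketch.
void VisitPointer(void* candidate);

// Walk every pointer-aligned slot between the current stack position and
// the stack start, translating ASAN fake-stack addresses along the way.
void ScanCurrentStack() {
  void** current =
      reinterpret_cast<void**>(v8::base::Stack::GetCurrentStackPosition());
  void** start = reinterpret_cast<void**>(v8::base::Stack::GetStackStart());
  for (; current < start; ++current) {
    void* slot = v8::base::Stack::GetStackSlot(current);
    VisitPointer(*reinterpret_cast<void**>(slot));
  }
}
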
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 93caa8847e..49f578d1fd 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -6,15 +6,15 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
@@ -22,6 +22,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -65,10 +66,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -82,7 +89,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
@@ -413,7 +420,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ cmp(sp, scratch);
__ b(lo, &stack_overflow);
@@ -926,18 +933,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not
+// advance the bytecode offset if the current bytecode is a JumpLoop; instead
+// it re-executes the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
+
+  // The bytecode offset is increased by one for wide and extra-wide prefix
+  // bytecodes. If the prefixed bytecode turns out to be a JumpLoop, we need to
+  // restore the original offset, so we keep a backup of it here.
+ Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
+ __ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode;
@@ -964,7 +980,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&process_bytecode);
-// Bailout to the return label if this is a return bytecode.
+  // Bail out to the return label if this is a return bytecode.
// Create cmp, cmpne, ..., cmpne to check for a return bytecode.
Condition flag = al;
@@ -977,9 +993,22 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ b(if_return, eq);
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ b(ne, &not_jump_loop);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ b(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
@@ -1085,7 +1114,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ sub(r9, sp, Operand(r4));
- LoadRealStackLimit(masm, r2);
+ LoadStackLimit(masm, r2, StackLimitKind::kRealStackLimit);
__ cmp(r9, Operand(r2));
__ b(lo, &stack_overflow);
@@ -1111,6 +1140,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r9, Operand::Zero());
__ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, r4, StackLimitKind::kInterruptStackLimit);
+ __ cmp(sp, r4);
+ __ b(lo, &stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1143,7 +1180,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2,
+ kInterpreterBytecodeOffsetRegister, r1, r2, r3,
&do_return);
__ jmp(&do_dispatch);
@@ -1152,6 +1189,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ str(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+  // After the call, restore the bytecode array, bytecode offset, and
+  // accumulator registers. Also restore the bytecode offset slot in the stack
+  // frame to its previous value.
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
+ __ str(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1354,6 +1415,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ b(ge, &okay);
+ __ bkpt(0);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
@@ -1373,6 +1443,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ b(eq, &function_entry_bytecode);
+
// Load the current bytecode.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1380,15 +1456,25 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2,
+ kInterpreterBytecodeOffsetRegister, r1, r2, r3,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
__ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ b(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -1994,7 +2080,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Compute the space we have left. The stack might already be overflowed
// here which will cause remaining_stack_size to become negative.
- LoadRealStackLimit(masm, remaining_stack_size);
+ LoadStackLimit(masm, remaining_stack_size,
+ StackLimitKind::kRealStackLimit);
__ sub(remaining_stack_size, sp, remaining_stack_size);
// Check if the arguments will overflow the stack.
@@ -2228,7 +2315,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -----------------------------------
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
- __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmp(r2, Operand(kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
@@ -2433,6 +2520,35 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(r8);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+    // Save all parameter registers. They might hold live values; we restore
+    // them after the runtime call.
+ constexpr DwVfpRegister lowest_fp_reg = DwVfpRegister::from_code(
+ WasmDebugBreakFrameConstants::kFirstPushedFpReg);
+ constexpr DwVfpRegister highest_fp_reg = DwVfpRegister::from_code(
+ WasmDebugBreakFrameConstants::kLastPushedFpReg);
+
+ // Store gp parameter registers.
+ __ stm(db_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
+ // Store fp parameter registers.
+ __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
+ __ ldm(ia_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index d4a4cbe0eb..9c38ae085e 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -6,15 +6,15 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
@@ -22,6 +22,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#if defined(V8_OS_WIN)
@@ -34,6 +35,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
+ __ CodeEntry();
+
__ Mov(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@@ -65,10 +68,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -85,7 +94,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ Sub(scratch, sp, scratch);
@@ -465,7 +474,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, x10);
+ LoadStackLimit(masm, x10, StackLimitKind::kRealStackLimit);
__ Cmp(sp, x10);
__ B(lo, &stack_overflow);
@@ -689,10 +698,10 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// that.
{
Assembler::BlockPoolsScope block_pools(masm);
- __ bind(&handler_entry);
// Store the current pc as the handler offset. It's used later to create the
// handler table.
+ __ BindExceptionHandler(&handler_entry);
masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
// Caught exception: Store result (exception) in the pending exception
@@ -1049,17 +1058,26 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not
+// advance the bytecode offset if the current bytecode is a JumpLoop; instead
+// it re-executes the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
+
+  // The bytecode offset is increased by one for wide and extra-wide prefix
+  // bytecodes. If the prefixed bytecode turns out to be a JumpLoop, we need to
+  // restore the original offset, so we keep a backup of it here.
+ Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Mov(bytecode_size_table, ExternalReference::bytecode_size_table_address());
+ __ Mov(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -1096,9 +1114,22 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ Cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ B(ne, &not_jump_loop);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Mov(bytecode_offset, original_bytecode_offset);
+ __ B(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ Add(bytecode_offset, bytecode_offset, scratch1);
+
+ __ Bind(&end);
}
// Generate code for entering a JS function with the interpreter.
@@ -1180,7 +1211,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
__ Bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ Push(lr, fp, cp, closure);
+ __ Push<TurboAssembler::kSignLR>(lr, fp, cp, closure);
__ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Reset code age.
@@ -1213,7 +1244,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ Cmp(x10, scratch);
}
__ B(lo, &stack_overflow);
@@ -1243,6 +1274,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2));
__ Bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, x10, StackLimitKind::kInterruptStackLimit);
+ __ Cmp(sp, x10);
+ __ B(lo, &stack_check_interrupt);
+ __ Bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1274,7 +1313,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2,
+ kInterpreterBytecodeOffsetRegister, x1, x2, x3,
&do_return);
__ B(&do_dispatch);
@@ -1283,6 +1322,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, x2);
__ Ret();
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ Str(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+  // After the call, restore the bytecode array, bytecode offset, and
+  // accumulator registers. Also restore the bytecode offset slot in the stack
+  // frame to its previous value.
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(x10, kInterpreterBytecodeOffsetRegister);
+ __ Str(x10, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1515,6 +1578,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ B(ge, &okay);
+ __ Unreachable();
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1531,6 +1603,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ B(eq, &function_entry_bytecode);
+
// Load the current bytecode.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1538,15 +1616,25 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2,
+ kInterpreterBytecodeOffsetRegister, x1, x2, x3,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
__ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ B(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -1608,7 +1696,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Restore fp, lr.
__ Mov(sp, fp);
- __ Pop(fp, lr);
+ __ Pop<TurboAssembler::kAuthLR>(fp, lr);
__ LoadEntryFromBuiltinIndex(builtin);
__ Jump(builtin);
@@ -1989,7 +2077,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
namespace {
void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ Push(lr, fp);
+ __ Push<TurboAssembler::kSignLR>(lr, fp);
__ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(x11, x1); // x1: function
__ SmiTag(x11, x0); // x0: number of arguments.
@@ -2005,7 +2093,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// then drop the parameters and the receiver.
__ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Mov(sp, fp);
- __ Pop(fp, lr);
+ __ Pop<TurboAssembler::kAuthLR>(fp, lr);
// Drop actual parameters and receiver.
__ SmiUntag(x10);
@@ -2375,7 +2463,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
Label done;
- LoadRealStackLimit(masm, x10);
+ LoadStackLimit(masm, x10, StackLimitKind::kRealStackLimit);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
__ Sub(x10, sp, x10);
@@ -2714,7 +2802,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label create_adaptor_frame, dont_adapt_arguments, stack_overflow,
adapt_arguments_in_place;
- __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ Cmp(argc_expected, kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
// When the difference between argc_actual and argc_expected is odd, we
@@ -2950,6 +3038,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(x8);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+    // Save all parameter registers. They might hold live values; we restore
+    // them after the runtime call.
+ __ PushXRegList(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ PushDRegList(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ PopDRegList(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ PopXRegList(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -3577,9 +3687,9 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
- __ Poke(lr, 0); // Store the return address.
+ __ Poke<TurboAssembler::kSignLR>(lr, 0); // Store the return address.
__ Blr(x10); // Call the C++ function.
- __ Peek(lr, 0); // Return to calling code.
+ __ Peek<TurboAssembler::kAuthLR>(lr, 0); // Return to calling code.
__ AssertFPCRState();
__ Ret();
}
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index 0ba3ea4030..52bcc75d19 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -61,9 +61,8 @@ namespace array {
GetFromIndex(context: Context, length: Number, arguments: Arguments): Number {
// 4. If fromIndex is present, let n be ? ToInteger(fromIndex);
// else let n be len - 1.
- const n: Number = arguments.length < 2 ?
- length - 1 :
- ToInteger_Inline(arguments[1], kTruncateMinusZero);
+ const n: Number =
+ arguments.length < 2 ? length - 1 : ToInteger_Inline(arguments[1]);
// 5. If n >= 0, then.
let k: Number = SmiConstant(0);
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 3aa75c7413..7d87a55e88 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -29,6 +29,8 @@ type Tagged generates 'TNode<MaybeObject>' constexpr 'MaybeObject';
type StrongTagged extends Tagged
generates 'TNode<Object>' constexpr 'ObjectPtr';
type Smi extends StrongTagged generates 'TNode<Smi>' constexpr 'Smi';
+type TaggedIndex extends StrongTagged
+ generates 'TNode<TaggedIndex>' constexpr 'TaggedIndex';
// A possibly cleared weak pointer with a bit pattern that distinguishes it from
// strong HeapObject pointers and Smi values.
type WeakHeapObject extends Tagged;
@@ -85,7 +87,7 @@ type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int31 extends int32
generates 'TNode<Int32T>' constexpr 'int31_t';
type uint31 extends uint32
- generates 'TNode<Uint32T>' constexpr 'uint31_t';
+ generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int16 extends int31
generates 'TNode<Int16T>' constexpr 'int16_t';
type uint16 extends uint31
@@ -104,6 +106,9 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
+// A Smi value containing a bitfield struct as its integer data.
+type SmiTagged<T : type extends uint31> extends Smi;
+
// WARNING: The memory representation (i.e., in class fields and arrays) of
// float64_or_hole is just a float64 that may be the hole-representing
// signalling NaN bit-pattern. So it's memory size is that of float64 and
@@ -156,7 +161,7 @@ type LayoutDescriptor extends ByteArray
generates 'TNode<LayoutDescriptor>';
extern class TransitionArray extends WeakFixedArray;
-type InstanceType extends uint16 constexpr 'v8::internal::InstanceType';
+type InstanceType extends uint16 constexpr 'InstanceType';
type NoSharedNameSentinel extends Smi;
@@ -177,9 +182,6 @@ type Callable = JSFunction|JSBoundFunction|CallableJSProxy|CallableApiObject;
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
-type ToIntegerTruncationMode
-constexpr 'CodeStubAssembler::ToIntegerTruncationMode';
-
extern enum UnicodeEncoding { UTF16, UTF32 }
// Promise constants
@@ -345,10 +347,6 @@ const kSloppyArgumentsContextIndex: constexpr int31
const kSloppyArgumentsParameterMapStart: constexpr int31
generates 'SloppyArgumentsElements::kParameterMapStart';
-const kTruncateMinusZero: constexpr ToIntegerTruncationMode
- generates 'CodeStubAssembler::ToIntegerTruncationMode::kTruncateMinusZero'
- ;
-
extern enum PrimitiveType { kString, kBoolean, kSymbol, kNumber }
const kNameDictionaryInitialCapacity:
@@ -433,10 +431,53 @@ extern macro Comment(constexpr string);
extern macro StaticAssert(bool);
extern macro Print(Object);
extern macro DebugBreak();
-extern transitioning macro ToInteger_Inline(implicit context: Context)(JSAny):
- Number;
-extern transitioning macro ToInteger_Inline(implicit context: Context)(
- JSAny, constexpr ToIntegerTruncationMode): Number;
+
+// ES6 7.1.4 ToInteger ( argument )
+transitioning macro ToIntegerImpl(implicit context: Context)(input: Object):
+ Number {
+ let input = input;
+
+ while (true) {
+ typeswitch (input) {
+ case (s: Smi): {
+ return s;
+ }
+ case (hn: HeapNumber): {
+ let value = Convert<float64>(hn);
+ if (Float64IsNaN(value)) return SmiConstant(0);
+ value = math::Float64Trunc(value);
+ // ToInteger normalizes -0 to +0.
+ if (value == 0.0) return SmiConstant(0);
+ const result = ChangeFloat64ToTagged(value);
+ assert(IsNumberNormalized(result));
+ return result;
+ }
+ case (ho: HeapObject): {
+ input = math::NonNumberToNumber(ho);
+ }
+ }
+ }
+ unreachable;
+}
+
+transitioning builtin ToInteger(implicit context: Context)(input: Object):
+ Number {
+ return ToIntegerImpl(input);
+}
+
+@export
+transitioning macro ToInteger_Inline(implicit context: Context)(input: Object):
+ Number {
+ typeswitch (input) {
+ case (s: Smi): {
+ return s;
+ }
+ case (ho: HeapObject): {
+ return ToInteger(ho);
+ }
+ }
+}
+
extern transitioning macro ToLength_Inline(implicit context: Context)(JSAny):
Number;
extern transitioning macro ToNumber_Inline(implicit context: Context)(JSAny):
@@ -459,7 +500,8 @@ extern transitioning builtin HasProperty(implicit context: Context)(
JSAny, JSAny): Boolean;
extern transitioning macro HasProperty_Inline(implicit context: Context)(
JSReceiver, JSAny): Boolean;
-extern builtin LoadIC(Context, JSAny, JSAny, Smi, FeedbackVector): JSAny;
+extern builtin LoadIC(
+ Context, JSAny, JSAny, TaggedIndex, FeedbackVector): JSAny;
extern macro CollectCallFeedback(
JSAny, Context, Undefined | FeedbackVector, uintptr);
@@ -514,7 +556,6 @@ extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
extern builtin ToObject(Context, JSAny): JSReceiver;
extern macro ToObject_Inline(Context, JSAny): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
-extern macro IsTheHole(Object): bool;
extern macro IsString(HeapObject): bool;
transitioning builtin ToString(context: Context, o: JSAny): String {
return ToStringImpl(context, o);
@@ -724,6 +765,10 @@ ConstexprInt31NotEqual(constexpr int31, constexpr int31): constexpr bool;
extern operator '>=' macro
ConstexprInt31GreaterThanEqual(
constexpr int31, constexpr int31): constexpr bool;
+extern operator '==' macro ConstexprInt32Equal(
+ constexpr int32, constexpr int32): constexpr bool;
+extern operator '!=' macro ConstexprInt32NotEqual(
+ constexpr int32, constexpr int32): constexpr bool;
extern operator '==' macro Word32Equal(int32, int32): bool;
extern operator '==' macro Word32Equal(uint32, uint32): bool;
@@ -833,7 +878,14 @@ extern macro SmiTag(intptr): Smi;
extern macro SmiFromInt32(int32): Smi;
extern macro SmiFromUint32(uint32): Smi;
extern macro SmiUntag(Smi): intptr;
+macro SmiUntag<T: type>(value: SmiTagged<T>): T {
+ return %RawDownCast<T>(Unsigned(SmiToInt32(Convert<Smi>(value))));
+}
extern macro SmiToInt32(Smi): int32;
+extern macro TaggedIndexToIntPtr(TaggedIndex): intptr;
+extern macro IntPtrToTaggedIndex(intptr): TaggedIndex;
+extern macro TaggedIndexToSmi(TaggedIndex): Smi;
+extern macro SmiToTaggedIndex(Smi): TaggedIndex;
extern macro RoundIntPtrToFloat64(intptr): float64;
extern macro ChangeFloat32ToFloat64(float32): float64;
extern macro ChangeNumberToFloat64(Number): float64;
@@ -888,6 +940,10 @@ macro UnsafeCast<A : type extends Object>(implicit context: Context)(o: Object):
return %RawDownCast<A>(o);
}
+macro UnsafeConstCast<T: type>(r: const &T):&T {
+ return %RawDownCast<&T>(r);
+}
+
extern macro FixedArrayMapConstant(): Map;
extern macro FixedDoubleArrayMapConstant(): Map;
extern macro FixedCOWArrayMapConstant(): Map;
@@ -1206,7 +1262,7 @@ macro ChangeSafeIntegerNumberToUintPtr(value: Number):
transitioning macro ToUintPtr(implicit context: Context)(value: JSAny):
uintptr labels IfLessThanZero, IfUIntPtrOverflow, IfSafeIntegerOverflow {
if (value == Undefined) return 0;
- const indexNumber = ToInteger_Inline(value, kTruncateMinusZero);
+ const indexNumber = ToInteger_Inline(value);
return TryNumberToUintPtr(indexNumber, kModeValueIsAnyNumber)
otherwise IfLessThanZero, IfUIntPtrOverflow, IfSafeIntegerOverflow;
}
@@ -1220,7 +1276,7 @@ transitioning macro ToUintPtr(implicit context: Context)(value: JSAny):
transitioning macro ToIndex(implicit context: Context)(value: JSAny):
uintptr labels IfRangeError {
if (value == Undefined) return 0;
- const indexNumber = ToInteger_Inline(value, kTruncateMinusZero);
+ const indexNumber = ToInteger_Inline(value);
// Less than 0 case, uintptr range overflow and safe integer range overflow
// imply IfRangeError.
return TryNumberToUintPtr(indexNumber, kModeValueIsAnyNumber)
@@ -1293,7 +1349,7 @@ extern macro IsOneByteStringInstanceType(InstanceType): bool;
@export
transitioning macro ConvertToRelativeIndex(implicit context: Context)(
index: JSAny, length: uintptr): uintptr {
- const indexNumber: Number = ToInteger_Inline(index, kTruncateMinusZero);
+ const indexNumber: Number = ToInteger_Inline(index);
return ConvertToRelativeIndex(indexNumber, length);
}
@@ -1340,7 +1396,7 @@ macro ConvertToRelativeIndex(indexNumber: Number, length: uintptr): uintptr {
@export
transitioning macro ClampToIndexRange(implicit context: Context)(
index: JSAny, limit: uintptr): uintptr {
- const indexNumber: Number = ToInteger_Inline(index, kTruncateMinusZero);
+ const indexNumber: Number = ToInteger_Inline(index);
return ClampToIndexRange(indexNumber, limit);
}
@@ -1560,6 +1616,29 @@ builtin CheckNumberInRange(implicit context: Context)(
}
}
+// Assert that the objects satisfy SameValue or are both the hole.
+builtin CheckSameObject(implicit context: Context)(
+ lhs: Object, rhs: Object): Undefined {
+ typeswitch (lhs) {
+ case (TheHole): {
+ if (rhs == TheHole) return Undefined;
+ }
+ case (a: JSAny): {
+ typeswitch (rhs) {
+ case (b: JSAny): {
+ if (SameValue(a, b)) return Undefined;
+ }
+ case (Object): {
+ }
+ }
+ }
+ case (Object): {
+ }
+ }
+ Print('Distinct or unexpected values in CheckSameObject');
+ unreachable;
+}
+
macro ReplaceTheHoleWithUndefined(o: JSAny|TheHole): JSAny {
typeswitch (o) {
case (TheHole): {
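
The new Torque ToIntegerImpl above always truncates toward zero and folds both NaN and -0 into +0, which is why the separate kTruncateMinusZero mode and its call sites could be deleted. The numeric core, mirrored in C++ as an illustrative cross-check (not part of the patch):

#include <cmath>

// NaN maps to 0, the value is truncated toward zero, and -0 is
// normalized to +0; infinities pass through unchanged.
double ToIntegerNumeric(double value) {
  if (std::isnan(value)) return 0.0;
  value = std::trunc(value);
  if (value == 0.0) return 0.0;  // Folds -0 into +0.
  return value;
}
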
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 0c30e52154..6eb6f87c74 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -105,9 +105,9 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
Object data_obj = call_data.data();
- FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
- *new_target, args.address_of_arg_at(1),
- args.length() - 1);
+ FunctionCallbackArguments custom(
+ isolate, data_obj, *function, raw_holder, *new_target,
+ args.address_of_first_argument(), args.length() - 1);
Handle<Object> result = custom.Call(call_data);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
@@ -206,6 +206,18 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
} else {
argv = new Address[frame_argc];
}
+#ifdef V8_REVERSE_JSARGS
+ argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
+ argv[BuiltinArguments::kTargetOffset] = function->ptr();
+ argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
+ argv[BuiltinArguments::kPaddingOffset] =
+ ReadOnlyRoots(isolate).the_hole_value().ptr();
+ int cursor = BuiltinArguments::kNumExtraArgs;
+ argv[cursor++] = receiver->ptr();
+ for (int i = 0; i < argc; ++i) {
+ argv[cursor++] = args[i]->ptr();
+ }
+#else
int cursor = frame_argc - 1;
argv[cursor--] = receiver->ptr();
for (int i = 0; i < argc; ++i) {
@@ -217,6 +229,7 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
argv[BuiltinArguments::kTargetOffset] = function->ptr();
argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
+#endif
MaybeHandle<Object> result;
{
RelocatableArguments arguments(isolate, frame_argc, &argv[frame_argc - 1]);
@@ -269,9 +282,9 @@ V8_WARN_UNUSED_RESULT static Object HandleApiCallAsFunctionOrConstructor(
{
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));
- FunctionCallbackArguments custom(isolate, call_data.data(), constructor,
- obj, new_target, args.address_of_arg_at(1),
- args.length() - 1);
+ FunctionCallbackArguments custom(
+ isolate, call_data.data(), constructor, obj, new_target,
+ args.address_of_first_argument(), args.length() - 1);
Handle<Object> result_handle = custom.Call(call_data);
if (result_handle.is_null()) {
result = ReadOnlyRoots(isolate).undefined_value();
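
Both FunctionCallbackArguments call sites switch from address_of_arg_at(1) to address_of_first_argument(), which stays correct under either argument order, and InvokeApiFunction gains a V8_REVERSE_JSARGS layout in which the fixed slots come first and the receiver plus arguments are written in ascending order, instead of descending from the top of the frame. A rough picture (slot names taken from the code above; the exact fixed-slot order is assumed):

    // default:           [ ...fixed slots..., argN, ..., arg1, arg0, receiver ]
    //                      receiver sits at frame_argc - 1; args fill downward
    // V8_REVERSE_JSARGS: [ new_target, target, argc, padding, receiver, arg0, ..., argN ]
    //                      args fill upward from kNumExtraArgs
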
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 1892406305..734b9b634a 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -50,8 +50,7 @@ TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
// 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
TNode<Number> k_number = ChangeUintPtrToTagged(k);
TNode<Object> mapped_value =
- CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
- k_value, k_number, o());
+ Call(context(), callbackfn(), this_arg(), k_value, k_number, o());
Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
// 8. d. Perform ? Set(A, Pk, mapped_value, true).
@@ -544,8 +543,8 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
};
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
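
A mechanical change that repeats across these files: Parameter(Descriptor::kJSActualArgumentsCount) no longer converts implicitly, presumably because ChangeInt32ToIntPtr now takes a typed TNode<Int32T> rather than a SloppyTNode, so each call site spells the cast out. The recurring pattern:

    TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
        UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
    CodeStubArguments args(this, argc);
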
@@ -1071,8 +1070,8 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(
}
TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIncludes, argc, context);
@@ -1112,8 +1111,8 @@ TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
}
TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIndexOf, argc, context);
@@ -1420,9 +1419,9 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// 1. Set element to ? Call(mapperFunction, thisArg , « element,
// sourceIndex, source »).
- element_maybe_smi = CallJS(CodeFactory::Call(isolate()), context,
- mapper_function.value(), this_arg.value(),
- element_maybe_smi, source_index, source);
+ element_maybe_smi =
+ Call(context, mapper_function.value(), this_arg.value(),
+ element_maybe_smi, source_index, source);
}
// iii. Let shouldFlatten be false.
@@ -1541,8 +1540,8 @@ TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat
TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
- const TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
const TNode<Object> receiver = args.GetReceiver();
@@ -1583,8 +1582,8 @@ TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
- const TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
const TNode<Object> receiver = args.GetReceiver();
@@ -1871,8 +1870,8 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
BIND(&call_runtime);
{
- TailCallRuntime(Runtime::kNewArray, context, array_function, array_size,
- array_function, allocation_site);
+ TailCallRuntimeNewArray(context, array_function, array_size, array_function,
+ allocation_site);
}
}
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index acc7721465..592400415b 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -311,8 +311,8 @@ AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
@@ -329,8 +329,8 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
@@ -347,8 +347,8 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 1515605649..b138515af6 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -13,8 +13,6 @@
namespace v8 {
namespace internal {
-using compiler::Node;
-
namespace {
class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
public:
@@ -130,9 +128,11 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
BIND(&if_isnotundefined);
}
- const TNode<Object> iter_result = CallJS(
- CodeFactory::Call(isolate()), context, method, sync_iterator, sent_value);
- GotoIfException(iter_result, &reject_promise, &var_exception);
+ TNode<Object> iter_result;
+ {
+ ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
+ iter_result = Call(context, method, sync_iterator, sent_value);
+ }
TNode<Object> value;
TNode<Oddball> done;
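
ScopedExceptionHandler replaces the call-then-GotoIfException idiom: while the handler is alive, any node that can throw gets an exception edge to the given label, with the exception value stored in the TVariable, which is why the throwing calls now sit inside their own blocks. The shape of the pattern, taken from the hunk above:

    TNode<Object> iter_result;
    {
      ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
      iter_result = Call(context, method, sync_iterator, sent_value);
    }  // handler out of scope: later calls no longer branch to reject_promise
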
@@ -144,10 +144,13 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
CSA_ASSERT(this, IsConstructor(promise_fun));
// Let valueWrapper be PromiseResolve(%Promise%, « value »).
- const TNode<Object> value_wrapper = CallBuiltin(
- Builtins::kPromiseResolve, native_context, promise_fun, value);
// IfAbruptRejectPromise(valueWrapper, promiseCapability).
- GotoIfException(value_wrapper, &reject_promise, &var_exception);
+ TNode<Object> value_wrapper;
+ {
+ ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
+ value_wrapper = CallBuiltin(Builtins::kPromiseResolve, native_context,
+ promise_fun, value);
+ }
// Let onFulfilled be a new built-in function object as defined in
// Async Iterator Value Unwrap Functions.
@@ -200,17 +203,17 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
BIND(&if_slowpath);
{
+ ScopedExceptionHandler handler(this, if_exception, var_exception);
+
// Let nextDone be IteratorComplete(nextResult).
// IfAbruptRejectPromise(nextDone, promiseCapability).
const TNode<Object> done =
GetProperty(context, iter_result, factory()->done_string());
- GotoIfException(done, if_exception, var_exception);
// Let nextValue be IteratorValue(nextResult).
// IfAbruptRejectPromise(nextValue, promiseCapability).
const TNode<Object> value =
GetProperty(context, iter_result, factory()->value_string());
- GotoIfException(value, if_exception, var_exception);
var_value = value;
var_done = done;
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index cfe82594df..1b53e9ca8e 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -65,14 +65,14 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
}
void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
- TNode<Object> target, SloppyTNode<Object> new_target,
+ TNode<Object> target, base::Optional<TNode<Object>> new_target,
TNode<Object> arguments_list, TNode<Context> context) {
Label if_done(this), if_arguments(this), if_array(this),
if_holey_array(this, Label::kDeferred),
if_runtime(this, Label::kDeferred);
// Perform appropriate checks on {target} (and {new_target} first).
- if (new_target == nullptr) {
+ if (!new_target) {
// Check that {target} is Callable.
Label if_target_callable(this),
if_target_not_callable(this, Label::kDeferred);
@@ -102,12 +102,12 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
// Check that {new_target} is a Constructor.
Label if_new_target_constructor(this),
if_new_target_not_constructor(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(new_target), &if_new_target_not_constructor);
- Branch(IsConstructor(CAST(new_target)), &if_new_target_constructor,
+ GotoIf(TaggedIsSmi(*new_target), &if_new_target_not_constructor);
+ Branch(IsConstructor(CAST(*new_target)), &if_new_target_constructor,
&if_new_target_not_constructor);
BIND(&if_new_target_not_constructor);
{
- CallRuntime(Runtime::kThrowNotConstructor, context, new_target);
+ CallRuntime(Runtime::kThrowNotConstructor, context, *new_target);
Unreachable();
}
BIND(&if_new_target_constructor);
@@ -215,12 +215,12 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
BIND(&if_not_double);
{
- if (new_target == nullptr) {
+ if (!new_target) {
Callable callable = CodeFactory::CallVarargs(isolate());
TailCallStub(callable, context, target, args_count, length, elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, length,
+ TailCallStub(callable, context, target, *new_target, args_count, length,
elements);
}
}
@@ -240,7 +240,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
// boxed as HeapNumbers, then tail calls CallVarargs/ConstructVarargs depending
// on whether {new_target} was passed.
void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
- TNode<Object> target, SloppyTNode<Object> new_target,
+ TNode<Object> target, base::Optional<TNode<Object>> new_target,
TNode<FixedDoubleArray> elements, TNode<Int32T> length,
TNode<Int32T> args_count, TNode<Context> context, TNode<Int32T> kind) {
const ElementsKind new_kind = PACKED_ELEMENTS;
@@ -258,19 +258,19 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
new_elements, intptr_length, intptr_length,
barrier_mode);
- if (new_target == nullptr) {
+ if (!new_target) {
Callable callable = CodeFactory::CallVarargs(isolate());
TailCallStub(callable, context, target, args_count, length, new_elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, length,
+ TailCallStub(callable, context, target, *new_target, args_count, length,
new_elements);
}
}
void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
- TNode<Object> target, TNode<Object> new_target, TNode<Object> spread,
- TNode<Int32T> args_count, TNode<Context> context) {
+ TNode<Object> target, base::Optional<TNode<Object>> new_target,
+ TNode<Object> spread, TNode<Int32T> args_count, TNode<Context> context) {
Label if_smiorobject(this), if_double(this),
if_generic(this, Label::kDeferred);
@@ -316,7 +316,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
BIND(&if_generic);
{
- Label if_iterator_fn_not_callable(this, Label::kDeferred);
+ Label if_iterator_fn_not_callable(this, Label::kDeferred),
+ if_iterator_is_null_or_undefined(this, Label::kDeferred);
+
+ GotoIf(IsNullOrUndefined(spread), &if_iterator_is_null_or_undefined);
+
TNode<Object> iterator_fn =
GetProperty(context, spread, IteratorSymbolConstant());
GotoIfNot(TaggedIsCallable(iterator_fn), &if_iterator_fn_not_callable);
@@ -333,6 +337,10 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
BIND(&if_iterator_fn_not_callable);
ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
+
+ BIND(&if_iterator_is_null_or_undefined);
+ CallRuntime(Runtime::kThrowSpreadArgIsNullOrUndefined, context, spread);
+ Unreachable();
}
BIND(&if_smiorobject);
@@ -342,12 +350,12 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
CSA_ASSERT(this, Int32LessThanOrEqual(
length, Int32Constant(FixedArray::kMaxLength)));
- if (new_target == nullptr) {
+ if (!new_target) {
Callable callable = CodeFactory::CallVarargs(isolate());
TailCallStub(callable, context, target, args_count, length, elements);
} else {
Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count, length,
+ TailCallStub(callable, context, target, *new_target, args_count, length,
elements);
}
}
@@ -363,7 +371,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- SloppyTNode<Object> new_target = nullptr;
+ base::Optional<TNode<Object>> new_target = base::nullopt;
TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
@@ -371,7 +379,7 @@ TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- SloppyTNode<Object> new_target = nullptr;
+ base::Optional<TNode<Object>> new_target = base::nullopt;
TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
TNode<Int32T> args_count =
UncheckedCast<Int32T>(Parameter(Descriptor::kArgumentsCount));
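
Switching new_target from a SloppyTNode with a nullptr sentinel to base::Optional<TNode<Object>> makes the call-versus-construct split explicit in the type system: the Call builtins pass base::nullopt, the Construct variants pass a real node, and every use must go through *new_target. The generic path also gains an early IsNullOrUndefined check on the spread, so f(...null) throws the dedicated kThrowSpreadArgIsNullOrUndefined error instead of a generic iterator failure. The dispatch shape shared by all three helpers (condensed from the hunks above):

    if (!new_target) {
      TailCallStub(CodeFactory::CallVarargs(isolate()), context, target,
                   args_count, length, elements);
    } else {
      TailCallStub(CodeFactory::ConstructVarargs(isolate()), context, target,
                   *new_target, args_count, length, elements);
    }
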
diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h
index a15f31dd09..d54e4405e0 100644
--- a/deps/v8/src/builtins/builtins-call-gen.h
+++ b/deps/v8/src/builtins/builtins-call-gen.h
@@ -16,16 +16,17 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
void CallOrConstructWithArrayLike(TNode<Object> target,
- SloppyTNode<Object> new_target,
+ base::Optional<TNode<Object>> new_target,
TNode<Object> arguments_list,
TNode<Context> context);
void CallOrConstructDoubleVarargs(TNode<Object> target,
- SloppyTNode<Object> new_target,
+ base::Optional<TNode<Object>> new_target,
TNode<FixedDoubleArray> elements,
TNode<Int32T> length,
TNode<Int32T> args_count,
TNode<Context> context, TNode<Int32T> kind);
- void CallOrConstructWithSpread(TNode<Object> target, TNode<Object> new_target,
+ void CallOrConstructWithSpread(TNode<Object> target,
+ base::Optional<TNode<Object>> new_target,
TNode<Object> spread, TNode<Int32T> args_count,
TNode<Context> context);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index df758e0e9f..df0ebce993 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -14,6 +14,7 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -150,8 +151,7 @@ void BaseCollectionsAssembler::AddConstructorEntry(
TNode<Object> add_function, TNode<Object> key_value,
Label* if_may_have_side_effects, Label* if_exception,
TVariable<Object>* var_exception) {
- compiler::CodeAssemblerScopedExceptionHandler handler(this, if_exception,
- var_exception);
+ compiler::ScopedExceptionHandler handler(this, if_exception, var_exception);
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
if (variant == kMap || variant == kWeakMap) {
TorqueStructKeyValuePair pair =
@@ -161,12 +161,10 @@ void BaseCollectionsAssembler::AddConstructorEntry(
: LoadKeyValuePair(context, key_value);
TNode<Object> key_n = pair.key;
TNode<Object> value_n = pair.value;
- CallJS(CodeFactory::Call(isolate()), context, add_function, collection,
- key_n, value_n);
+ Call(context, add_function, collection, key_n, value_n);
} else {
DCHECK(variant == kSet || variant == kWeakSet);
- CallJS(CodeFactory::Call(isolate()), context, add_function, collection,
- key_value);
+ Call(context, add_function, collection, key_value);
}
}
@@ -854,8 +852,8 @@ TNode<HeapObject> CollectionsBuiltinsAssembler::AllocateTable(
TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GenerateConstructor(kMap, isolate()->factory()->Map_string(), new_target,
@@ -864,8 +862,8 @@ TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GenerateConstructor(kSet, isolate()->factory()->Set_string(), new_target,
@@ -2035,8 +2033,7 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
// Invoke the {callback} passing the {entry_key}, {entry_value} and the
// {receiver}.
- CallJS(CodeFactory::Call(isolate()), context, callback, this_arg,
- entry_value, entry_key, receiver);
+ Call(context, callback, this_arg, entry_value, entry_key, receiver);
// Continue with the next entry.
var_index = index;
@@ -2266,8 +2263,7 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
NextSkipHoles<OrderedHashSet>(table, index, &done_loop);
// Invoke the {callback} passing the {entry_key} (twice) and the {receiver}.
- CallJS(CodeFactory::Call(isolate()), context, callback, this_arg, entry_key,
- entry_key, receiver);
+ Call(context, callback, this_arg, entry_key, entry_key, receiver);
// Continue with the next entry.
var_index = index;
@@ -2513,8 +2509,9 @@ TNode<HeapObject> WeakCollectionsBuiltinsAssembler::AllocateTable(
TNode<FixedArray> table = CAST(
AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation));
- RootIndex map_root_index = EphemeronHashTableShape::GetMapRootIndex();
- StoreMapNoWriteBarrier(table, map_root_index);
+ TNode<Map> map =
+ HeapConstant(EphemeronHashTableShape::GetMap(ReadOnlyRoots(isolate())));
+ StoreMapNoWriteBarrier(table, map);
StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex,
SmiConstant(0), SKIP_WRITE_BARRIER);
StoreFixedArrayElement(table,
@@ -2703,8 +2700,8 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string(),
@@ -2713,8 +2710,8 @@ TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string(),
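
Besides the shared Call/argc cleanups, AllocateTable now embeds the ephemeron hash table map directly as a HeapConstant fetched through ReadOnlyRoots, rather than storing it via a RootIndex helper:

    TNode<Map> map =
        HeapConstant(EphemeronHashTableShape::GetMap(ReadOnlyRoots(isolate())));
    StoreMapNoWriteBarrier(table, map);
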
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 01f3db63f3..c706ce9306 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -38,7 +38,7 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- SloppyTNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
@@ -46,7 +46,7 @@ TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- SloppyTNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
TNode<Int32T> args_count =
UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
@@ -160,8 +160,8 @@ TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
}
TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
- SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
- SloppyTNode<JSReceiver> new_target) {
+ TNode<Context> context, TNode<JSFunction> target,
+ TNode<JSReceiver> new_target) {
TVARIABLE(JSObject, var_obj);
Label call_runtime(this), end(this);
@@ -177,8 +177,8 @@ TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
}
TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
- SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
- SloppyTNode<JSReceiver> new_target, Label* call_runtime) {
+ TNode<Context> context, TNode<JSFunction> target,
+ TNode<JSReceiver> new_target, Label* call_runtime) {
// Verify that the new target is a JSFunction.
Label end(this);
TNode<JSFunction> new_target_func =
@@ -284,7 +284,7 @@ TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
}
TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
- TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector, TNode<TaggedIndex> slot,
TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context) {
Label call_runtime(this, Label::kDeferred), end(this);
@@ -311,8 +311,7 @@ TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
BIND(&call_runtime);
{
result = CAST(CallRuntime(Runtime::kCreateRegExpLiteral, context,
- maybe_feedback_vector, SmiTag(Signed(slot)),
- pattern, flags));
+ maybe_feedback_vector, slot, pattern, flags));
Goto(&end);
}
@@ -323,7 +322,7 @@ TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
TNode<HeapObject> maybe_feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
- TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Object> pattern = CAST(Parameter(Descriptor::kPattern));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -333,7 +332,7 @@ TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
}
TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context, Label* call_runtime,
AllocationSiteMode allocation_site_mode) {
Label zero_capacity(this), cow_elements(this), fast_elements(this),
@@ -356,7 +355,7 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
- TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<ArrayBoilerplateDescription> constant_elements =
CAST(Parameter(Descriptor::kConstantElements));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -371,13 +370,12 @@ TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
int const flags =
AggregateLiteral::kDisableMementos | AggregateLiteral::kIsShallow;
Return(CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
- SmiTag(Signed(slot)), constant_elements,
- SmiConstant(flags)));
+ slot, constant_elements, SmiConstant(flags)));
}
}
TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context) {
// Array literals always have a valid AllocationSite to properly track
// elements transitions.
@@ -395,8 +393,10 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
// TODO(cbruni): create the AllocationSite in CSA.
BIND(&initialize_allocation_site);
{
- allocation_site =
- CreateAllocationSiteInFeedbackVector(feedback_vector, slot);
+ allocation_site = CreateAllocationSiteInFeedbackVector(
+ feedback_vector,
+ // TODO(v8:10047): pass slot as TaggedIndex here
+ Unsigned(TaggedIndexToIntPtr(slot)));
Goto(&create_empty_array);
}
@@ -421,7 +421,7 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
- TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSArray> result =
EmitCreateEmptyArrayLiteral(feedback_vector, slot, context);
@@ -429,7 +429,7 @@ TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
}
TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
Label* call_runtime) {
TNode<Object> maybe_allocation_site =
CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
@@ -609,7 +609,7 @@ TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
Label call_runtime(this);
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
- TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> copy =
EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime);
Return(copy);
@@ -619,8 +619,8 @@ TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
CAST(Parameter(Descriptor::kObjectBoilerplateDescription));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
- SmiTag(Signed(slot)), object_boilerplate_description, flags);
+ TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector, slot,
+ object_boilerplate_description, flags);
}
// Used by the CreateEmptyObjectLiteral bytecode and the Object constructor.
@@ -644,8 +644,8 @@ TNode<JSObject> ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
// ES #sec-object-constructor
TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
int const kValueArg = 0;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
@@ -694,8 +694,8 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
// ES #sec-number-constructor
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
// 1. If no arguments were passed to this function invocation, let n be +0.
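
Feedback-vector slots in the literal-creation builtins move from Smi parameters untagged to UintPtrT onto a dedicated TaggedIndex type, which flows unchanged into CallRuntime/TailCallRuntime and removes the SmiTag(Signed(slot)) round-trips; the one remaining consumer that still wants a UintPtrT converts explicitly under a TODO(v8:10047). Before and after at the parameter boundary:

    // before: TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
    // after:  TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
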
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 761a6c7adb..d6a698ddf6 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -21,30 +21,30 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
ScopeType scope_type);
TNode<JSRegExp> EmitCreateRegExpLiteral(
- TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector, TNode<TaggedIndex> slot,
TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context);
TNode<JSArray> EmitCreateShallowArrayLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context, Label* call_runtime,
AllocationSiteMode allocation_site_mode);
TNode<JSArray> EmitCreateEmptyArrayLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context);
TNode<HeapObject> EmitCreateShallowObjectLiteral(
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
Label* call_runtime);
TNode<JSObject> EmitCreateEmptyObjectLiteral(TNode<Context> context);
- TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
- SloppyTNode<JSFunction> target,
- SloppyTNode<JSReceiver> new_target);
+ TNode<JSObject> EmitFastNewObject(TNode<Context> context,
+ TNode<JSFunction> target,
+ TNode<JSReceiver> new_target);
- TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
- SloppyTNode<JSFunction> target,
- SloppyTNode<JSReceiver> new_target,
+ TNode<JSObject> EmitFastNewObject(TNode<Context> context,
+ TNode<JSFunction> target,
+ TNode<JSReceiver> new_target,
Label* call_runtime);
};
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 4d0fead861..e524f39b5f 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -39,12 +39,9 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
{
// Invoke the {exotic_to_prim} method on the {input} with a string
// representation of the {hint}.
- Callable callable =
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined);
TNode<String> hint_string =
HeapConstant(factory()->ToPrimitiveHintString(hint));
- TNode<Object> result =
- CallJS(callable, context, exotic_to_prim, input, hint_string);
+ TNode<Object> result = Call(context, exotic_to_prim, input, hint_string);
// Verify that the {result} is actually a primitive.
Label if_resultisprimitive(this),
@@ -248,9 +245,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
BIND(&if_methodiscallable);
{
// Call the {method} on the {input}.
- Callable callable = CodeFactory::Call(
- isolate(), ConvertReceiverMode::kNotNullOrUndefined);
- TNode<Object> result = CallJS(callable, context, method, input);
+ TNode<Object> result = Call(context, method, input);
var_result = result;
// Return the {result} if it is a primitive.
@@ -381,20 +376,6 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
}
}
-TF_BUILTIN(ToInteger, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(ToInteger(context, input, kNoTruncation));
-}
-
-TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(ToInteger(context, input, kTruncateMinusZero));
-}
-
// ES6 section 7.1.13 ToObject (argument)
TF_BUILTIN(ToObject, CodeStubAssembler) {
Label if_smi(this, Label::kDeferred), if_jsreceiver(this),
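
With truncation folded into ToInteger_Inline in base.tq above, ToInteger and ToInteger_TruncateMinusZero no longer differ, so both builtins are deleted here; their TypeConversion descriptor entries disappear from builtins-definitions.h below.
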
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 1e2cfb9a31..7ed38062c8 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -180,7 +180,7 @@ namespace internal {
TFC(NewArgumentsElements, NewArgumentsElements) \
\
/* Debugger */ \
- TFJ(DebugBreakTrampoline, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(DebugBreakTrampoline, kDontAdaptArgumentsSentinel) \
ASM(FrameDropperTrampoline, FrameDropperTrampoline) \
ASM(HandleDebuggerStatement, ContextOnly) \
\
@@ -200,8 +200,6 @@ namespace internal {
TFC(ToNumberConvertBigInt, TypeConversion) \
TFC(ToNumeric, TypeConversion) \
TFC(NumberToString, TypeConversion) \
- TFC(ToInteger, TypeConversion) \
- TFC(ToInteger_TruncateMinusZero, TypeConversion) \
TFC(ToLength, TypeConversion) \
TFC(Typeof, Typeof) \
TFC(GetSuperConstructor, Typeof) \
@@ -311,20 +309,20 @@ namespace internal {
kFromIndex) \
TFS(ArrayIncludesHoleyDoubles, kElements, kSearchElement, kLength, \
kFromIndex) \
- TFJ(ArrayIncludes, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayIncludes, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.indexof */ \
TFS(ArrayIndexOfSmiOrObject, kElements, kSearchElement, kLength, kFromIndex) \
TFS(ArrayIndexOfPackedDoubles, kElements, kSearchElement, kLength, \
kFromIndex) \
TFS(ArrayIndexOfHoleyDoubles, kElements, kSearchElement, kLength, \
kFromIndex) \
- TFJ(ArrayIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayIndexOf, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.pop */ \
CPP(ArrayPop) \
- TFJ(ArrayPrototypePop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypePop, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.push */ \
CPP(ArrayPush) \
- TFJ(ArrayPrototypePush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypePush, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.shift */ \
CPP(ArrayShift) \
/* ES6 #sec-array.prototype.unshift */ \
@@ -346,9 +344,9 @@ namespace internal {
TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \
kMapperFunction, kThisArg) \
/* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat */ \
- TFJ(ArrayPrototypeFlat, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypeFlat, kDontAdaptArgumentsSentinel) \
/* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap */ \
- TFJ(ArrayPrototypeFlatMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ArrayPrototypeFlatMap, kDontAdaptArgumentsSentinel) \
\
/* ArrayBuffer */ \
/* ES #sec-arraybuffer-constructor */ \
@@ -511,8 +509,7 @@ namespace internal {
ASM(FunctionPrototypeApply, JSTrampoline) \
CPP(FunctionPrototypeBind) \
/* ES6 #sec-function.prototype.bind */ \
- TFJ(FastFunctionPrototypeBind, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(FastFunctionPrototypeBind, kDontAdaptArgumentsSentinel) \
ASM(FunctionPrototypeCall, JSTrampoline) \
/* ES6 #sec-function.prototype-@@hasinstance */ \
TFJ(FunctionPrototypeHasInstance, 1, kReceiver, kV) \
@@ -526,13 +523,11 @@ namespace internal {
TFS(CreateGeneratorObject, kClosure, kReceiver) \
CPP(GeneratorFunctionConstructor) \
/* ES6 #sec-generator.prototype.next */ \
- TFJ(GeneratorPrototypeNext, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(GeneratorPrototypeNext, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-generator.prototype.return */ \
- TFJ(GeneratorPrototypeReturn, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(GeneratorPrototypeReturn, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-generator.prototype.throw */ \
- TFJ(GeneratorPrototypeThrow, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(GeneratorPrototypeThrow, kDontAdaptArgumentsSentinel) \
CPP(AsyncFunctionConstructor) \
\
/* Iterator Protocol */ \
@@ -593,7 +588,7 @@ namespace internal {
\
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
- TFJ(MapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(MapConstructor, kDontAdaptArgumentsSentinel) \
TFJ(MapPrototypeSet, 2, kReceiver, kKey, kValue) \
TFJ(MapPrototypeDelete, 1, kReceiver, kKey) \
TFJ(MapPrototypeGet, 1, kReceiver, kKey) \
@@ -604,7 +599,7 @@ namespace internal {
/* ES #sec-get-map.prototype.size */ \
TFJ(MapPrototypeGetSize, 0, kReceiver) \
/* ES #sec-map.prototype.forEach */ \
- TFJ(MapPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(MapPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-map.prototype.keys */ \
TFJ(MapPrototypeKeys, 0, kReceiver) \
/* ES #sec-map.prototype.values */ \
@@ -616,7 +611,7 @@ namespace internal {
/* Number */ \
TFC(AllocateHeapNumber, AllocateHeapNumber) \
/* ES #sec-number-constructor */ \
- TFJ(NumberConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(NumberConstructor, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-number.isfinite */ \
TFJ(NumberIsFinite, 1, kReceiver, kNumber) \
/* ES6 #sec-number.isinteger */ \
@@ -663,18 +658,17 @@ namespace internal {
\
/* Object */ \
/* ES #sec-object-constructor */ \
- TFJ(ObjectConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(ObjectAssign, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectConstructor, kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectAssign, kDontAdaptArgumentsSentinel) \
/* ES #sec-object.create */ \
- TFJ(ObjectCreate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectCreate, kDontAdaptArgumentsSentinel) \
CPP(ObjectDefineGetter) \
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
TFJ(ObjectEntries, 1, kReceiver, kObject) \
CPP(ObjectFreeze) \
- TFJ(ObjectGetOwnPropertyDescriptor, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ObjectGetOwnPropertyDescriptor, kDontAdaptArgumentsSentinel) \
CPP(ObjectGetOwnPropertyDescriptors) \
TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \
CPP(ObjectGetOwnPropertySymbols) \
@@ -747,7 +741,7 @@ namespace internal {
ASM(RegExpInterpreterTrampoline, CCall) \
\
/* Set */ \
- TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
TFJ(SetPrototypeAdd, 1, kReceiver, kKey) \
TFJ(SetPrototypeDelete, 1, kReceiver, kKey) \
@@ -757,7 +751,7 @@ namespace internal {
/* ES #sec-get-set.prototype.size */ \
TFJ(SetPrototypeGetSize, 0, kReceiver) \
/* ES #sec-set.prototype.foreach */ \
- TFJ(SetPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(SetPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-set.prototype.values */ \
TFJ(SetPrototypeValues, 0, kReceiver) \
/* ES #sec-%setiteratorprototype%.next */ \
@@ -786,12 +780,11 @@ namespace internal {
/* ES #sec-string.fromcodepoint */ \
CPP(StringFromCodePoint) \
/* ES6 #sec-string.fromcharcode */ \
- TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringFromCharCode, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.includes */ \
- TFJ(StringPrototypeIncludes, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeIncludes, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.indexof */ \
- TFJ(StringPrototypeIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeIndexOf, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.lastindexof */ \
CPP(StringPrototypeLastIndexOf) \
/* ES6 #sec-string.prototype.match */ \
@@ -805,11 +798,10 @@ namespace internal {
/* ES6 #sec-string.prototype.search */ \
TFJ(StringPrototypeSearch, 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.split */ \
- TFJ(StringPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimStart, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrim, kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimEnd, kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimStart, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.raw */ \
CPP(StringRaw) \
\
@@ -825,7 +817,7 @@ namespace internal {
/* ES #sec-typedarray-constructors */ \
TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
TFJ(GenericLazyDeoptContinuation, 1, kReceiver, kResult) \
- TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(TypedArrayConstructor, kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
TFJ(TypedArrayPrototypeByteLength, 0, kReceiver) \
@@ -854,14 +846,19 @@ namespace internal {
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
TFJ(TypedArrayPrototypeToStringTag, 0, kReceiver) \
/* ES6 %TypedArray%.prototype.map */ \
- TFJ(TypedArrayPrototypeMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(TypedArrayPrototypeMap, kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
ASM(WasmCompileLazy, Dummy) \
+ ASM(WasmDebugBreak, Dummy) \
TFC(WasmAtomicNotify, WasmAtomicNotify) \
- TFC(WasmI32AtomicWait, WasmI32AtomicWait) \
- TFC(WasmI64AtomicWait, WasmI64AtomicWait) \
+ TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
+ TFC(WasmI32AtomicWait64, WasmI32AtomicWait64) \
+ TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
+ TFC(WasmI64AtomicWait64, WasmI64AtomicWait64) \
TFC(WasmMemoryGrow, WasmMemoryGrow) \
+ TFC(WasmTableInit, WasmTableInit) \
+ TFC(WasmTableCopy, WasmTableCopy) \
TFC(WasmTableGet, WasmTableGet) \
TFC(WasmTableSet, WasmTableSet) \
TFC(WasmStackGuard, NoContext) \
@@ -881,9 +878,11 @@ namespace internal {
TFS(ThrowWasmTrapDataSegmentDropped) \
TFS(ThrowWasmTrapElemSegmentDropped) \
TFS(ThrowWasmTrapTableOutOfBounds) \
+ TFS(ThrowWasmTrapBrOnExnNullRef) \
+ TFS(ThrowWasmTrapRethrowNullRef) \
\
/* WeakMap */ \
- TFJ(WeakMapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
TFS(WeakMapLookupHashIndex, kTable, kKey) \
TFJ(WeakMapGet, 1, kReceiver, kKey) \
TFJ(WeakMapPrototypeHas, 1, kReceiver, kKey) \
@@ -891,7 +890,7 @@ namespace internal {
TFJ(WeakMapPrototypeDelete, 1, kReceiver, kKey) \
\
/* WeakSet */ \
- TFJ(WeakSetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(WeakSetConstructor, kDontAdaptArgumentsSentinel) \
TFJ(WeakSetPrototypeHas, 1, kReceiver, kKey) \
TFJ(WeakSetPrototypeAdd, 1, kReceiver, kValue) \
TFJ(WeakSetPrototypeDelete, 1, kReceiver, kValue) \
@@ -913,16 +912,13 @@ namespace internal {
CPP(AsyncGeneratorFunctionConstructor) \
/* AsyncGenerator.prototype.next ( value ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-next */ \
- TFJ(AsyncGeneratorPrototypeNext, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(AsyncGeneratorPrototypeNext, kDontAdaptArgumentsSentinel) \
/* AsyncGenerator.prototype.return ( value ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-return */ \
- TFJ(AsyncGeneratorPrototypeReturn, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(AsyncGeneratorPrototypeReturn, kDontAdaptArgumentsSentinel) \
/* AsyncGenerator.prototype.throw ( exception ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-throw */ \
- TFJ(AsyncGeneratorPrototypeThrow, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(AsyncGeneratorPrototypeThrow, kDontAdaptArgumentsSentinel) \
\
/* Await (proposal-async-iteration/#await), with resume behaviour */ \
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
@@ -980,16 +976,16 @@ namespace internal {
CPP(Trace) \
\
/* Weak refs */ \
- CPP(FinalizationGroupCleanupIteratorNext) \
- CPP(FinalizationGroupCleanupSome) \
- CPP(FinalizationGroupConstructor) \
- CPP(FinalizationGroupRegister) \
- CPP(FinalizationGroupUnregister) \
+ CPP(FinalizationRegistryCleanupIteratorNext) \
+ CPP(FinalizationRegistryCleanupSome) \
+ CPP(FinalizationRegistryConstructor) \
+ CPP(FinalizationRegistryRegister) \
+ CPP(FinalizationRegistryUnregister) \
CPP(WeakRefConstructor) \
CPP(WeakRefDeref) \
\
/* Async modules */ \
- TFJ(AsyncModuleEvaluate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(AsyncModuleEvaluate, kDontAdaptArgumentsSentinel) \
\
/* CallAsyncModule* are spec anonymous functions */ \
CPP(CallAsyncModuleFulfilled) \
@@ -1041,11 +1037,9 @@ namespace internal {
/* ecma402 #sec-intl-listformat-constructor */ \
CPP(ListFormatConstructor) \
/* ecma402 #sec-intl-list-format.prototype.format */ \
- TFJ(ListFormatPrototypeFormat, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ListFormatPrototypeFormat, kDontAdaptArgumentsSentinel) \
/* ecma402 #sec-intl-list-format.prototype.formattoparts */ \
- TFJ(ListFormatPrototypeFormatToParts, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ListFormatPrototypeFormatToParts, kDontAdaptArgumentsSentinel) \
/* ecma402 #sec-intl.listformat.prototype.resolvedoptions */ \
CPP(ListFormatPrototypeResolvedOptions) \
/* ecma402 #sec-intl.ListFormat.supportedlocalesof */ \
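
Three independent themes run through this header: kDontAdaptArgumentsSentinel is now spelled unqualified (evidently hoisted or aliased out of SharedFunctionInfo), which lets many TFJ entries collapse back onto one line; the Wasm list gains WasmDebugBreak, WasmTableInit/WasmTableCopy, 32- and 64-bit variants of the i32/i64 atomic waits, and two new null-ref trap builtins; and the weak-refs builtins are renamed from FinalizationGroup to FinalizationRegistry, tracking the renamed class in the WeakRefs proposal.
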
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index c2eb44debe..174b89795f 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -13,7 +13,34 @@
namespace v8 {
namespace internal {
+#define REVERSE_0(a) a,
+#define REVERSE_1(a, b) b, a,
+#define REVERSE_2(a, b, c) c, b, a,
+#define REVERSE_3(a, b, c, d) d, c, b, a,
+#define REVERSE_4(a, b, c, d, e) e, d, c, b, a,
+#define REVERSE_5(a, b, c, d, e, f) f, e, d, c, b, a,
+#define REVERSE_6(a, b, c, d, e, f, g) g, f, e, d, c, b, a,
+#define REVERSE_7(a, b, c, d, e, f, g, h) h, g, f, e, d, c, b, a,
+#define REVERSE_8(a, b, c, d, e, f, g, h, i) i, h, g, f, e, d, c, b, a,
+#define REVERSE_kDontAdaptArgumentsSentinel(...)
+#define REVERSE(N, ...) REVERSE_##N(__VA_ARGS__)
+
// Define interface descriptors for builtins with JS linkage.
+#ifdef V8_REVERSE_JSARGS
+#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
+ struct Builtin_##Name##_InterfaceDescriptor { \
+ enum ParameterIndices { \
+ kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
+ REVERSE_##Argc(__VA_ARGS__) kJSNewTarget, \
+ kJSActualArgumentsCount, \
+ kContext, \
+ kParameterCount, \
+ }; \
+ static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4), \
+ "Inconsistent set of arguments"); \
+ static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
+ };
+#else
#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
struct Builtin_##Name##_InterfaceDescriptor { \
enum ParameterIndices { \
@@ -28,6 +55,7 @@ namespace internal {
"Inconsistent set of arguments"); \
static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
};
+#endif
// Define interface descriptors for builtins with StubCall linkage.
#define DEFINE_TFC_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
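
The REVERSE_N helpers flip the declared JS parameters when V8_REVERSE_JSARGS is set, so the descriptor's enum indices match the reversed on-stack order while the TFJ declarations stay readable in source order; REVERSE_kDontAdaptArgumentsSentinel expands to nothing because sentinel builtins declare no fixed parameters. An illustrative expansion for Argc = 2 (the varargs include the receiver):

    // REVERSE_2(kReceiver, kA, kB)  =>  kB, kA, kReceiver,
    // so under V8_REVERSE_JSARGS the enum reads:
    //   { kJSTarget = -1, kB, kA, kReceiver, kJSNewTarget,
    //     kJSActualArgumentsCount, kContext, kParameterCount }
    // and the static_assert holds: Argc (2) == kParameterCount (6) - 4.
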
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 689c7f1342..8693cd61f4 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -56,12 +56,16 @@ void GeneratorBuiltinsAssembler::InnerResume(
SmiConstant(resume_mode));
// Resume the {receiver} using our trampoline.
+ // Close the generator if there was an exception.
TVARIABLE(Object, var_exception);
Label if_exception(this, Label::kDeferred), if_final_return(this);
- TNode<Object> result = CallStub(CodeFactory::ResumeGenerator(isolate()),
- context, value, receiver);
- // Make sure we close the generator if there was an exception.
- GotoIfException(result, &if_exception, &var_exception);
+ TNode<Object> result;
+ {
+ compiler::ScopedExceptionHandler handler(this, &if_exception,
+ &var_exception);
+ result = CallStub(CodeFactory::ResumeGenerator(isolate()), context, value,
+ receiver);
+ }
// If the generator is not suspended (i.e., its state is 'executing'),
// close it and wrap the return value in IteratorResult.
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 880f665c02..6f4f54656d 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -42,7 +42,7 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
TNode<JSObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Number> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Smi> key = CAST(Parameter(Descriptor::kKey));
Label runtime(this, Label::kDeferred);
TNode<FixedArrayBase> elements = LoadElements(object);
@@ -57,7 +57,7 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
TNode<JSObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Number> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Smi> key = CAST(Parameter(Descriptor::kKey));
Label runtime(this, Label::kDeferred);
TNode<FixedArrayBase> elements = LoadElements(object);
@@ -266,11 +266,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
}
- TNode<BoolT> ShouldSkipFPRegs(SloppyTNode<Smi> mode) {
+ TNode<BoolT> ShouldSkipFPRegs(TNode<Smi> mode) {
return TaggedEqual(mode, SmiConstant(kDontSaveFPRegs));
}
- TNode<BoolT> ShouldEmitRememberSet(SloppyTNode<Smi> remembered_set) {
+ TNode<BoolT> ShouldEmitRememberSet(TNode<Smi> remembered_set) {
return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
}
@@ -766,7 +766,7 @@ TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
}
TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
- TNode<HeapObject> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<JSReceiver> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label if_empty(this), if_runtime(this, Label::kDeferred);
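
The rest of this file is SloppyTNode tightening: parameters that were only ever Smis or JSReceivers now say so, and CAST inserts a checked downcast in debug builds. Two examples from the hunks above:

    TNode<Smi> key = CAST(Parameter(Descriptor::kKey));        // was TNode<Number>
    TNode<JSReceiver> receiver = CAST(Parameter(Descriptor::kReceiver));  // was HeapObject
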
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index e9dca2dbc3..94a79d2a32 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -23,53 +23,38 @@ TNode<Object> IteratorBuiltinsAssembler::GetIteratorMethod(
return GetProperty(context, object, factory()->iterator_symbol());
}
-IteratorRecord IteratorBuiltinsAssembler::GetIterator(
- SloppyTNode<Context> context, SloppyTNode<Object> object,
- Label* if_exception, TVariable<Object>* exception) {
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(TNode<Context> context,
+ TNode<Object> object) {
TNode<Object> method = GetIteratorMethod(context, object);
- return GetIterator(context, object, method, if_exception, exception);
+ return GetIterator(context, object, method);
}
-IteratorRecord IteratorBuiltinsAssembler::GetIterator(
- TNode<Context> context, TNode<Object> object, TNode<Object> method,
- Label* if_exception, TVariable<Object>* exception) {
- GotoIfException(method, if_exception, exception);
-
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(TNode<Context> context,
+ TNode<Object> object,
+ TNode<Object> method) {
Label if_not_callable(this, Label::kDeferred), if_callable(this);
GotoIf(TaggedIsSmi(method), &if_not_callable);
Branch(IsCallable(CAST(method)), &if_callable, &if_not_callable);
BIND(&if_not_callable);
- {
- TNode<Object> ret =
- CallRuntime(Runtime::kThrowIteratorError, context, object);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ CallRuntime(Runtime::kThrowIteratorError, context, object);
+ Unreachable();
BIND(&if_callable);
{
- Callable callable = CodeFactory::Call(isolate());
- TNode<Object> iterator = CallJS(callable, context, method, object);
- GotoIfException(iterator, if_exception, exception);
+ TNode<Object> iterator = Call(context, method, object);
Label get_next(this), if_notobject(this, Label::kDeferred);
GotoIf(TaggedIsSmi(iterator), &if_notobject);
Branch(IsJSReceiver(CAST(iterator)), &get_next, &if_notobject);
BIND(&if_notobject);
- {
- TNode<Object> ret =
- CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
+ Unreachable();
BIND(&get_next);
- const TNode<Object> next =
+ TNode<Object> next =
GetProperty(context, iterator, factory()->next_string());
- GotoIfException(next, if_exception, exception);
-
return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
TNode<Object>::UncheckedCast(next)};
}
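
GetIterator drops its exception out-parameters entirely: the throw paths now call the runtime and fall into Unreachable(), and callers that need an exception edge wrap the call in a ScopedExceptionHandler instead. The signature change, before and after:

    // before: IteratorRecord GetIterator(SloppyTNode<Context>, SloppyTNode<Object>,
    //                                    Label* if_exception, TVariable<Object>* exception);
    // after:  IteratorRecord GetIterator(TNode<Context> context, TNode<Object> object);
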
@@ -77,14 +62,10 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(
TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
TNode<Context> context, const IteratorRecord& iterator, Label* if_done,
- base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_exception,
- TVariable<Object>* exception) {
+ base::Optional<TNode<Map>> fast_iterator_result_map) {
DCHECK_NOT_NULL(if_done);
// 1. a. Let result be ? Invoke(iterator, "next", « »).
- Callable callable = CodeFactory::Call(isolate());
- TNode<Object> result =
- CallJS(callable, context, iterator.next, iterator.object);
- GotoIfException(result, if_exception, exception);
+ TNode<Object> result = Call(context, iterator.next, iterator.object);
// 3. If Type(result) is not Object, throw a TypeError exception.
Label if_notobject(this, Label::kDeferred), return_result(this);
@@ -117,17 +98,12 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
// 2. Return ToBoolean(? Get(iterResult, "done")).
TNode<Object> done =
GetProperty(context, heap_object_result, factory()->done_string());
- GotoIfException(done, if_exception, exception);
BranchIfToBooleanIsTrue(done, if_done, &return_result);
}
BIND(&if_notobject);
- {
- TNode<Object> ret =
- CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
+ Unreachable();
BIND(&return_result);
return CAST(heap_object_result);
@@ -135,8 +111,7 @@ TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
TNode<Object> IteratorBuiltinsAssembler::IteratorValue(
TNode<Context> context, TNode<JSReceiver> result,
- base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_exception,
- TVariable<Object>* exception) {
+ base::Optional<TNode<Map>> fast_iterator_result_map) {
Label exit(this);
TVARIABLE(Object, var_value);
if (fast_iterator_result_map) {
@@ -151,13 +126,8 @@ TNode<Object> IteratorBuiltinsAssembler::IteratorValue(
}
// Generic iterator result case:
- {
- TNode<Object> value =
- GetProperty(context, result, factory()->value_string());
- GotoIfException(value, if_exception, exception);
- var_value = value;
- Goto(&exit);
- }
+ var_value = GetProperty(context, result, factory()->value_string());
+ Goto(&exit);
BIND(&exit);
return var_value.value();
@@ -174,23 +144,24 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
CSA_ASSERT(this, IsJSReceiver(iterator.object));
// Let return be ? GetMethod(iterator, "return").
- TNode<Object> method =
- GetProperty(context, iterator.object, factory()->return_string());
- GotoIfException(method, if_exception, exception);
+ TNode<Object> method;
+ {
+ compiler::ScopedExceptionHandler handler(this, if_exception, exception);
+ method = GetProperty(context, iterator.object, factory()->return_string());
+ }
// If return is undefined, return Completion(completion).
GotoIf(Word32Or(IsUndefined(method), IsNull(method)), if_exception);
{
// Let innerResult be Call(return, iterator, « »).
- // If an exception occurs, the original exception remains bound
- TNode<Object> inner_result =
- CallJS(CodeFactory::Call(isolate()), context, method, iterator.object);
- GotoIfException(inner_result, if_exception, nullptr);
-
- // (If completion.[[Type]] is throw) return Completion(completion).
- Goto(if_exception);
+ // If an exception occurs, the original exception remains bound.
+ compiler::ScopedExceptionHandler handler(this, if_exception, nullptr);
+ Call(context, method, iterator.object);
}
+
+ // (If completion.[[Type]] is throw) return Completion(completion).
+ Goto(if_exception);
}
void IteratorBuiltinsAssembler::IteratorCloseOnException(
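
The new compiler::ScopedExceptionHandler is the piece that makes the deletions above possible: while a handler is in scope, an exception raised by any generated call inside the scope branches to if_exception with the value stored in *exception, instead of requiring a GotoIfException after each individual call. A rough standalone analogue of the RAII shape in plain C++ (the handler stack and Guarded wrapper are inventions for illustration, not the CSA design):

    #include <exception>
    #include <functional>
    #include <iostream>
    #include <stdexcept>
    #include <vector>

    class ScopedHandler {
     public:
      using Handler = std::function<void(const std::exception&)>;

      explicit ScopedHandler(Handler h) { stack().push_back(std::move(h)); }
      ~ScopedHandler() { stack().pop_back(); }

      // Run `call`; if it throws, route the exception to the innermost
      // handler currently in scope (instead of checking after the call).
      static void Guarded(const std::function<void()>& call) {
        try {
          call();
        } catch (const std::exception& e) {
          stack().back()(e);
        }
      }

     private:
      static std::vector<Handler>& stack() {
        static std::vector<Handler> s;
        return s;
      }
    };

    int main() {
      // Mirrors IteratorCloseOnException: the first region forwards the
      // exception; the second discards the inner failure so the original
      // exception remains the one reported.
      ScopedHandler outer([](const std::exception& e) {
        std::cout << "propagated: " << e.what() << "\n";
      });
      ScopedHandler::Guarded([] { throw std::runtime_error("GetMethod"); });

      ScopedHandler swallow([](const std::exception&) { /* keep original */ });
      ScopedHandler::Guarded([] { throw std::runtime_error("inner Call"); });
    }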
@@ -317,10 +288,13 @@ TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
{
// 1. Let error be ThrowCompletion(a newly created TypeError object).
TVARIABLE(Object, var_exception);
- TNode<Object> ret = CallRuntime(
- Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kIterableYieldedNonString), next_value);
- GotoIfException(ret, &if_exception, &var_exception);
+ {
+ compiler::ScopedExceptionHandler handler(this, &if_exception,
+ &var_exception);
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kIterableYieldedNonString),
+ next_value);
+ }
Unreachable();
// 2. Return ? IteratorClose(iteratorRecord, error).
@@ -466,13 +440,15 @@ TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
IteratorBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Smi> callSlot = CAST(Parameter(Descriptor::kCallSlot));
+ // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
+ TNode<Smi> call_slot_smi = CAST(Parameter(Descriptor::kCallSlot));
+ TNode<TaggedIndex> call_slot = SmiToTaggedIndex(call_slot_smi);
TNode<FeedbackVector> feedback = CAST(Parameter(Descriptor::kFeedback));
- TNode<Object> iteratorMethod = CAST(Parameter(Descriptor::kResult));
+ TNode<Object> iterator_method = CAST(Parameter(Descriptor::kResult));
TNode<Object> result =
CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver,
- iteratorMethod, callSlot, feedback);
+ iterator_method, call_slot, feedback);
Return(result);
}
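
On the conversion step: a Smi keeps its integer payload shifted left within a tagged word, with the low bit clear, while TaggedIndex (per the TODO, not yet consumable by TurboFan here) is a similar small-integer encoding whose payload width stays fixed regardless of pointer compression; hence the explicit SmiToTaggedIndex. A sketch of the tagging arithmetic alone, assuming a 1-bit tag and 31-bit payload (illustrative, not V8's actual definitions):

    #include <cassert>
    #include <cstdint>

    constexpr int kTagBits = 1;  // low bit 0 marks "small integer"

    // Non-negative payloads only in this sketch; negative values need a
    // sign-preserving arithmetic shift on untag.
    constexpr int32_t SmiTag(int32_t value) { return value << kTagBits; }
    constexpr int32_t SmiUntag(int32_t tagged) { return tagged >> kTagBits; }

    int main() {
      int32_t slot = 7;  // e.g. a feedback-vector slot index
      int32_t tagged = SmiTag(slot);
      assert((tagged & 1) == 0);
      assert(SmiUntag(tagged) == slot);
    }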
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 61665d2825..4d496fa384 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -24,14 +24,9 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// https://tc39.github.io/ecma262/#sec-getiterator --- never used for
// @@asyncIterator.
- IteratorRecord GetIterator(SloppyTNode<Context> context,
- SloppyTNode<Object> object,
- Label* if_exception = nullptr,
- TVariable<Object>* exception = nullptr);
+ IteratorRecord GetIterator(TNode<Context> context, TNode<Object> object);
IteratorRecord GetIterator(TNode<Context> context, TNode<Object> object,
- TNode<Object> method,
- Label* if_exception = nullptr,
- TVariable<Object>* exception = nullptr);
+ TNode<Object> method);
// https://tc39.github.io/ecma262/#sec-iteratorstep
// If the iterator is done, goto {if_done}, otherwise returns an iterator
@@ -40,9 +35,7 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// object, loaded from the native context.
TNode<JSReceiver> IteratorStep(
TNode<Context> context, const IteratorRecord& iterator, Label* if_done,
- base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt,
- Label* if_exception = nullptr, TVariable<Object>* exception = nullptr);
-
+ base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt);
TNode<JSReceiver> IteratorStep(
TNode<Context> context, const IteratorRecord& iterator,
base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_done) {
@@ -55,8 +48,7 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// object, loaded from the native context.
TNode<Object> IteratorValue(
TNode<Context> context, TNode<JSReceiver> result,
- base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt,
- Label* if_exception = nullptr, TVariable<Object>* exception = nullptr);
+ base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt);
// https://tc39.github.io/ecma262/#sec-iteratorclose
void IteratorCloseOnException(TNode<Context> context,
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 630d65e72c..917255f9bb 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+using compiler::ScopedExceptionHandler;
+
class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
public:
explicit MicrotaskQueueBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -45,7 +47,7 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
void RewindEnteredContext(TNode<IntPtrT> saved_entered_context_count);
void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
- SloppyTNode<HeapObject> promise_or_capability);
+ TNode<HeapObject> promise_or_capability);
};
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
@@ -118,7 +120,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Map> microtask_map = LoadMap(microtask);
TNode<Uint16T> microtask_type = LoadMapInstanceType(microtask_map);
- TVARIABLE(HeapObject, var_exception, TheHoleConstant());
+ TVARIABLE(Object, var_exception);
Label if_exception(this, Label::kDeferred);
Label is_callable(this), is_callback(this),
is_promise_fulfill_reaction_job(this),
@@ -147,10 +149,10 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<JSReceiver> callable =
LoadObjectField<JSReceiver>(microtask, CallableTask::kCallableOffset);
- const TNode<Object> result = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- microtask_context, callable, UndefinedConstant());
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ Call(microtask_context, callable, UndefinedConstant());
+ }
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
@@ -173,10 +175,11 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
// But from our current measurements it doesn't seem to be a
// serious performance problem, even if the microtask is full
// of CallHandlerTasks (which is not a realistic use case anyways).
- const TNode<Object> result =
- CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
- microtask_callback, microtask_data);
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
+ microtask_callback, microtask_data);
+ }
Goto(&done);
}
@@ -195,10 +198,11 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> thenable = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kThenableOffset);
- const TNode<Object> result =
- CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
- promise_to_resolve, thenable, then);
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
+ promise_to_resolve, thenable, then);
+ }
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
@@ -214,24 +218,44 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> argument =
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
- const TNode<Object> handler =
+ const TNode<Object> job_handler =
LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
const TNode<HeapObject> promise_or_capability = CAST(LoadObjectField(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset));
+ TNode<Object> preserved_embedder_data = LoadObjectField(
+ microtask,
+ PromiseReactionJobTask::kContinuationPreservedEmbedderDataOffset);
+ Label preserved_data_done(this);
+ GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_done);
+ StoreContextElement(native_context,
+ Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+ preserved_embedder_data);
+ Goto(&preserved_data_done);
+ BIND(&preserved_data_done);
+
// Run the promise before/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
promise_or_capability);
- const TNode<Object> result =
- CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
- argument, handler, promise_or_capability);
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
+ argument, job_handler, promise_or_capability);
+ }
// Run the promise after/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
promise_or_capability);
+ Label preserved_data_reset_done(this);
+ GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
+ StoreContextElement(native_context,
+ Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+ UndefinedConstant());
+ Goto(&preserved_data_reset_done);
+ BIND(&preserved_data_reset_done);
+
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
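
The two new blocks bracket the reaction job: when the microtask carries continuation-preserved embedder data, it is published into the native context slot before the job runs and reset to undefined afterwards (both steps are skipped when the field is undefined). The same save/run/reset shape as a standalone RAII sketch (plain C++; the Context struct and string payload are made-up stand-ins):

    #include <optional>
    #include <string>

    struct Context {
      // Stand-in for CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX.
      std::optional<std::string> continuation_preserved_embedder_data;
    };

    // Publish the task's data for the duration of the job, then clear it,
    // mirroring preserved_data_done / preserved_data_reset_done.
    class PreservedDataScope {
     public:
      PreservedDataScope(Context& context, std::optional<std::string> data)
          : context_(context), active_(data.has_value()) {
        if (active_)
          context_.continuation_preserved_embedder_data = std::move(data);
      }
      ~PreservedDataScope() {
        if (active_) context_.continuation_preserved_embedder_data.reset();
      }

     private:
      Context& context_;
      bool active_;
    };

    int main() {
      Context native_context;
      {
        PreservedDataScope scope(native_context, std::string("trace-id"));
        // ... run the fulfill/reject reaction job here ...
      }
      // The slot is cleared again once the job has finished.
    }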
@@ -247,24 +271,44 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> argument =
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
- const TNode<Object> handler =
+ const TNode<Object> job_handler =
LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
const TNode<HeapObject> promise_or_capability = CAST(LoadObjectField(
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset));
+ TNode<Object> preserved_embedder_data = LoadObjectField(
+ microtask,
+ PromiseReactionJobTask::kContinuationPreservedEmbedderDataOffset);
+ Label preserved_data_done(this);
+ GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_done);
+ StoreContextElement(native_context,
+ Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+ preserved_embedder_data);
+ Goto(&preserved_data_done);
+ BIND(&preserved_data_done);
+
// Run the promise before/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
promise_or_capability);
- const TNode<Object> result =
- CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
- argument, handler, promise_or_capability);
- GotoIfException(result, &if_exception, &var_exception);
+ {
+ ScopedExceptionHandler handler(this, &if_exception, &var_exception);
+ CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
+ argument, job_handler, promise_or_capability);
+ }
// Run the promise after/debug hook if enabled.
RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
promise_or_capability);
+ Label preserved_data_reset_done(this);
+ GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
+ StoreContextElement(native_context,
+ Context::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+ UndefinedConstant());
+ Goto(&preserved_data_reset_done);
+ BIND(&preserved_data_reset_done);
+
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
@@ -415,7 +459,7 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
Runtime::FunctionId id, TNode<Context> context,
- SloppyTNode<HeapObject> promise_or_capability) {
+ TNode<HeapObject> promise_or_capability) {
Label hook(this, Label::kDeferred), done_hook(this);
Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &hook,
&done_hook);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 9c5cd54613..9af4affa68 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -359,7 +359,7 @@ TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
TNode<Object> method =
GetProperty(context, receiver, factory()->toString_string());
- Return(CallJS(CodeFactory::Call(isolate()), context, method, receiver));
+ Return(Call(context, method, receiver));
BIND(&if_null_or_undefined);
ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
@@ -380,7 +380,9 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
Branch(TaggedIsSmi(object), &to_primitive, &if_objectisnotsmi);
BIND(&if_objectisnotsmi);
- TNode<Map> map = LoadMap(CAST(object));
+ TNode<HeapObject> heap_object = CAST(object);
+
+ TNode<Map> map = LoadMap(heap_object);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
{
@@ -393,12 +395,12 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
&call_runtime, &if_notunique_name);
BIND(&if_unique_name);
- TryHasOwnProperty(object, map, instance_type, var_unique.value(),
+ TryHasOwnProperty(heap_object, map, instance_type, var_unique.value(),
&return_true, &return_false, &call_runtime);
BIND(&if_index);
{
- TryLookupElement(CAST(object), map, instance_type, var_index.value(),
+ TryLookupElement(heap_object, map, instance_type, var_index.value(),
&return_true, &return_false, &return_false,
&call_runtime);
}
@@ -435,8 +437,8 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
// ES #sec-object.assign
TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -502,8 +504,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
TNode<DescriptorArray> object_descriptors = LoadMapDescriptors(object_map);
TNode<EnumCache> object_enum_cache = LoadObjectField<EnumCache>(
object_descriptors, DescriptorArray::kEnumCacheOffset);
- TNode<Object> object_enum_keys =
- LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
+ auto object_enum_keys = LoadObjectField<FixedArrayBase>(
+ object_enum_cache, EnumCache::kKeysOffset);
// Allocate a JSArray and copy the elements from the {object_enum_keys}.
TNode<JSArray> array;
@@ -598,8 +600,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
TNode<DescriptorArray> object_descriptors = LoadMapDescriptors(object_map);
TNode<EnumCache> object_enum_cache = CAST(
LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset));
- TNode<Object> object_enum_keys =
- LoadObjectField(object_enum_cache, EnumCache::kKeysOffset);
+ auto object_enum_keys = LoadObjectField<FixedArrayBase>(
+ object_enum_cache, EnumCache::kKeysOffset);
// Allocate a JSArray and copy the elements from the {object_enum_keys}.
TNode<NativeContext> native_context = LoadNativeContext(context);
@@ -1064,8 +1066,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
int const kPrototypeArg = 0;
int const kPropertiesArg = 1;
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> prototype = args.GetOptionalArgumentValue(kPrototypeArg);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 6b1f43d8ea..c48fc0f78f 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -28,11 +28,6 @@ void PromiseBuiltinsAssembler::ZeroOutEmbedderOffsets(
}
}
-TNode<HeapObject> PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
- TNode<Context> context) {
- return Allocate(PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks);
-}
-
TNode<HeapObject> PromiseBuiltinsAssembler::AllocateJSPromise(
TNode<Context> context) {
return Allocate(JSPromise::kSizeWithEmbedderFields);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 66044b51af..377db4d9e3 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -20,8 +20,6 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
void ZeroOutEmbedderOffsets(TNode<JSPromise> promise);
TNode<HeapObject> AllocateJSPromise(TNode<Context> context);
-
- TNode<HeapObject> AllocatePromiseReactionJobTask(TNode<Context> context);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index c0901953d1..caafcf6506 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -126,8 +126,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
UncheckedCast<IntPtrT>(argc_ptr));
// 8. Return Call(trap, handler, «target, thisArgument, argArray»).
- TNode<Object> result = CallJS(CodeFactory::Call(isolate()), context, trap,
- handler, target, receiver, array);
+ TNode<Object> result = Call(context, trap, handler, target, receiver, array);
args.PopAndReturn(result);
BIND(&trap_undefined);
@@ -181,8 +180,8 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
UncheckedCast<IntPtrT>(argc_ptr));
// 8. Let newObj be ? Call(trap, handler, « target, argArray, newTarget »).
- TNode<Object> new_obj = CallJS(CodeFactory::Call(isolate()), context, trap,
- handler, target, array, new_target);
+ TNode<Object> new_obj =
+ Call(context, trap, handler, target, array, new_target);
// 9. If Type(newObj) is not Object, throw a TypeError exception.
GotoIf(TaggedIsSmi(new_obj), &not_an_object);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 792f0e44a6..d06ced76d2 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -164,9 +164,9 @@ void RegExpBuiltinsAssembler::FastStoreLastIndex(TNode<JSRegExp> regexp,
StoreObjectField(regexp, field_offset, value);
}
-void RegExpBuiltinsAssembler::SlowStoreLastIndex(SloppyTNode<Context> context,
- SloppyTNode<Object> regexp,
- SloppyTNode<Object> value) {
+void RegExpBuiltinsAssembler::SlowStoreLastIndex(TNode<Context> context,
+ TNode<Object> regexp,
+ TNode<Object> value) {
TNode<String> name = HeapConstant(isolate()->factory()->lastIndex_string());
SetPropertyStrict(context, regexp, name, value);
}
@@ -1237,8 +1237,8 @@ TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
}
TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
- SloppyTNode<String> string, SloppyTNode<Number> index,
- SloppyTNode<BoolT> is_unicode, bool is_fastpath) {
+ TNode<String> string, TNode<Number> index, TNode<BoolT> is_unicode,
+ bool is_fastpath) {
CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, IsNumberNormalized(index));
if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index c12ed63722..273e315599 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -37,9 +37,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> SlowLoadLastIndex(TNode<Context> context, TNode<Object> regexp);
void FastStoreLastIndex(TNode<JSRegExp> regexp, TNode<Smi> value);
- void SlowStoreLastIndex(SloppyTNode<Context> context,
- SloppyTNode<Object> regexp,
- SloppyTNode<Object> value);
+ void SlowStoreLastIndex(TNode<Context> context, TNode<Object> regexp,
+ TNode<Object> value);
// Loads {var_string_start} and {var_string_end} with the corresponding
// offsets into the given {string_data}.
@@ -140,10 +139,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
const TNode<Object> maybe_pattern,
const TNode<Object> maybe_flags);
- TNode<Number> AdvanceStringIndex(SloppyTNode<String> string,
- SloppyTNode<Number> index,
- SloppyTNode<BoolT> is_unicode,
- bool is_fastpath);
+ TNode<Number> AdvanceStringIndex(TNode<String> string, TNode<Number> index,
+ TNode<BoolT> is_unicode, bool is_fastpath);
TNode<Smi> AdvanceStringIndexFast(TNode<String> string, TNode<Smi> index,
TNode<BoolT> is_unicode) {
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index e6251c9480..f89bc25bdb 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -25,20 +25,18 @@ namespace internal {
// #sec-atomics.islockfree
inline bool AtomicIsLockFree(double size) {
// According to the standard, 1, 2, and 4 byte atomics are supposed to be
- // 'lock free' on every platform. But what exactly does 'lock free' mean?
- // For example, on x64 V8 uses a lock prefix to implement the semantics of
- // many atomic operations. Is that considered a lock? Probably not.
+ // 'lock free' on every platform. 'Lock free' means that all possible uses of
+ // those atomics guarantee forward progress for the whole agent cluster
+ // (i.e. for all threads, not just for a single thread).
//
- // On the other hand, V8 emits a few instructions for some arm atomics which
- // do appear to be a low level form of a spin lock. With an abundance of
- // caution, we only claim to have 'true lock free' support for 8 byte sizes
- // on x64 platforms. If people care about this function returning true, then
- // we need to clarify exactly what 'lock free' means at the standard level.
- bool is_lock_free = size == 1 || size == 2 || size == 4;
-#if V8_TARGET_ARCH_x64
- is_lock_free |= size == 8;
-#endif
- return is_lock_free;
+ // This property is often, but not always, aligned with whether atomic
+ // accesses are implemented with software locks such as mutexes.
+ //
+ // V8 has lock free atomics for all sizes on all supported first-class
+ // architectures: ia32, x64, ARM32 variants, and ARM64. Further, this property
+ // is depended upon by WebAssembly, which prescribes that all atomic accesses
+ // are always lock free.
+ return size == 1 || size == 2 || size == 4 || size == 8;
}
// ES #sec-atomics.islockfree
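
For comparison, standard C++ exposes the same per-size property through std::atomic<T>::is_always_lock_free; on the targets the new comment lists, these all answer true for 1, 2, 4, and 8 bytes, matching the rewritten predicate. A quick host-side probe (plain C++17, independent of V8):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Expected to print 1 for each size on V8's first-class targets,
      // agreeing with AtomicIsLockFree above.
      std::printf("1 byte:  %d\n", std::atomic<uint8_t>::is_always_lock_free);
      std::printf("2 bytes: %d\n", std::atomic<uint16_t>::is_always_lock_free);
      std::printf("4 bytes: %d\n", std::atomic<uint32_t>::is_always_lock_free);
      std::printf("8 bytes: %d\n", std::atomic<uint64_t>::is_always_lock_free);
    }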
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 5fe534879a..e2d1635274 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -973,7 +973,7 @@ void StringBuiltinsAssembler::StringIndexOf(
const TNode<IntPtrT> search_length =
IntPtrSub(subject_length, start_position);
const TNode<IntPtrT> search_byte =
- ChangeInt32ToIntPtr(Load(MachineType::Uint8(), adjusted_search_ptr));
+ ChangeInt32ToIntPtr(Load<Uint8T>(adjusted_search_ptr));
const TNode<ExternalReference> memchr =
ExternalConstant(ExternalReference::libc_memchr_function());
@@ -1074,8 +1074,8 @@ TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
// ES6 String.prototype.includes(searchString [, position])
// #sec-string.prototype.includes
TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIncludes, argc, context);
}
@@ -1083,8 +1083,8 @@ TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
// ES6 String.prototype.indexOf(searchString [, position])
// #sec-string.prototype.indexof
TF_BUILTIN(StringPrototypeIndexOf, StringIncludesIndexOfAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIndexOf, argc, context);
}
@@ -1293,8 +1293,7 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
replace));
},
[=](TNode<Object> fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, fn, search, receiver, replace));
+ Return(Call(context, fn, search, receiver, replace));
});
// Convert {receiver} and {search} to strings.
@@ -1394,10 +1393,9 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
BIND(&if_iscallablereplace);
{
- Callable call_callable = CodeFactory::Call(isolate());
const TNode<Object> replacement =
- CallJS(call_callable, context, replace, UndefinedConstant(),
- search_string, match_start_index, subject_string);
+ Call(context, replace, UndefinedConstant(), search_string,
+ match_start_index, subject_string);
const TNode<String> replacement_string =
ToString_Inline(context, replacement);
var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone, context,
@@ -1463,8 +1461,7 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
context, maybe_regexp, receiver, symbol, property_to_check,
[=] { Return(CallBuiltin(builtin, context, maybe_regexp, receiver)); },
[=](TNode<Object> fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
+ Return(Call(context, fn, maybe_regexp, receiver));
});
// maybe_regexp is not a RegExp nor has [@@match / @@search] property.
@@ -1494,9 +1491,7 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
BIND(&slow_path);
{
TNode<Object> maybe_func = GetProperty(context, regexp, symbol);
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, maybe_func, regexp,
- receiver_string));
+ Return(Call(context, maybe_func, regexp, receiver_string));
}
}
}
@@ -1588,8 +1583,7 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
RegExpPrototypeMatchAllImpl(context, native_context, maybe_regexp, s));
};
auto if_generic_call = [=](TNode<Object> fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
+ Return(Call(context, fn, maybe_regexp, receiver));
};
MaybeCallFunctionAtSymbol(
context, maybe_regexp, receiver, isolate()->factory()->match_all_symbol(),
@@ -1606,10 +1600,9 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
maybe_regexp, StringConstant("g"));
// 5. Return ? Invoke(rx, @@matchAll, « S »).
- Callable callable = CodeFactory::Call(isolate());
TNode<Object> match_all_func =
GetProperty(context, rx, isolate()->factory()->match_all_symbol());
- Return(CallJS(callable, context, match_all_func, rx, s));
+ Return(Call(context, match_all_func, rx, s));
}
// ES6 #sec-string.prototype.search
@@ -1699,8 +1692,8 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
const int kSeparatorArg = 0;
const int kLimitArg = 1;
- const TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -1724,9 +1717,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
separator, receiver, limit));
},
[&](TNode<Object> fn) {
- Callable call_callable = CodeFactory::Call(isolate());
- args.PopAndReturn(
- CallJS(call_callable, context, fn, separator, receiver, limit));
+ args.PopAndReturn(Call(context, fn, separator, receiver, limit));
});
// String and integer conversions.
@@ -1808,8 +1799,8 @@ TF_BUILTIN(StringSubstring, StringBuiltinsAssembler) {
// ES6 #sec-string.prototype.trim
TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(String::kTrim, "String.prototype.trim", argc, context);
@@ -1817,8 +1808,8 @@ TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
// https://github.com/tc39/proposal-string-left-right-trim
TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(String::kTrimStart, "String.prototype.trimLeft", argc, context);
@@ -1826,8 +1817,8 @@ TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
// https://github.com/tc39/proposal-string-left-right-trim
TF_BUILTIN(StringPrototypeTrimEnd, StringTrimAssembler) {
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(String::kTrimEnd, "String.prototype.trimRight", argc, context);
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index 3ef8fc2a9b..61cd984e7f 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -68,7 +68,7 @@ namespace string {
const string: String = ToThisString(receiver, methodName);
// 3. Let position be ? ToInteger(pos).
- const indexNumber: Number = ToInteger_Inline(position, kTruncateMinusZero);
+ const indexNumber: Number = ToInteger_Inline(position);
// Convert the {position} to a uintptr and check that it's in bounds of
// the {string}.
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 23339fa1c8..021a0e9240 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -89,8 +89,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<IntPtrT> argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
CodeStubArguments args(this, argc);
TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
@@ -378,13 +378,6 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
-TNode<BoolT> TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(
- TNode<JSArrayBuffer> buffer) {
- TNode<Uint32T> bitfield =
- LoadObjectField<Uint32T>(buffer, JSArrayBuffer::kBitFieldOffset);
- return IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield);
-}
-
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 7424020596..1008b6bdd7 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -85,8 +85,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
- TNode<BoolT> IsSharedArrayBuffer(TNode<JSArrayBuffer> buffer);
-
void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
TNode<ByteArray> base,
TNode<UintPtrT> offset);
diff --git a/deps/v8/src/builtins/builtins-utils-inl.h b/deps/v8/src/builtins/builtins-utils-inl.h
index c8c9a2522c..82d5fe2873 100644
--- a/deps/v8/src/builtins/builtins-utils-inl.h
+++ b/deps/v8/src/builtins/builtins-utils-inl.h
@@ -23,11 +23,21 @@ Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate,
Handle<Object> BuiltinArguments::receiver() const { return at<Object>(0); }
Handle<JSFunction> BuiltinArguments::target() const {
- return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset);
+#ifdef V8_REVERSE_JSARGS
+ int index = kTargetOffset;
+#else
+ int index = Arguments::length() - 1 - kTargetOffset;
+#endif
+ return Handle<JSFunction>(address_of_arg_at(index));
}
Handle<HeapObject> BuiltinArguments::new_target() const {
- return Arguments::at<HeapObject>(Arguments::length() - 1 - kNewTargetOffset);
+#ifdef V8_REVERSE_JSARGS
+ int index = kNewTargetOffset;
+#else
+ int index = Arguments::length() - 1 - kNewTargetOffset;
+#endif
+ return Handle<JSFunction>(address_of_arg_at(index));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 601dfd5813..3bed3bc651 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
// Arguments object passed to C++ builtins.
-class BuiltinArguments : public Arguments {
+class BuiltinArguments : public JavaScriptArguments {
public:
BuiltinArguments(int length, Address* arguments)
: Arguments(length, arguments) {
@@ -25,13 +25,24 @@ class BuiltinArguments : public Arguments {
Object operator[](int index) const {
DCHECK_LT(index, length());
- return Arguments::operator[](index);
+ return Object(*address_of_arg_at(index + kArgsOffset));
}
template <class S = Object>
Handle<S> at(int index) const {
DCHECK_LT(index, length());
- return Arguments::at<S>(index);
+ return Handle<S>(address_of_arg_at(index + kArgsOffset));
+ }
+
+ inline void set_at(int index, Object value) {
+ DCHECK_LT(index, length());
+ *address_of_arg_at(index + kArgsOffset) = value.ptr();
+ }
+
+ // Note: this should return the address after the receiver,
+ // even when length() == 1.
+ inline Address* address_of_first_argument() const {
+ return address_of_arg_at(kArgsOffset + 1); // Skips receiver.
}
static constexpr int kNewTargetOffset = 0;
@@ -42,6 +53,12 @@ class BuiltinArguments : public Arguments {
static constexpr int kNumExtraArgs = 4;
static constexpr int kNumExtraArgsWithReceiver = 5;
+#ifdef V8_REVERSE_JSARGS
+ static constexpr int kArgsOffset = 4;
+#else
+ static constexpr int kArgsOffset = 0;
+#endif
+
inline Handle<Object> atOrUndefined(Isolate* isolate, int index) const;
inline Handle<Object> receiver() const;
inline Handle<JSFunction> target() const;
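
The #ifdef V8_REVERSE_JSARGS branches above encode one lookup under two stack layouts: with reversed JS arguments the extra slots sit at fixed low indices, while in the classic layout they are addressed from the end of the argument window. A tiny standalone model of the index arithmetic (plain C++; kTargetOffset = 1 is assumed, since only kNewTargetOffset = 0 is visible in this hunk):

    #include <cassert>

    constexpr int kNewTargetOffset = 0;
    constexpr int kTargetOffset = 1;  // assumed value, for illustration

    // Index of an extra-args slot within a window of `length` slots.
    int SlotIndex(int offset, int length, bool reverse_jsargs) {
      // Reversed layout: fixed low index. Classic: counted from the end.
      return reverse_jsargs ? offset : length - 1 - offset;
    }

    int main() {
      const int length = 7;  // hypothetical window size
      assert(SlotIndex(kTargetOffset, length, /*reverse_jsargs=*/true) == 1);
      assert(SlotIndex(kTargetOffset, length, /*reverse_jsargs=*/false) == 5);
      assert(SlotIndex(kNewTargetOffset, length, /*reverse_jsargs=*/false) == 6);
    }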
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 0f5d86e646..770f5da97b 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -28,6 +28,13 @@ class WasmBuiltinsAssembler : public CodeStubAssembler {
IntPtrConstant(WasmInstanceObject::kNativeContextOffset -
kHeapObjectTag)));
}
+
+ TNode<Smi> SmiFromUint32WithSaturation(TNode<Uint32T> value, uint32_t max) {
+ DCHECK_LE(max, static_cast<uint32_t>(Smi::kMaxValue));
+ TNode<Uint32T> capped_value = SelectConstant(
+ Uint32LessThan(value, Uint32Constant(max)), value, Uint32Constant(max));
+ return SmiFromUint32(capped_value);
+ }
};
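
SmiFromUint32WithSaturation clamps an untrusted 32-bit value to a cap before Smi conversion, so out-of-range inputs become the cap instead of overflowing the Smi payload; the WasmTableInit and WasmTableCopy builtins added below use it with kCap = wasm::kV8MaxWasmTableSize + 1. The same clamp in portable C++ (sketch; kSmiMaxValue is a stand-in for Smi::kMaxValue on 32-bit targets):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kSmiMaxValue = 0x3FFFFFFF;  // assumed 31-bit payload

    uint32_t SaturateToCap(uint32_t value, uint32_t cap) {
      assert(cap <= kSmiMaxValue);  // mirrors the DCHECK_LE in the diff
      // Equivalent of SelectConstant(Uint32LessThan(value, cap), ...).
      return std::min(value, cap);
    }

    int main() {
      assert(SaturateToCap(5, 100) == 5);
      assert(SaturateToCap(0xFFFFFFFFu, 100) == 100);
    }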
TF_BUILTIN(WasmStackGuard, WasmBuiltinsAssembler) {
@@ -53,7 +60,16 @@ TF_BUILTIN(WasmRethrow, WasmBuiltinsAssembler) {
TNode<Object> exception = CAST(Parameter(Descriptor::kException));
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
+
+ Label nullref(this, Label::kDeferred);
+ GotoIf(TaggedEqual(NullConstant(), exception), &nullref);
+
TailCallRuntime(Runtime::kReThrow, context, exception);
+
+ BIND(&nullref);
+ MessageTemplate message_id = MessageTemplate::kWasmTrapRethrowNullRef;
+ TailCallRuntime(Runtime::kThrowWasmError, context,
+ SmiConstant(static_cast<int>(message_id)));
}
TF_BUILTIN(WasmTraceMemory, WasmBuiltinsAssembler) {
@@ -79,48 +95,118 @@ TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
Return(Unsigned(SmiToInt32(result_smi)));
}
-TF_BUILTIN(WasmI32AtomicWait, WasmBuiltinsAssembler) {
+TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
+ if (!Is32()) {
+ Unreachable();
+ return;
+ }
+
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ TNode<Number> address_number = ChangeUint32ToTagged(address);
+
TNode<Int32T> expected_value =
UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
- TNode<Float64T> timeout =
- UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
+ TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
+
+ TNode<IntPtrT> timeout_low =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
+ TNode<IntPtrT> timeout_high =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
+ TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<Context> context = LoadContextFromInstance(instance);
+
+ TNode<Smi> result_smi =
+ CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
+ address_number, expected_value_number, timeout));
+ Return(Unsigned(SmiToInt32(result_smi)));
+}
+
+TF_BUILTIN(WasmI32AtomicWait64, WasmBuiltinsAssembler) {
+ if (!Is64()) {
+ Unreachable();
+ return;
+ }
+
+ TNode<Uint32T> address =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
+
+ TNode<Int32T> expected_value =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
- TNode<Number> timeout_number = ChangeFloat64ToTagged(timeout);
+
+ TNode<IntPtrT> timeout_raw =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
+ TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
+
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
- address_number, expected_value_number, timeout_number));
+ address_number, expected_value_number, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
-TF_BUILTIN(WasmI64AtomicWait, WasmBuiltinsAssembler) {
+TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
+ if (!Is32()) {
+ Unreachable();
+ return;
+ }
+
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
- TNode<Uint32T> expected_value_high =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueHigh));
- TNode<Uint32T> expected_value_low =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueLow));
- TNode<Float64T> timeout =
- UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
+ TNode<Number> address_number = ChangeUint32ToTagged(address);
+
+ TNode<IntPtrT> expected_value_low =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueLow));
+ TNode<IntPtrT> expected_value_high =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueHigh));
+ TNode<BigInt> expected_value =
+ BigIntFromInt32Pair(expected_value_low, expected_value_high);
+
+ TNode<IntPtrT> timeout_low =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
+ TNode<IntPtrT> timeout_high =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
+ TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<Context> context = LoadContextFromInstance(instance);
+
+ TNode<Smi> result_smi =
+ CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
+ address_number, expected_value, timeout));
+ Return(Unsigned(SmiToInt32(result_smi)));
+}
+
+TF_BUILTIN(WasmI64AtomicWait64, WasmBuiltinsAssembler) {
+ if (!Is64()) {
+ Unreachable();
+ return;
+ }
+
+ TNode<Uint32T> address =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
- TNode<Number> expected_value_high_number =
- ChangeUint32ToTagged(expected_value_high);
- TNode<Number> expected_value_low_number =
- ChangeUint32ToTagged(expected_value_low);
- TNode<Number> timeout_number = ChangeFloat64ToTagged(timeout);
+
+ TNode<IntPtrT> expected_value_raw =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValue));
+ TNode<BigInt> expected_value = BigIntFromInt64(expected_value_raw);
+
+ TNode<IntPtrT> timeout_raw =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
+ TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
+
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
- TNode<Smi> result_smi = CAST(CallRuntime(
- Runtime::kWasmI64AtomicWait, context, instance, address_number,
- expected_value_high_number, expected_value_low_number, timeout_number));
+ TNode<Smi> result_smi =
+ CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
+ address_number, expected_value, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
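
The 32/64 split exists because a 64-bit expected value or timeout cannot travel in a single register on a 32-bit target: the *32 builtins receive (low, high) halves and rebuild the value (BigIntFromInt32Pair), while the *64 builtins take it whole (BigIntFromInt64). The pair round-trip in plain C++ (a sketch, not the V8 helpers):

    #include <cassert>
    #include <cstdint>

    // Split a 64-bit value into (low, high) halves, as a 32-bit calling
    // convention would pass it in two registers, and recombine it.
    void ToInt32Pair(int64_t v, uint32_t* low, uint32_t* high) {
      *low = static_cast<uint32_t>(v);
      *high = static_cast<uint32_t>(static_cast<uint64_t>(v) >> 32);
    }

    int64_t FromInt32Pair(uint32_t low, uint32_t high) {
      return static_cast<int64_t>((static_cast<uint64_t>(high) << 32) | low);
    }

    int main() {
      const int64_t timeout = -1;  // hypothetical "wait forever" sentinel
      uint32_t lo, hi;
      ToInt32Pair(timeout, &lo, &hi);
      assert(FromInt32Pair(lo, hi) == timeout);
    }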
@@ -144,6 +230,66 @@ TF_BUILTIN(WasmMemoryGrow, WasmBuiltinsAssembler) {
Return(Int32Constant(-1));
}
+TF_BUILTIN(WasmTableInit, WasmBuiltinsAssembler) {
+ TNode<Uint32T> dst_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kDestination));
+ // We cap {dst}, {src}, and {size} by {wasm::kV8MaxWasmTableSize + 1} to make
+ // sure that the values fit into a Smi.
+ STATIC_ASSERT(static_cast<size_t>(Smi::kMaxValue) >=
+ wasm::kV8MaxWasmTableSize + 1);
+ constexpr uint32_t kCap =
+ static_cast<uint32_t>(wasm::kV8MaxWasmTableSize + 1);
+ TNode<Smi> dst = SmiFromUint32WithSaturation(dst_raw, kCap);
+ TNode<Uint32T> src_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kSource));
+ TNode<Smi> src = SmiFromUint32WithSaturation(src_raw, kCap);
+ TNode<Uint32T> size_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kSize));
+ TNode<Smi> size = SmiFromUint32WithSaturation(size_raw, kCap);
+ TNode<Smi> table_index =
+ UncheckedCast<Smi>(Parameter(Descriptor::kTableIndex));
+ TNode<Smi> segment_index =
+ UncheckedCast<Smi>(Parameter(Descriptor::kSegmentIndex));
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<Context> context = LoadContextFromInstance(instance);
+
+ TailCallRuntime(Runtime::kWasmTableInit, context, instance, table_index,
+ segment_index, dst, src, size);
+}
+
+TF_BUILTIN(WasmTableCopy, WasmBuiltinsAssembler) {
+ // We cap {dst}, {src}, and {size} by {wasm::kV8MaxWasmTableSize + 1} to make
+ // sure that the values fit into a Smi.
+ STATIC_ASSERT(static_cast<size_t>(Smi::kMaxValue) >=
+ wasm::kV8MaxWasmTableSize + 1);
+ constexpr uint32_t kCap =
+ static_cast<uint32_t>(wasm::kV8MaxWasmTableSize + 1);
+
+ TNode<Uint32T> dst_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kDestination));
+ TNode<Smi> dst = SmiFromUint32WithSaturation(dst_raw, kCap);
+
+ TNode<Uint32T> src_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kSource));
+ TNode<Smi> src = SmiFromUint32WithSaturation(src_raw, kCap);
+
+ TNode<Uint32T> size_raw =
+ UncheckedCast<Uint32T>(Parameter(Descriptor::kSize));
+ TNode<Smi> size = SmiFromUint32WithSaturation(size_raw, kCap);
+
+ TNode<Smi> dst_table =
+ UncheckedCast<Smi>(Parameter(Descriptor::kDestinationTable));
+
+ TNode<Smi> src_table =
+ UncheckedCast<Smi>(Parameter(Descriptor::kSourceTable));
+
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<Context> context = LoadContextFromInstance(instance);
+
+ TailCallRuntime(Runtime::kWasmTableCopy, context, instance, dst_table,
+ src_table, dst, src, size);
+}
+
TF_BUILTIN(WasmTableGet, WasmBuiltinsAssembler) {
TNode<Int32T> entry_index =
UncheckedCast<Int32T>(Parameter(Descriptor::kEntryIndex));
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
index 28fb9c9cbd..e75c7fae9d 100644
--- a/deps/v8/src/builtins/builtins-weak-refs.cc
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-BUILTIN(FinalizationGroupConstructor) {
+BUILTIN(FinalizationRegistryConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
@@ -31,22 +31,22 @@ BUILTIN(FinalizationGroupConstructor) {
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSFinalizationGroup> finalization_group =
- Handle<JSFinalizationGroup>::cast(result);
- finalization_group->set_native_context(*isolate->native_context());
- finalization_group->set_cleanup(*cleanup);
- finalization_group->set_flags(
- JSFinalizationGroup::ScheduledForCleanupField::encode(false));
-
- DCHECK(finalization_group->active_cells().IsUndefined(isolate));
- DCHECK(finalization_group->cleared_cells().IsUndefined(isolate));
- DCHECK(finalization_group->key_map().IsUndefined(isolate));
- return *finalization_group;
+ Handle<JSFinalizationRegistry> finalization_registry =
+ Handle<JSFinalizationRegistry>::cast(result);
+ finalization_registry->set_native_context(*isolate->native_context());
+ finalization_registry->set_cleanup(*cleanup);
+ finalization_registry->set_flags(
+ JSFinalizationRegistry::ScheduledForCleanupField::encode(false));
+
+ DCHECK(finalization_registry->active_cells().IsUndefined(isolate));
+ DCHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
+ DCHECK(finalization_registry->key_map().IsUndefined(isolate));
+ return *finalization_registry;
}
-BUILTIN(FinalizationGroupRegister) {
+BUILTIN(FinalizationRegistryRegister) {
HandleScope scope(isolate);
- const char* method_name = "FinalizationGroup.prototype.register";
+ const char* method_name = "FinalizationRegistry.prototype.register";
// 1. Let finalizationGroup be the this value.
//
@@ -55,7 +55,7 @@ BUILTIN(FinalizationGroupRegister) {
//
// 4. If finalizationGroup does not have a [[Cells]] internal slot,
// throw a TypeError exception.
- CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
+ CHECK_RECEIVER(JSFinalizationRegistry, finalization_registry, method_name);
Handle<Object> target = args.atOrUndefined(isolate, 1);
@@ -86,15 +86,15 @@ BUILTIN(FinalizationGroupRegister) {
}
// TODO(marja): Realms.
- JSFinalizationGroup::Register(finalization_group,
- Handle<JSReceiver>::cast(target), holdings,
- unregister_token, isolate);
+ JSFinalizationRegistry::Register(finalization_registry,
+ Handle<JSReceiver>::cast(target), holdings,
+ unregister_token, isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
-BUILTIN(FinalizationGroupUnregister) {
+BUILTIN(FinalizationRegistryUnregister) {
HandleScope scope(isolate);
- const char* method_name = "FinalizationGroup.prototype.unregister";
+ const char* method_name = "FinalizationRegistry.prototype.unregister";
// 1. Let finalizationGroup be the this value.
//
@@ -103,7 +103,7 @@ BUILTIN(FinalizationGroupUnregister) {
//
// 3. If finalizationGroup does not have a [[Cells]] internal slot,
// throw a TypeError exception.
- CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
+ CHECK_RECEIVER(JSFinalizationRegistry, finalization_registry, method_name);
Handle<Object> unregister_token = args.atOrUndefined(isolate, 1);
@@ -115,15 +115,16 @@ BUILTIN(FinalizationGroupUnregister) {
unregister_token));
}
- bool success = JSFinalizationGroup::Unregister(
- finalization_group, Handle<JSReceiver>::cast(unregister_token), isolate);
+ bool success = JSFinalizationRegistry::Unregister(
+ finalization_registry, Handle<JSReceiver>::cast(unregister_token),
+ isolate);
return *isolate->factory()->ToBoolean(success);
}
-BUILTIN(FinalizationGroupCleanupSome) {
+BUILTIN(FinalizationRegistryCleanupSome) {
HandleScope scope(isolate);
- const char* method_name = "FinalizationGroup.prototype.cleanupSome";
+ const char* method_name = "FinalizationRegistry.prototype.cleanupSome";
// 1. Let finalizationGroup be the this value.
//
@@ -132,9 +133,9 @@ BUILTIN(FinalizationGroupCleanupSome) {
//
// 3. If finalizationGroup does not have a [[Cells]] internal slot,
// throw a TypeError exception.
- CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
+ CHECK_RECEIVER(JSFinalizationRegistry, finalization_registry, method_name);
- Handle<Object> callback(finalization_group->cleanup(), isolate);
+ Handle<Object> callback(finalization_registry->cleanup(), isolate);
Handle<Object> callback_obj = args.atOrUndefined(isolate, 1);
// 4. If callback is not undefined and IsCallable(callback) is
@@ -148,10 +149,9 @@ BUILTIN(FinalizationGroupCleanupSome) {
callback = callback_obj;
}
- // Don't do set_scheduled_for_cleanup(false); we still have the microtask
- // scheduled and don't want to schedule another one in case the user never
- // executes microtasks.
- if (JSFinalizationGroup::Cleanup(isolate, finalization_group, callback)
+ // Don't do set_scheduled_for_cleanup(false); we still have the task
+ // scheduled.
+ if (JSFinalizationRegistry::Cleanup(isolate, finalization_registry, callback)
.IsNothing()) {
DCHECK(isolate->has_pending_exception());
return ReadOnlyRoots(isolate).exception();
@@ -159,19 +159,20 @@ BUILTIN(FinalizationGroupCleanupSome) {
return ReadOnlyRoots(isolate).undefined_value();
}
-BUILTIN(FinalizationGroupCleanupIteratorNext) {
+BUILTIN(FinalizationRegistryCleanupIteratorNext) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSFinalizationGroupCleanupIterator, iterator, "next");
+ CHECK_RECEIVER(JSFinalizationRegistryCleanupIterator, iterator, "next");
- Handle<JSFinalizationGroup> finalization_group(iterator->finalization_group(),
- isolate);
- if (!finalization_group->NeedsCleanup()) {
+ Handle<JSFinalizationRegistry> finalization_registry(
+ iterator->finalization_registry(), isolate);
+ if (!finalization_registry->NeedsCleanup()) {
return *isolate->factory()->NewJSIteratorResult(
handle(ReadOnlyRoots(isolate).undefined_value(), isolate), true);
}
- Handle<Object> holdings = handle(
- JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate),
- isolate);
+ Handle<Object> holdings =
+ handle(JSFinalizationRegistry::PopClearedCellHoldings(
+ finalization_registry, isolate),
+ isolate);
return *isolate->factory()->NewJSIteratorResult(holdings, false);
}
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 955656d669..34f7ddc18a 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -8,6 +8,7 @@
#include "src/builtins/builtins-descriptors.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/diagnostics/code-tracer.h"
#include "src/execution/isolate.h"
@@ -319,6 +320,7 @@ class OffHeapTrampolineGenerator {
{
FrameScope scope(&masm_, StackFrame::NONE);
if (type == TrampolineType::kJump) {
+ masm_.CodeEntry();
masm_.JumpToInstructionStream(off_heap_entry);
} else {
masm_.Trap();
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index 7e8d08097f..cb7ff412de 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -20,7 +20,6 @@ extern macro IsContext(HeapObject): bool;
extern macro IsNativeContext(HeapObject): bool;
extern macro IsJSReceiver(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
-extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
extern macro IsBigInt(HeapObject): bool;
extern macro IsFixedArray(HeapObject): bool;
@@ -624,8 +623,7 @@ Cast<DebugInfo>(implicit context: Context)(o: HeapObject): DebugInfo
extern macro IsCoverageInfo(HeapObject): bool;
Cast<CoverageInfo>(implicit context: Context)(o: HeapObject): CoverageInfo
labels CastError {
- // TODO(jgruber): Assign an instance type.
- if (IsFixedArray(o)) return %RawDownCast<CoverageInfo>(o);
+ if (IsCoverageInfo(o)) return %RawDownCast<CoverageInfo>(o);
goto CastError;
}
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index fe44fe1287..ee9be1d411 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -168,6 +168,12 @@ Convert<intptr, Smi>(s: Smi): intptr {
Convert<uintptr, PositiveSmi>(ps: PositiveSmi): uintptr {
return Unsigned(SmiUntag(ps));
}
+Convert<intptr, TaggedIndex>(ti: TaggedIndex): intptr {
+ return TaggedIndexToIntPtr(ti);
+}
+Convert<TaggedIndex, intptr>(i: intptr): TaggedIndex {
+ return IntPtrToTaggedIndex(i);
+}
Convert<intptr, uintptr>(ui: uintptr): intptr {
const i = Signed(ui);
assert(i >= 0);
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 739cdfdcdc..5bea93214c 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -5,17 +5,18 @@
#if V8_TARGET_ARCH_IA32
#include "src/api/api-arguments.h"
+#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
@@ -65,6 +66,24 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void CompareStackLimit(MacroAssembler* masm, Register with,
+ StackLimitKind kind) {
+ DCHECK(masm->root_array_available());
+ Isolate* isolate = masm->isolate();
+ // Address through the root register. No load is needed.
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ __ cmp(with, Operand(kRootRegister, offset));
+}
+
void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch, Label* stack_overflow,
bool include_receiver = false) {
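
CompareStackLimit relies on both jslimit addresses sitting at a fixed, statically computable delta from the isolate's root register, so the comparison folds the address into an Operand and never loads it. A minimal sketch of the offset computation (names illustrative, not the V8 API):

    #include <cstdint>

    // root + offset == limit_address, so a single cmp against
    // [root_register + offset] suffices and no address load is emitted.
    intptr_t RootRegisterOffsetFor(uintptr_t root_register_value,
                                   uintptr_t limit_address) {
      return static_cast<intptr_t>(limit_address - root_register_value);
    }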
@@ -637,7 +656,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRealStackLimit(esp);
+ CompareStackLimit(masm, esp, StackLimitKind::kRealStackLimit);
__ j(below, &stack_overflow);
// Pop return address.
@@ -856,22 +875,30 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not
+// advance the bytecode offset if the current bytecode is a JumpLoop; instead,
+// the JumpLoop is re-executed so that it jumps to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register scratch1, Register scratch2,
- Label* if_return) {
+ Register scratch3, Label* if_return) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of a wide or extra wide JumpLoop bytecode, we will
+ // restore the original bytecode offset; to simplify the code, we keep a
+ // backup of it.
+ Register original_bytecode_offset = scratch3;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
Immediate(ExternalReference::bytecode_size_table_address()));
// Load the current bytecode.
- __ movzx_b(bytecode, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
+ __ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -910,9 +937,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ cmp(bytecode,
+ Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ j(not_equal, &not_jump_loop, Label::kNear);
+ // If this is a wide or extra wide JumpLoop, we need to restore the original
+ // bytecode_offset since we might have increased it to skip the wide /
+ // extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end, Label::kNear);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ add(bytecode_offset,
Operand(bytecode_size_table, bytecode, times_int_size, 0));
+
+ __ bind(&end);
}
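
Ignoring the operand-scale bookkeeping in the size-table lookup, the control flow that AdvanceBytecodeOffsetOrReturn now emits corresponds to this plain C++ sketch (illustrative, not the generated assembly):

    #include <cstdint>

    enum class Bytecode : uint8_t { kWide, kExtraWide, kJumpLoop, kReturn };

    // Returns the offset of the next bytecode to dispatch to.
    int Advance(const uint8_t* bytecodes, int offset, const int* size_table) {
      const int original_offset = offset;  // backup, as in the builtin
      Bytecode b = static_cast<Bytecode>(bytecodes[offset]);
      if (b == Bytecode::kWide || b == Bytecode::kExtraWide) {
        ++offset;  // skip the prefix byte
        b = static_cast<Bytecode>(bytecodes[offset]);
      }
      // Return bytecodes bail out to the caller's if_return label instead.
      if (b == Bytecode::kJumpLoop) {
        // Re-execute the JumpLoop (including any prefix), do not advance.
        return original_offset;
      }
      return offset + size_table[static_cast<int>(b)];
    }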
// Generate code for entering a JS function with the interpreter.
@@ -1028,7 +1070,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ mov(eax, esp);
__ sub(eax, frame_size);
- __ CompareRealStackLimit(eax);
+ CompareStackLimit(masm, eax, StackLimitKind::kRealStackLimit);
__ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1056,8 +1098,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(Operand(ebp, ecx, times_system_pointer_size, 0), edx);
__ bind(&no_incoming_new_target_or_generator_register);
- // Load accumulator and bytecode offset into registers.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ CompareStackLimit(masm, esp, StackLimitKind::kInterruptStackLimit);
+ __ j(below, &stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
+ // The accumulator is already loaded with undefined.
+
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1088,16 +1137,45 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
+ __ Push(eax);
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ecx,
- kInterpreterDispatchTableRegister, &do_return);
+ kInterpreterDispatchTableRegister, eax,
+ &do_return);
+ __ Pop(eax);
__ jmp(&do_dispatch);
__ bind(&do_return);
+ __ Pop(eax);
// The return value is in eax.
LeaveInterpreterFrame(masm, edx, ecx);
__ ret(0);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ mov(kInterpreterBytecodeArrayRegister,
+ Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ // It's ok to clobber kInterpreterBytecodeOffsetRegister since we are setting
+ // it again after continuing.
+ __ SmiTag(kInterpreterBytecodeOffsetRegister);
+ __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ kInterpreterBytecodeOffsetRegister);
+
+ __ jmp(&after_stack_check_interrupt);
+
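The interrupt path spills a sentinel offset before calling the StackGuard, so a deoptimization taken inside the guard is attributable to function entry, and then restores the real state afterwards. In C-like terms, with illustrative field and constant values:

    #include <cstdint>

    constexpr intptr_t kHeaderSize = 0x28;           // illustrative value
    constexpr intptr_t kFunctionEntrySentinel = -1;  // illustrative value

    struct InterpreterFrame { intptr_t bytecode_offset; };  // Smi-tagged slot

    void StackCheckInterrupt(InterpreterFrame* frame) {
      // Publish the sentinel so the guard sees a function-entry frame.
      frame->bytecode_offset = (kHeaderSize + kFunctionEntrySentinel) << 1;
      // ... CallRuntime(Runtime::kStackGuard) happens here ...
      // Afterwards re-materialize the registers and spill the real offset.
      frame->bytecode_offset = kHeaderSize << 1;  // one shift models SmiTag
    }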
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1408,6 +1486,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ j(greater_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ movzx_b(scratch, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
@@ -1425,12 +1512,21 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmp(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ j(equal, &function_entry_bytecode);
+
// Advance to the next bytecode.
Label if_return;
+ __ Push(eax);
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ecx, esi,
- &if_return);
+ eax, &if_return);
+ __ Pop(eax);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ mov(ecx, kInterpreterBytecodeOffsetRegister);
__ SmiTag(ecx);
@@ -1438,8 +1534,18 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
+ // No need to pop eax here since we will be aborting anyway.
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
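
Generate_InterpreterEnterBytecodeAdvance is the other half of that protocol: when the spilled offset still carries the function-entry sentinel because the code deoptimized during the entry stack check, it rewrites the offset to the first real bytecode instead of trying to advance past an invalid one. Schematically (parameters stand in for the illustrative constants above):

    // header_size / entry_sentinel mirror the illustrative constants above.
    int NextOffset(int spilled_offset, int header_size, int entry_sentinel) {
      if (spilled_offset == header_size + entry_sentinel) {
        return header_size;  // resume at the first actual bytecode
      }
      return spilled_offset;  // otherwise advance normally from here
    }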
@@ -1524,9 +1630,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // -- esp[4] : argArray
- // -- esp[8] : thisArg
- // -- esp[12] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : thisArg
+ // -- args[2] : argArray
// -----------------------------------
// 1. Load receiver into xmm0, argArray into edx (if present), remove all
@@ -1534,20 +1641,19 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// present) instead.
{
Label no_arg_array, no_this_arg;
+ StackArgumentsAccessor args(eax);
// Spill receiver to allow the usage of edi as a scratch register.
- __ movd(xmm0,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
+ __ movd(xmm0, args[0]);
__ LoadRoot(edx, RootIndex::kUndefinedValue);
__ mov(edi, edx);
__ test(eax, eax);
__ j(zero, &no_this_arg, Label::kNear);
{
- __ mov(edi, Operand(esp, eax, times_system_pointer_size, 0));
+ __ mov(edi, args[1]);
__ cmp(eax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
- __ mov(edx,
- Operand(esp, eax, times_system_pointer_size, -kSystemPointerSize));
+ __ mov(edx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
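
The repeated Operand(esp, eax, times_system_pointer_size, ...) computations give way to StackArgumentsAccessor, which hides whether the receiver or the last argument sits deepest on the stack. A hedged model of what such an accessor computes (the real class keys off V8_REVERSE_JSARGS at compile time; this struct is only an illustration):

    #include <cstdint>

    struct StackArgs {
      const intptr_t* sp;  // sp[0] holds the return address
      int argc;            // number of arguments, excluding the receiver
      bool reverse;        // models V8_REVERSE_JSARGS

      // args[0] is the receiver, args[1] the first argument, and so on.
      intptr_t operator[](int i) const {
        return reverse ? sp[1 + i]          // receiver pushed last
                       : sp[1 + argc - i];  // receiver pushed first (deepest)
      }
      intptr_t GetReceiverOperand() const { return (*this)[0]; }
    };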
@@ -1615,7 +1721,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
// 2. Get the callable to call (passed as receiver) from the stack.
- __ mov(edi, Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
+ {
+ StackArgumentsAccessor args(eax);
+ __ mov(edi, args.GetReceiverOperand());
+ }
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1641,10 +1750,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // -- esp[4] : argumentsList
- // -- esp[8] : thisArgument
- // -- esp[12] : target
- // -- esp[16] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : target
+ // -- args[2] : thisArgument
+ // -- args[3] : argumentsList
// -----------------------------------
// 1. Load target into edi (if present), argumentsList into edx (if present),
@@ -1652,20 +1762,18 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// thisArgument (if present) instead.
{
Label done;
+ StackArgumentsAccessor args(eax);
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_system_pointer_size,
- -0 * kSystemPointerSize));
+ __ mov(edi, args[1]); // target
__ j(equal, &done, Label::kNear);
- __ mov(ecx, Operand(esp, eax, times_system_pointer_size,
- -1 * kSystemPointerSize));
+ __ mov(ecx, args[2]); // thisArgument
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_system_pointer_size,
- -2 * kSystemPointerSize));
+ __ mov(edx, args[3]); // argumentsList
__ bind(&done);
// Spill argumentsList to use edx as a scratch register.
@@ -1701,10 +1809,11 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // -- esp[4] : new.target (optional)
- // -- esp[8] : argumentsList
- // -- esp[12] : target
- // -- esp[16] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : target
+ // -- args[2] : argumentsList
+ // -- args[3] : new.target (optional)
// -----------------------------------
// 1. Load target into edi (if present), argumentsList into ecx (if present),
@@ -1713,21 +1822,19 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// (if present) instead.
{
Label done;
+ StackArgumentsAccessor args(eax);
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_system_pointer_size,
- -0 * kSystemPointerSize));
+ __ mov(edi, args[1]); // target
__ mov(edx, edi);
__ j(equal, &done, Label::kNear);
- __ mov(ecx, Operand(esp, eax, times_system_pointer_size,
- -1 * kSystemPointerSize));
+ __ mov(ecx, args[2]); // argumentsList
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_system_pointer_size,
- -2 * kSystemPointerSize));
+ __ mov(edx, args[3]); // new.target
__ bind(&done);
// Spill argumentsList to use ecx as a scratch register.
@@ -1989,6 +2096,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
+ StackArgumentsAccessor args(eax);
__ AssertFunction(edi);
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
@@ -2022,15 +2130,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(ecx);
} else {
Label convert_to_object, convert_receiver;
- __ mov(ecx,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
+ __ mov(ecx, args.GetReceiverOperand());
__ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx); // Clobbers ecx.
__ j(above_equal, &done_convert);
// Reload the receiver (it was clobbered by CmpObjectType).
- __ mov(ecx,
- Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
+ __ mov(ecx, args.GetReceiverOperand());
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
__ JumpIfRoot(ecx, RootIndex::kUndefinedValue, &convert_global_proxy,
@@ -2066,8 +2172,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize),
- ecx);
+ __ mov(args.GetReceiverOperand(), ecx);
}
__ bind(&done_convert);
@@ -2125,7 +2230,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRealStackLimit(esp);
+ CompareStackLimit(masm, esp, StackLimitKind::kRealStackLimit);
__ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
__ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
@@ -2202,8 +2307,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(edi);
// Patch the receiver to [[BoundThis]].
+ StackArgumentsAccessor args(eax);
__ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
- __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize), ecx);
+ __ mov(args.GetReceiverOperand(), ecx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2220,6 +2326,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
// -----------------------------------
+ StackArgumentsAccessor args(eax);
Label non_callable, non_function, non_smi, non_jsfunction,
non_jsboundfunction;
@@ -2251,7 +2358,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// not we raise an exception).
__ bind(&non_function);
// Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize), edi);
+ __ mov(args.GetReceiverOperand(), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(edi, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2334,6 +2441,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
+ StackArgumentsAccessor args(eax);
// Check if target is a Smi.
Label non_constructor, non_proxy, non_jsfunction, non_jsboundfunction;
@@ -2370,8 +2478,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize),
- edi);
+ __ mov(args.GetReceiverOperand(), edi);
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(edi, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2396,8 +2503,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
const Register kExpectedNumberOfArgumentsRegister = ecx;
Label invoke, dont_adapt_arguments, stack_overflow, enough, too_few;
- __ cmp(kExpectedNumberOfArgumentsRegister,
- SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ cmp(kExpectedNumberOfArgumentsRegister, kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
__ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &too_few);
@@ -2564,7 +2670,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
offset += kSimd128Size;
}
- // Push the WASM instance as an explicit argument to WasmCompileLazy.
+ // Push the Wasm instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
__ Push(kWasmCompileLazyFuncIndexRegister);
@@ -2594,6 +2700,49 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ jmp(edi);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ for (int reg_code : base::bits::IterateBitsBackwards(
+ WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Push(Register::from_code(reg_code));
+ }
+
+ constexpr int kFpStackSize =
+ kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
+ __ AllocateStackSpace(kFpStackSize);
+ int offset = kFpStackSize;
+ for (int reg_code : base::bits::IterateBitsBackwards(
+ WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ offset -= kSimd128Size;
+ __ movdqu(Operand(esp, offset), DoubleRegister::from_code(reg_code));
+ }
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ for (int reg_code :
+ base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ __ movdqu(DoubleRegister::from_code(reg_code), Operand(esp, offset));
+ offset += kSimd128Size;
+ }
+ __ add(esp, Immediate(kFpStackSize));
+ for (int reg_code :
+ base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Pop(Register::from_code(reg_code));
+ }
+ }
+
+ __ ret(0);
+}
+
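Generate_WasmDebugBreak walks the register mask from the highest bit down when pushing and from the lowest bit up when restoring, so the pop order mirrors the push order. A standalone sketch of that iteration (the observable behavior, not V8's base::bits implementation):

    #include <cstdint>
    #include <vector>

    std::vector<int> IterateBits(uint32_t mask) {  // lowest bit first
      std::vector<int> codes;
      for (int i = 0; i < 32; ++i)
        if (mask & (1u << i)) codes.push_back(i);
      return codes;
    }

    std::vector<int> IterateBitsBackwards(uint32_t mask) {  // highest first
      std::vector<int> codes;
      for (int i = 31; i >= 0; --i)
        if (mask & (1u << i)) codes.push_back(i);
      return codes;
    }
    // Pushing in IterateBitsBackwards order and popping in IterateBits order
    // returns every register to its original slot.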
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
index ebedbdce75..65cb207eaa 100644
--- a/deps/v8/src/builtins/internal-coverage.tq
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -6,46 +6,20 @@
namespace internal_coverage {
- const kHasCoverageInfo:
- constexpr int31 generates 'DebugInfo::kHasCoverageInfo';
-
- const kFirstSlotIndex:
- constexpr int31 generates 'CoverageInfo::kFirstSlotIndex';
- const kSlotBlockCountIndex:
- constexpr int31 generates 'CoverageInfo::kSlotBlockCountIndex';
- const kSlotIndexCountLog2:
- constexpr int31 generates 'CoverageInfo::kSlotIndexCountLog2';
- const kSlotIndexCountMask:
- constexpr int31 generates 'CoverageInfo::kSlotIndexCountMask';
-
macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
CoverageInfo labels IfNoCoverageInfo {
const shared: SharedFunctionInfo = function.shared_function_info;
const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
otherwise goto IfNoCoverageInfo;
- if ((debugInfo.flags & kHasCoverageInfo) == 0) goto IfNoCoverageInfo;
+ if (!SmiUntag(debugInfo.flags).has_coverage_info) goto IfNoCoverageInfo;
return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
}
- macro SlotCount(coverageInfo: CoverageInfo): Smi {
- assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
- assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask));
- return coverageInfo.length >> kSlotIndexCountLog2;
- }
-
- macro FirstIndexForSlot(implicit context: Context)(slot: Smi): Smi {
- assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
- return slot << kSlotIndexCountLog2;
- }
-
macro IncrementBlockCount(implicit context: Context)(
coverageInfo: CoverageInfo, slot: Smi) {
- assert(slot < SlotCount(coverageInfo));
- const slotStart: Smi = FirstIndexForSlot(slot);
- const index: Smi = slotStart + kSlotBlockCountIndex;
- coverageInfo.objects[index] =
- UnsafeCast<Smi>(coverageInfo.objects[index]) + 1;
+ assert(Convert<int32>(slot) < coverageInfo.slot_count);
+ ++coverageInfo.slots[slot].block_count;
}
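
The Torque rewrite drops the hand-rolled index arithmetic over a FixedArray in favor of a typed slot array backed by a real instance type, which is also what makes the IsCoverageInfo-based cast earlier in this patch possible. A C++ picture of the new shape (only block_count is shown; the struct is an illustration, not the actual layout):

    #include <cstdint>

    struct CoverageInfoSlot {
      int32_t block_count;  // incremented by IncBlockCounter
      // Source-position fields omitted; the exact layout lives in the .tq class.
    };

    struct CoverageInfoLike {
      int32_t slot_count;
      CoverageInfoSlot slots[8];  // fixed size only for this sketch
    };

    void IncrementBlockCount(CoverageInfoLike* info, int slot) {
      // assert(slot < info->slot_count), as in the macro above.
      ++info->slots[slot].block_count;
    }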
builtin IncBlockCounter(implicit context: Context)(
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 51a9564850..272a2a7db8 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -52,8 +52,8 @@ namespace iterator {
Context)(JSAny);
transitioning builtin GetIteratorWithFeedback(
- context: Context, receiver: JSAny, loadSlot: Smi, callSlot: Smi,
- feedback: Undefined|FeedbackVector): JSAny {
+ context: Context, receiver: JSAny, loadSlot: TaggedIndex,
+ callSlot: TaggedIndex, feedback: Undefined|FeedbackVector): JSAny {
let iteratorMethod: JSAny;
typeswitch (feedback) {
case (Undefined): {
@@ -64,8 +64,10 @@ namespace iterator {
context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
}
}
+ // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
+ const callSlotSmi: Smi = TaggedIndexToSmi(callSlot);
return CallIteratorWithFeedback(
- context, receiver, iteratorMethod, callSlot, feedback);
+ context, receiver, iteratorMethod, callSlotSmi, feedback);
}
transitioning builtin CallIteratorWithFeedback(
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 2561177cd6..cb1a86db2f 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -23,6 +23,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -61,10 +62,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -143,7 +150,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ subu(scratch1, sp, scratch1);
@@ -354,7 +361,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
__ Subu(scratch1, sp, scratch1);
@@ -701,7 +708,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
@@ -1058,7 +1065,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ Subu(t1, sp, Operand(t0));
- LoadRealStackLimit(masm, a2);
+ LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, t1, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
@@ -1087,6 +1094,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(a3, MemOperand(t1));
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, a2, StackLimitKind::kInterruptStackLimit);
+ __ Branch(&stack_check_interrupt, lo, sp, Operand(a2));
+ __ bind(&after_stack_check_interrupt);
+
// Load accumulator with undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1128,6 +1142,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ Sw(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Lw(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
+ __ Sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1332,6 +1370,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Unreachable code.
+ __ break_(0xCC);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
@@ -1351,6 +1398,11 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+
// Load the current bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
@@ -1362,12 +1414,22 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, a1, a2, a3,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -1982,7 +2044,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
@@ -2139,7 +2201,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
@@ -2264,7 +2326,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ Branch(&dont_adapt_arguments, eq, a2,
- Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ Operand(kDontAdaptArgumentsSentinel));
// We use Uless as the number of arguments should always be greater than 0.
__ Branch(&too_few, Uless, a0, Operand(a2));
@@ -2397,7 +2459,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf(a0, a1, a2, a3);
+ constexpr RegList gp_regs = Register::ListOf(a0, a2, a3);
constexpr RegList fp_regs =
DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14);
__ MultiPush(gp_regs);
@@ -2419,6 +2481,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(kScratchReg, v0, 0);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 8e91f8840f..baf2d5bfec 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -23,6 +23,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -60,10 +61,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -144,7 +151,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ dsubu(scratch1, sp, scratch1);
@@ -394,7 +401,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
@@ -501,7 +508,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
__ dsubu(scratch1, sp, scratch1);
@@ -1076,7 +1083,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ Dsubu(a5, sp, Operand(a4));
- LoadRealStackLimit(masm, a2);
+ LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, a5, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
@@ -1105,6 +1112,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sd(a3, MemOperand(a5));
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, a5, StackLimitKind::kInterruptStackLimit);
+ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
+ __ bind(&after_stack_check_interrupt);
+
// Load accumulator as undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1147,6 +1161,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ Sd(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
+ __ Sd(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1350,6 +1388,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ if (FLAG_debug_code) {
+ Label okay;
+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Unreachable code.
+ __ break_(0xCC);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
@@ -1369,6 +1416,11 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+
// Load the current bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
@@ -1380,12 +1432,22 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, a1, a2, a3,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -2023,7 +2085,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
@@ -2176,7 +2238,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadRealStackLimit(masm, kScratchReg);
+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
@@ -2300,7 +2362,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ Branch(&dont_adapt_arguments, eq, a2,
- Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ Operand(kDontAdaptArgumentsSentinel));
// We use Uless as the number of arguments should always be greater than 0.
__ Branch(&too_few, Uless, a0, Operand(a2));
@@ -2436,7 +2498,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
constexpr RegList gp_regs =
- Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7);
+ Register::ListOf(a0, a2, a3, a4, a5, a6, a7);
constexpr RegList fp_regs =
DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14);
__ MultiPush(gp_regs);
@@ -2458,6 +2520,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(v0);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values; we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq
index 37d51812a9..958cd5f5f6 100644
--- a/deps/v8/src/builtins/number.tq
+++ b/deps/v8/src/builtins/number.tq
@@ -34,7 +34,7 @@ namespace number {
// 4. Else, let radixNumber be ? ToInteger(radix).
const radix: JSAny = arguments[0];
const radixNumber: Number =
- radix == Undefined ? 10 : ToInteger_Inline(radix, kTruncateMinusZero);
+ radix == Undefined ? 10 : ToInteger_Inline(radix);
// 5. If radixNumber < 2 or radixNumber > 36, throw a RangeError exception.
if (radixNumber < 2 || radixNumber > 36) {
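
Dropping the kTruncateMinusZero argument reflects that plain ToInteger already folds -0 into +0, so a separate truncation mode buys nothing here (an inference from this change, not a statement from the patch). A small sketch of the radix normalization under that reading:

    #include <cmath>

    double ToInteger(double d) {
      if (std::isnan(d)) return 0.0;
      const double t = std::trunc(d);
      return t == 0.0 ? 0.0 : t;  // comparison folds -0.0 into +0.0
    }

    // Returns the radix for Number.prototype.toString, 10 if absent.
    // The caller throws a RangeError when the result is < 2 or > 36.
    double NormalizeRadix(bool radix_is_undefined, double radix) {
      return radix_is_undefined ? 10.0 : ToInteger(radix);
    }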
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 97e870959a..460d749297 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -2,25 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -58,10 +59,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+ StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -75,7 +82,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
@@ -424,7 +431,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ cmpl(sp, scratch);
__ blt(&stack_overflow);
@@ -717,7 +724,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadRealStackLimit(masm, scratch1);
+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ sub(scratch1, sp, scratch1);
@@ -950,18 +957,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not
+// advance the bytecode offset if the current bytecode is a JumpLoop; instead,
+// the JumpLoop is re-executed so that it jumps to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
- Register scratch2 = bytecode;
+ Register scratch3 = bytecode;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of a wide or extra wide JumpLoop bytecode, we will
+ // restore the original bytecode offset; to simplify the code, we keep a
+ // backup of it.
+ Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
+ __ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -992,7 +1008,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the size of the current bytecode.
__ bind(&process_bytecode);
-// Bailout to the return label if this is a return bytecode.
+ // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ cmpi(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
@@ -1000,10 +1016,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ cmpi(bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ bne(&not_jump_loop);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ b(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ ShiftLeftImm(scratch2, bytecode, Operand(2));
- __ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
- __ add(bytecode_offset, bytecode_offset, scratch2);
+ __ ShiftLeftImm(scratch3, bytecode, Operand(2));
+ __ lwzx(scratch3, MemOperand(bytecode_size_table, scratch3));
+ __ add(bytecode_offset, bytecode_offset, scratch3);
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -1118,7 +1148,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ sub(r8, sp, r5);
- LoadRealStackLimit(masm, r0);
+ LoadStackLimit(masm, r0, StackLimitKind::kRealStackLimit);
__ cmpl(r8, r0);
__ blt(&stack_overflow);
@@ -1148,6 +1178,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StorePX(r6, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ LoadStackLimit(masm, r6, StackLimitKind::kInterruptStackLimit);
+ __ cmpl(sp, r6);
+ __ blt(&stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1181,7 +1219,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5,
+ kInterpreterBytecodeOffsetRegister, r4, r5, r6,
&do_return);
__ b(&do_dispatch);
@@ -1190,6 +1228,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r5);
__ blr();
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ StoreP(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(r6, kInterpreterBytecodeOffsetRegister);
+ __ StoreP(r6,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&optimized_code_slot_not_empty);
Label maybe_has_optimized_code;
// Check if optimized code marker is actually a weak reference to the
@@ -1394,6 +1457,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmpi(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ bge(&okay);
+ __ bkpt(0);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
@@ -1413,6 +1486,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmpi(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ beq(&function_entry_bytecode);
+
// Load the current bytecode.
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1420,9 +1499,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5,
+ kInterpreterBytecodeOffsetRegister, r4, r5, r6,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
__ StoreP(r5,
@@ -1430,6 +1510,15 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ b(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -2083,7 +2172,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- LoadRealStackLimit(masm, scratch);
+ LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ cmpl(sp, scratch);
}
__ bgt(&done); // Signed comparison.
@@ -2325,7 +2414,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -----------------------------------
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
- __ cmpli(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpli(r5, Operand(kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
@@ -2532,6 +2621,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(r11);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ LoadSmiLiteral(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -3192,4 +3303,4 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq
index 8828ab84d2..95ca356a0c 100644
--- a/deps/v8/src/builtins/promise-abstract-operations.tq
+++ b/deps/v8/src/builtins/promise-abstract-operations.tq
@@ -40,7 +40,8 @@ namespace promise {
EnqueueMicrotask(Context, Microtask): Undefined;
macro
- ExtractHandlerContext(implicit context: Context)(handler: Callable|Undefined):
+ ExtractHandlerContextInternal(implicit context: Context)(handler: Callable|
+ Undefined):
Context labels NotFound {
let iter: JSAny = handler;
while (true) {
@@ -62,16 +63,25 @@ namespace promise {
goto NotFound;
}
- // According to the HTML specification, we use the handler's context to
- // EnqueueJob for Promise resolution.
+ macro
+ ExtractHandlerContext(implicit context: Context)(handler: Callable|
+ Undefined): Context {
+ try {
+ return ExtractHandlerContextInternal(handler) otherwise NotFound;
+ }
+ label NotFound deferred {
+ return context;
+ }
+ }
+
macro
ExtractHandlerContext(implicit context: Context)(
primary: Callable|Undefined, secondary: Callable|Undefined): Context {
try {
- return ExtractHandlerContext(primary) otherwise NotFound;
+ return ExtractHandlerContextInternal(primary) otherwise NotFound;
}
label NotFound deferred {
- return ExtractHandlerContext(secondary) otherwise Default;
+ return ExtractHandlerContextInternal(secondary) otherwise Default;
}
label Default deferred {
return context;
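
The split into ExtractHandlerContextInternal plus two thin wrappers is a plain fallback chain. A hedged C++-style sketch, where FindCreationContext is a hypothetical stand-in for the internal walk over bound functions and proxies:

  Context ExtractHandlerContext(Context current, Handler primary,
                                Handler secondary) {
    if (const Context* c = FindCreationContext(primary)) return *c;
    if (const Context* c = FindCreationContext(secondary)) return *c;
    return current;  // Default: fall back to the caller's context
  }

The one-argument wrapper is the same chain with only the first probe.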
@@ -92,6 +102,9 @@ namespace promise {
secondaryHandler = promiseReaction.fulfill_handler;
}
+ // According to HTML, we use the context of the appropriate handler as the
+ // context of the microtask. See step 3 of HTML's EnqueueJob:
+ // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
const handlerContext: Context =
ExtractHandlerContext(primaryHandler, secondaryHandler);
@@ -102,7 +115,8 @@ namespace promise {
kPromiseReactionSize ==
kPromiseReactionJobTaskSizeOfAllPromiseReactionJobTasks);
if constexpr (reactionType == kPromiseReactionFulfill) {
- promiseReaction.map = PromiseFulfillReactionJobTaskMapConstant();
+ * UnsafeConstCast(& promiseReaction.map) =
+ PromiseFulfillReactionJobTaskMapConstant();
const promiseReactionJobTask =
UnsafeCast<PromiseFulfillReactionJobTask>(promiseReaction);
promiseReactionJobTask.argument = argument;
@@ -116,7 +130,8 @@ namespace promise {
kPromiseReactionJobTaskPromiseOrCapabilityOffset);
} else {
StaticAssert(reactionType == kPromiseReactionReject);
- promiseReaction.map = PromiseRejectReactionJobTaskMapConstant();
+ * UnsafeConstCast(& promiseReaction.map) =
+ PromiseRejectReactionJobTaskMapConstant();
const promiseReactionJobTask =
UnsafeCast<PromiseRejectReactionJobTask>(promiseReaction);
promiseReactionJobTask.argument = argument;
@@ -416,34 +431,33 @@ namespace promise {
// PromiseReaction holding both the onFulfilled and onRejected callbacks.
// Once the {promise} is resolved we decide on the concrete handler to
// push onto the microtask queue.
+ const handlerContext = ExtractHandlerContext(onFulfilled, onRejected);
const promiseReactions =
UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
const reaction = NewPromiseReaction(
- promiseReactions, resultPromiseOrCapability, onFulfilled, onRejected);
+ handlerContext, promiseReactions, resultPromiseOrCapability,
+ onFulfilled, onRejected);
promise.reactions_or_result = reaction;
} else {
- let map: Map;
- let handler: Callable|Undefined = Undefined;
+ const reactionsOrResult = promise.reactions_or_result;
+ let microtask: PromiseReactionJobTask;
let handlerContext: Context;
if (promise.Status() == PromiseState::kFulfilled) {
- map = PromiseFulfillReactionJobTaskMapConstant();
- handler = onFulfilled;
handlerContext = ExtractHandlerContext(onFulfilled, onRejected);
+ microtask = NewPromiseFulfillReactionJobTask(
+ handlerContext, reactionsOrResult, onFulfilled,
+ resultPromiseOrCapability);
} else
deferred {
assert(promise.Status() == PromiseState::kRejected);
- map = PromiseRejectReactionJobTaskMapConstant();
- handler = onRejected;
handlerContext = ExtractHandlerContext(onRejected, onFulfilled);
+ microtask = NewPromiseRejectReactionJobTask(
+ handlerContext, reactionsOrResult, onRejected,
+ resultPromiseOrCapability);
if (!promise.HasHandler()) {
runtime::PromiseRevokeReject(promise);
}
}
-
- const reactionsOrResult = promise.reactions_or_result;
- const microtask = NewPromiseReactionJobTask(
- map, handlerContext, reactionsOrResult, handler,
- resultPromiseOrCapability);
EnqueueMicrotask(handlerContext, microtask);
}
promise.SetHasHandler();
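
In outline, PerformPromiseThen after this change picks the concrete job task up front instead of allocating a generic one and patching its map. A hedged sketch, where ctx abbreviates the ExtractHandlerContext call shown above:

  if (promise.status == kPending) {
    // Record both handlers; the concrete job is chosen at settlement time.
    promise.reactions = NewPromiseReaction(ctx, promise.reactions, capability,
                                           onFulfilled, onRejected);
  } else if (promise.status == kFulfilled) {
    Enqueue(NewFulfillJob(ctx, promise.result, onFulfilled, capability));
  } else {  // kRejected; also notifies the runtime if no handler was set
    Enqueue(NewRejectJob(ctx, promise.result, onRejected, capability));
  }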
diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq
index 7996cc5b3d..61461de29f 100644
--- a/deps/v8/src/builtins/promise-misc.tq
+++ b/deps/v8/src/builtins/promise-misc.tq
@@ -16,10 +16,6 @@ namespace promise_internal {
void;
extern macro PromiseBuiltinsAssembler::AllocateJSPromise(Context): HeapObject;
-
- extern macro PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
- Context): HeapObject;
-
}
namespace promise {
@@ -45,7 +41,7 @@ namespace promise {
assert(IsFunctionWithPrototypeSlotMap(promiseFun.map));
const promiseMap = UnsafeCast<Map>(promiseFun.prototype_or_initial_map);
const promiseHeapObject = promise_internal::AllocateJSPromise(context);
- promiseHeapObject.map = promiseMap;
+ * UnsafeConstCast(& promiseHeapObject.map) = promiseMap;
const promise = UnsafeCast<JSPromise>(promiseHeapObject);
promise.properties_or_hash = kEmptyFixedArray;
promise.elements = kEmptyFixedArray;
@@ -54,20 +50,36 @@ namespace promise {
return promise;
}
- macro NewPromiseReactionJobTask(implicit context: Context)(
- map: Map, handlerContext: Context, argument: Object,
- handler: Callable|Undefined,
+ macro NewPromiseFulfillReactionJobTask(implicit context: Context)(
+ handlerContext: Context, argument: Object, handler: Callable|Undefined,
promiseOrCapability: JSPromise|PromiseCapability|
- Undefined): PromiseReactionJobTask {
- const taskHeapObject =
- promise_internal::AllocatePromiseReactionJobTask(context);
- taskHeapObject.map = map;
- const jobTask = UnsafeCast<PromiseReactionJobTask>(taskHeapObject);
- jobTask.argument = argument;
- jobTask.context = handlerContext;
- jobTask.handler = handler;
- jobTask.promise_or_capability = promiseOrCapability;
- return jobTask;
+ Undefined): PromiseFulfillReactionJobTask {
+ const nativeContext = LoadNativeContext(handlerContext);
+ return new PromiseFulfillReactionJobTask{
+ map: PromiseFulfillReactionJobTaskMapConstant(),
+ argument,
+ context: handlerContext,
+ handler,
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
+ };
+ }
+
+ macro NewPromiseRejectReactionJobTask(implicit context: Context)(
+ handlerContext: Context, argument: Object, handler: Callable|Undefined,
+ promiseOrCapability: JSPromise|PromiseCapability|
+ Undefined): PromiseRejectReactionJobTask {
+ const nativeContext = LoadNativeContext(handlerContext);
+ return new PromiseRejectReactionJobTask{
+ map: PromiseRejectReactionJobTaskMapConstant(),
+ argument,
+ context: handlerContext,
+ handler,
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
+ };
}
// These allocate and initialize a promise with pending state and
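
Both allocators above also initialize one new field, presumably so embedder-attached continuation data travels with the job across the microtask boundary. The snapshot is taken from the handler's native context at allocation time; in sketch form:

  task.continuation_preserved_embedder_data =
      LoadNativeContext(handler_context)
          [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX];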
@@ -113,16 +125,19 @@ namespace promise {
}
macro NewPromiseReaction(implicit context: Context)(
- next: Zero|PromiseReaction,
+ handlerContext: Context, next: Zero|PromiseReaction,
promiseOrCapability: JSPromise|PromiseCapability|Undefined,
fulfillHandler: Callable|Undefined,
rejectHandler: Callable|Undefined): PromiseReaction {
+ const nativeContext = LoadNativeContext(handlerContext);
return new PromiseReaction{
map: PromiseReactionMapConstant(),
next: next,
reject_handler: rejectHandler,
fulfill_handler: fulfillHandler,
- promise_or_capability: promiseOrCapability
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
};
}
@@ -130,10 +145,10 @@ namespace promise {
macro NewPromiseResolveThenableJobTask(implicit context: Context)(
promiseToResolve: JSPromise, then: JSReceiver, thenable: JSReceiver,
- thenableContext: Context): PromiseResolveThenableJobTask {
+ thenContext: Context): PromiseResolveThenableJobTask {
return new PromiseResolveThenableJobTask{
map: PromiseResolveThenableJobTaskMapConstant(),
- context: thenableContext,
+ context: thenContext,
promise_to_resolve: promiseToResolve,
then: then,
thenable: thenable
diff --git a/deps/v8/src/builtins/promise-reaction-job.tq b/deps/v8/src/builtins/promise-reaction-job.tq
index 1db33f4a7a..1d20d22efb 100644
--- a/deps/v8/src/builtins/promise-reaction-job.tq
+++ b/deps/v8/src/builtins/promise-reaction-job.tq
@@ -17,27 +17,28 @@ namespace promise {
promiseOrCapability: JSPromise|PromiseCapability|Undefined, reason: JSAny,
reactionType: constexpr PromiseReactionType): JSAny {
if constexpr (reactionType == kPromiseReactionReject) {
- if (IsJSPromise(promiseOrCapability)) {
- // For fast native promises we can skip the indirection via the
- // promiseCapability.[[Reject]] function and run the resolve logic
- // directly from here.
- return RejectPromise(
- UnsafeCast<JSPromise>(promiseOrCapability), reason, False);
- } else
- deferred {
- assert(IsPromiseCapability(promiseOrCapability));
+ typeswitch (promiseOrCapability) {
+ case (promise: JSPromise): {
+ // For fast native promises we can skip the indirection via the
+ // promiseCapability.[[Reject]] function and run the resolve logic
+ // directly from here.
+ return RejectPromise(promise, reason, False);
+ }
+ case (Undefined): {
+ return Undefined;
+ }
+ case (capability: PromiseCapability): {
// In the general case we need to call the (user provided)
// promiseCapability.[[Reject]] function.
try {
- const promiseCapability =
- UnsafeCast<PromiseCapability>(promiseOrCapability);
- const reject = UnsafeCast<Callable>(promiseCapability.reject);
+ const reject = UnsafeCast<Callable>(capability.reject);
return Call(context, reject, Undefined, reason);
} catch (e) {
// Swallow the exception here.
return runtime::ReportMessage(e);
}
}
+ }
} else {
StaticAssert(reactionType == kPromiseReactionFulfill);
// We have to call out to the dedicated PromiseRejectReactionJob
@@ -53,20 +54,20 @@ namespace promise {
context: Context,
promiseOrCapability: JSPromise|PromiseCapability|Undefined, result: JSAny,
reactionType: constexpr PromiseReactionType): JSAny {
- if (IsJSPromise(promiseOrCapability)) {
- // For fast native promises we can skip the indirection via the
- // promiseCapability.[[Resolve]] function and run the resolve logic
- // directly from here.
- return ResolvePromise(
- context, UnsafeCast<JSPromise>(promiseOrCapability), result);
- } else
- deferred {
- assert(IsPromiseCapability(promiseOrCapability));
+ typeswitch (promiseOrCapability) {
+ case (promise: JSPromise): {
+ // For fast native promises we can skip the indirection via the
+ // promiseCapability.[[Resolve]] function and run the resolve logic
+ // directly from here.
+ return ResolvePromise(context, promise, result);
+ }
+ case (Undefined): {
+ return Undefined;
+ }
+ case (capability: PromiseCapability): {
// In the general case we need to call the (user provided)
// promiseCapability.[[Resolve]] function.
- const promiseCapability =
- UnsafeCast<PromiseCapability>(promiseOrCapability);
- const resolve = UnsafeCast<Callable>(promiseCapability.resolve);
+ const resolve = UnsafeCast<Callable>(capability.resolve);
try {
return Call(context, resolve, Undefined, result);
} catch (e) {
@@ -74,6 +75,7 @@ namespace promise {
context, promiseOrCapability, e, reactionType);
}
}
+ }
}
// https://tc39.es/ecma262/#sec-promisereactionjob
diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq
index af7dd7afa0..0fc98b556b 100644
--- a/deps/v8/src/builtins/promise-resolve.tq
+++ b/deps/v8/src/builtins/promise-resolve.tq
@@ -177,7 +177,14 @@ namespace promise {
label Enqueue {
// 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
// «promise, resolution, thenAction»).
- const nativeContext = LoadNativeContext(context);
+
+ // According to HTML, we use the context of the then function
+ // (|thenAction|) as the context of the microtask. See step 3 of HTML's
+ // EnqueueJob:
+ // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
+ const thenContext: Context =
+ ExtractHandlerContext(UnsafeCast<Callable>(then));
+ const nativeContext = LoadNativeContext(thenContext);
const task = NewPromiseResolveThenableJobTask(
promise, UnsafeCast<JSReceiver>(then),
UnsafeCast<JSReceiver>(resolution), nativeContext);
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index b9e56659f7..88bb80891e 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -6,21 +6,22 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -58,10 +59,15 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-MemOperand RealStackLimitAsMemOperand(MacroAssembler* masm) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+MemOperand StackLimitAsMemOperand(MacroAssembler* masm, StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
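
The StackLimitKind introduced here selects between the isolate's two limits: address_of_real_jslimit is the hard bound below which the stack genuinely overflows, while address_of_jslimit can additionally be lowered to make the next check take the interrupt path. A hedged C++ sketch of the intended use (the accessor names are stand-ins):

  bool StackCheckFails(uintptr_t sp, const Isolate* iso, StackLimitKind kind) {
    uintptr_t limit = kind == StackLimitKind::kRealStackLimit
                          ? iso->real_jslimit()  // hard overflow bound
                          : iso->jslimit();      // may be lowered to request
                                                 // an interrupt (GC, debugger)
    return sp < limit;  // overflow, or a pending interrupt, respectively
  }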
@@ -75,7 +81,8 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ LoadP(scratch, RealStackLimitAsMemOperand(masm));
+ __ LoadP(scratch,
+ StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ SubP(scratch, sp, scratch);
@@ -196,7 +203,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
__ JumpIfIsInRange(r6, kDefaultDerivedConstructor, kDerivedConstructor,
@@ -366,8 +374,9 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
__ bne(&done, Label::kNear);
- __ LoadP(sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ LoadTaggedPointerField(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done);
}
@@ -381,14 +390,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r3);
// Store input value into generator object.
- __ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
- r0);
+ __ StoreTaggedField(
+ r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
- __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
- __ LoadP(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(r6, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -416,12 +427,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ LoadP(scratch, RealStackLimitAsMemOperand(masm));
+ __ LoadP(scratch,
+ StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
__ CmpLogicalP(sp, scratch);
__ blt(&stack_overflow);
// Push receiver.
- __ LoadP(scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
// ----------- S t a t e -------------
@@ -433,28 +446,32 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadLogicalHalfWordP(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadP(r4, FieldMemOperand(
- r3, JSGeneratorObject::kParametersAndRegistersOffset));
+ __ LoadTaggedPointerField(
+ r4,
+ FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done_loop;
- __ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2));
- __ SubP(sp, r5);
+ __ ShiftLeftP(r1, r5, Operand(kSystemPointerSizeLog2));
+ __ SubP(sp, r1);
+
+ __ ShiftLeftP(r5, r5, Operand(kTaggedSizeLog2));
// ip = stack offset
// r5 = parameter array offset
__ LoadImmP(ip, Operand::Zero());
- __ SubP(r5, Operand(kSystemPointerSize));
+ __ SubP(r5, Operand(kTaggedSize));
__ blt(&done_loop);
- __ lgfi(r1, Operand(-kSystemPointerSize));
+ __ lghi(r1, Operand(-kTaggedSize));
__ bind(&loop);
// parameter copy loop
- __ LoadP(r0, FieldMemOperand(r4, r5, FixedArray::kHeaderSize));
+ __ LoadAnyTaggedField(r0, FieldMemOperand(r4, r5, FixedArray::kHeaderSize));
__ StoreP(r0, MemOperand(sp, ip));
// update offsets
@@ -467,8 +484,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
- __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, r5, ip);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
@@ -482,7 +501,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadRR(r5, r3);
__ LoadRR(r3, r6);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ JumpCodeObject(r4);
}
@@ -494,7 +513,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r3);
- __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -504,7 +524,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r3);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r3);
- __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -761,7 +782,8 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadP(scratch1, RealStackLimitAsMemOperand(masm));
+ __ LoadP(scratch1,
+ StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ SubP(scratch1, sp, scratch1);
@@ -902,8 +924,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register scratch1,
Register scratch2) {
// Store code entry in the closure.
- __ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
- r0);
+ __ StoreTaggedField(optimized_code,
+ FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
__ LoadRR(scratch1,
optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
@@ -952,8 +974,9 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
- __ LoadP(scratch, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
+ __ LoadTaggedPointerField(
+ scratch,
+ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ LoadW(scratch, FieldMemOperand(
scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
@@ -1008,18 +1031,27 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
- Register scratch2 = bytecode;
+ Register scratch3 = bytecode;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+ // will restore the original bytecode. In order to simplify the code, we have
+ // a backup of it.
+ Register original_bytecode_offset = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ bytecode, original_bytecode_offset));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
+ __ Move(original_bytecode_offset, bytecode_offset);
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label process_bytecode, extra_wide;
@@ -1050,7 +1082,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the size of the current bytecode.
__ bind(&process_bytecode);
-// Bailout to the return label if this is a return bytecode.
+ // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ CmpP(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
@@ -1058,10 +1090,24 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ CmpP(bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ bne(&not_jump_loop);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ b(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ ShiftLeftP(scratch2, bytecode, Operand(2));
- __ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
- __ AddP(bytecode_offset, bytecode_offset, scratch2);
+ __ ShiftLeftP(scratch3, bytecode, Operand(2));
+ __ LoadlW(scratch3, MemOperand(bytecode_size_table, scratch3));
+ __ AddP(bytecode_offset, bytecode_offset, scratch3);
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
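
With the JumpLoop case added, the helper behaves roughly like this C sketch; prefix handling is compressed and the table names are stand-ins:

  int Advance(const uint8_t* array, int offset, bool* is_return) {
    int original = offset;                  // backup, see comment above
    int bytecode = array[offset];
    if (bytecode == kWide || bytecode == kExtraWide) {
      bytecode = array[++offset];           // step over the prefix
    }
    *is_return = IsReturnBytecode(bytecode);
    if (*is_return) return offset;
    if (bytecode == kJumpLoop) return original;  // re-execute the back edge,
                                                 // prefix included
    return offset + kBytecodeSizeTable[bytecode];
  }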
@@ -1086,10 +1132,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
// The bytecode array could have been flushed from the shared function info,
@@ -1100,15 +1148,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&compile_lazy);
// Load the feedback vector from the closure.
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ LoadP(r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadLogicalHalfWordP(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
__ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
@@ -1116,9 +1166,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register optimized_code_entry = r6;
// Read off the optimized code slot in the feedback vector.
- __ LoadP(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the optimized code slot is not empty.
Label optimized_code_slot_not_empty;
@@ -1172,7 +1223,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ SubP(r8, sp, r4);
- __ CmpLogicalP(r8, RealStackLimitAsMemOperand(masm));
+ __ CmpLogicalP(
+ r8, StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
__ blt(&stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1202,6 +1254,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StoreP(r5, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ __ LoadP(r5,
+ StackLimitAsMemOperand(masm, StackLimitKind::kInterruptStackLimit));
+ __ CmpLogicalP(sp, r5);
+ __ blt(&stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1236,7 +1297,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4,
+ kInterpreterBytecodeOffsetRegister, r3, r4, r5,
&do_return);
__ b(&do_dispatch);
@@ -1254,6 +1315,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ StoreP(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
+ __ StoreP(r5,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&maybe_has_optimized_code);
// Load code entry from the weak reference, if it was cleared, resume
// execution of unoptimized code.
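
The stack_check_interrupt block above is the producer side of the kFunctionEntryBytecodeOffset sentinel that InterpreterEnterBytecodeAdvance detects: before calling the StackGuard, the trampoline stores an impossible bytecode offset into the frame, so a deopt during the interrupt is attributed to function entry rather than to a real bytecode. In outline (frame slots symbolic, a sketch rather than the generated assembly):

  frame[kBytecodeOffsetFromFp] =
      SmiTag(kFirstBytecode + kFunctionEntryBytecodeOffset);  // sentinel
  CallRuntime(Runtime::kStackGuard);
  bytecode_array = frame[kBytecodeArrayFromFp];  // re-load what the call moved
  accumulator = Undefined;
  frame[kBytecodeOffsetFromFp] = SmiTag(kFirstBytecode);      // real offset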
@@ -1401,15 +1487,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r4, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
- __ LoadP(r4,
- FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ b(&trampoline_loaded);
@@ -1447,6 +1535,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ if (FLAG_debug_code) {
+ Label okay;
+ __ CmpP(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ bge(&okay);
+ __ bkpt(0);
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
@@ -1466,6 +1563,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ Label enter_bytecode, function_entry_bytecode;
+ __ CmpP(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ beq(&function_entry_bytecode);
+
// Load the current bytecode.
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
@@ -1473,9 +1576,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance to the next bytecode.
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4,
+ kInterpreterBytecodeOffsetRegister, r3, r4, r5,
&if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
__ StoreP(r4,
@@ -1483,6 +1587,15 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ b(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -1580,13 +1693,14 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadP(r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
+ __ LoadTaggedPointerField(
+ r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ LoadP(r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex)));
- __ SmiUntag(r3);
+ __ SmiUntagField(
+ r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1865,7 +1979,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
Label ok, fail;
__ AssertNotSmi(r4);
- __ LoadP(scratch, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(scratch,
+ FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadHalfWordP(scratch,
FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ CmpP(scratch, Operand(FIXED_ARRAY_TYPE));
@@ -1890,13 +2005,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label loop, no_args, skip;
__ CmpP(r6, Operand::Zero());
__ beq(&no_args);
- __ AddP(
- r4, r4,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kSystemPointerSize));
+ __ AddP(r4, r4,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(scratch, MemOperand(r4, kSystemPointerSize));
- __ la(r4, MemOperand(r4, kSystemPointerSize));
+ __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
+ __ la(r4, MemOperand(r4, kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -1930,7 +2044,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r5, &new_target_not_constructor);
- __ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(scratch,
+ FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
__ bne(&new_target_constructor);
@@ -1954,7 +2069,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ beq(&arguments_adaptor);
{
__ LoadP(r7, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadLogicalHalfWordP(
r7,
FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2014,7 +2130,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
Label class_constructor;
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor);
@@ -2022,7 +2139,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ AndP(r0, r5,
@@ -2076,7 +2194,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r2, r3);
__ SmiUntag(r2);
}
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ ShiftLeftP(r6, r2, Operand(kSystemPointerSizeLog2));
@@ -2115,9 +2234,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r4 and length of that into r6.
Label no_bound_arguments;
- __ LoadP(r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
- __ LoadP(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ SmiUntag(r6);
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ LoadAndTestP(r6, r6);
__ beq(&no_bound_arguments);
{
@@ -2133,16 +2252,15 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ LoadRR(scratch, sp); // preserve previous stack pointer
__ ShiftLeftP(r9, r6, Operand(kSystemPointerSizeLog2));
- __ SubP(sp, sp, r9);
+ __ SubP(r1, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CmpLogicalP(sp, RealStackLimitAsMemOperand(masm));
+ __ CmpLogicalP(
+ r1, StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
- __ LoadRR(sp, scratch);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2151,10 +2269,12 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+ __ LoadRR(scratch, sp);
+ __ LoadRR(sp, r1);
+
// Relocate arguments down the stack.
// -- r2 : the number of arguments (not including the receiver)
// -- r8 : the previous stack pointer
- // -- r9: the size of the [[BoundArguments]]
{
Label skip, loop;
__ LoadImmP(r7, Operand::Zero());
@@ -2164,7 +2284,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ LoadP(r0, MemOperand(scratch, r7));
__ StoreP(r0, MemOperand(sp, r7));
- __ AddP(r7, r7, Operand(kSystemPointerSize));
+ __ lay(r7, MemOperand(r7, kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ bind(&skip);
}
@@ -2172,14 +2292,14 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop;
- __ AddP(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(r4, r4, r9);
+ __ ShiftLeftP(r9, r6, Operand(kTaggedSizeLog2));
+ __ lay(r4, MemOperand(r4, r9, FixedArray::kHeaderSize - kHeapObjectTag));
__ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(r0, MemOperand(r4, -kSystemPointerSize));
- __ lay(r4, MemOperand(r4, -kSystemPointerSize));
- __ StoreP(r0, MemOperand(sp, r7));
- __ AddP(r7, r7, Operand(kSystemPointerSize));
+ __ LoadAnyTaggedField(ip, MemOperand(r4, -kTaggedSize), r0);
+ __ lay(r4, MemOperand(r4, -kTaggedSize));
+ __ StoreP(ip, MemOperand(sp, r7));
+ __ lay(r7, MemOperand(r7, kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ AddP(r2, r2, r6);
}
@@ -2198,7 +2318,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r3);
// Patch the receiver to [[BoundThis]].
- __ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+ __ LoadAnyTaggedField(r5,
+ FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(sp, r1));
@@ -2206,8 +2327,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadP(r3,
- FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
@@ -2275,7 +2396,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ beq(&call_generic_stub);
@@ -2303,15 +2425,15 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
Label skip;
- __ CmpP(r3, r5);
+ __ CompareTagged(r3, r5);
__ bne(&skip);
- __ LoadP(r5,
- FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadP(r3,
- FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2329,7 +2451,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(r3, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r4, Map::Bits1::IsConstructorBit::kShift);
__ beq(&non_constructor);
@@ -2379,9 +2501,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -----------------------------------
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
- __ tmll(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ tmll(r4, Operand(kDontAdaptArgumentsSentinel));
__ b(Condition(1), &dont_adapt_arguments);
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ tmlh(r6,
Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask >>
@@ -2485,7 +2608,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3 : function (passed through to callee)
// r5 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ CallCodeObject(r4);
// Store offset of return address for deoptimizer.
@@ -2537,8 +2660,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ __ RecordComment("-- Call without adapting args --");
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ JumpCodeObject(r4);
__ bind(&stack_overflow);
@@ -2588,6 +2712,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Jump(ip);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ LoadSmiLiteral(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
@@ -3144,14 +3290,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadAnyTaggedField(
+ scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
@@ -3200,7 +3348,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
__ LoadP(api_function_address,
FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index e3f39a0906..4739e18c57 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/compiler/code-assembler.h"
#include "src/execution/isolate.h"
@@ -42,8 +43,7 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
return options;
}
- const base::AddressRegion& code_range =
- isolate->heap()->memory_allocator()->code_range();
+ const base::AddressRegion& code_range = isolate->heap()->code_range();
bool pc_relative_calls_fit_in_code_range =
!code_range.is_empty() &&
std::ceil(static_cast<float>(code_range.size() / MB)) <=
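
For reference, the condition the new accessor feeds is unchanged; completed here as a sketch (the constant is per-architecture, and the final assignment is assumed from context rather than shown in this hunk):

  bool pc_relative_calls_fit_in_code_range =
      !code_range.is_empty() &&
      std::ceil(static_cast<float>(code_range.size() / MB)) <=
          kMaxPCRelativeCodeRangeInMB;
  options.use_pc_relative_calls_and_jumps = pc_relative_calls_fit_in_code_range;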
@@ -97,6 +97,7 @@ Code BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
ExternalAssemblerBuffer(buffer, kBufferSize));
masm.set_builtin_index(builtin_index);
DCHECK(!masm.has_frame());
+ masm.CodeEntry();
generator(&masm);
int handler_table_offset = 0;
@@ -159,7 +160,7 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
- (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
+ (argc == kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(
isolate, &zone, argc_with_recv, Code::BUILTIN, name,
PoisoningMitigationLevel::kDontPoison, builtin_index);
diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq
index 1c14be6cb4..e3e72ae7b5 100644
--- a/deps/v8/src/builtins/string-repeat.tq
+++ b/deps/v8/src/builtins/string-repeat.tq
@@ -36,7 +36,7 @@ namespace string {
try {
// 3. Let n be ? ToInteger(count).
- typeswitch (ToInteger_Inline(count, kTruncateMinusZero)) {
+ typeswitch (ToInteger_Inline(count)) {
case (n: Smi): {
// 4. If n < 0, throw a RangeError exception.
if (n < 0) goto InvalidCount;
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index 47e91c93dc..85c43342cf 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -17,13 +17,12 @@ namespace torque_internal {
const offset: intptr;
unsafeMarker: Unsafe;
}
+ type ConstReference<T: type> extends Reference<T>;
+ type MutableReference<T: type> extends ConstReference<T>;
macro UnsafeNewReference<T: type>(object: HeapObject, offset: intptr):&T {
- return Reference<T>{
- object: object,
- offset: offset,
- unsafeMarker: Unsafe {}
- };
+ return %RawDownCast<&T>(
+ Reference<T>{object: object, offset: offset, unsafeMarker: Unsafe {}});
}
struct Slice<T: type> {
@@ -148,7 +147,7 @@ namespace torque_internal {
type UninitializedHeapObject extends HeapObject;
extern macro AllocateAllowLOS(intptr): UninitializedHeapObject;
- extern macro GetStructMap(constexpr InstanceType): Map;
+ extern macro GetInstanceTypeMap(constexpr InstanceType): Map;
macro Allocate(sizeInBytes: intptr, map: Map): UninitializedHeapObject {
assert(ValidAllocationSize(sizeInBytes, map));
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 77a0b404be..e5398fc50a 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -13,8 +13,6 @@ namespace typed_array {
extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray;
extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
implicit context: Context)(JSTypedArray): JSFunction;
- extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer):
- bool;
extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
JSTypedArray): void;
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 99f052d695..8d028c88f0 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -5,17 +5,18 @@
#if V8_TARGET_ARCH_X64
#include "src/api/api-arguments.h"
+#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/logging/counters.h"
-// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/debug-objects.h"
#include "src/objects/foreign.h"
@@ -23,6 +24,7 @@
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -65,10 +67,15 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-Operand RealStackLimitAsOperand(MacroAssembler* masm) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+Operand StackLimitAsOperand(MacroAssembler* masm, StackLimitKind kind) {
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
- ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
@@ -84,7 +91,8 @@ void Generate_StackOverflowCheck(
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
- __ movq(kScratchRegister, RealStackLimitAsOperand(masm));
+ __ movq(kScratchRegister,
+ StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
__ movq(scratch, rsp);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
@@ -116,31 +124,22 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(rsi);
__ Push(rcx);
+#ifdef V8_REVERSE_JSARGS
+ // Set up pointer to first argument (skip receiver).
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
+ // Copy arguments to the expression stack.
+ __ PushArray(rbx, rax, rcx);
+ // The receiver for the builtin/api call.
+ __ PushRoot(RootIndex::kTheHoleValue);
+#else
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-
// Set up pointer to last argument.
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- // ----------- S t a t e -------------
- // -- rax: number of arguments (untagged)
- // -- rdi: constructor function
- // -- rdx: new target
- // -- rbx: pointer to last argument
- // -- rcx: counter
- // -- sp[0*kSystemPointerSize]: the hole (receiver)
- // -- sp[1*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[2*kSystemPointerSize]: context
- // -----------------------------------
- __ jmp(&entry);
- __ bind(&loop);
- __ Push(Operand(rbx, rcx, times_system_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop, Label::kNear);
+ // Copy arguments to the expression stack.
+ __ PushArray(rbx, rax, rcx);
+#endif
// Call the function.
// rax: number of arguments (untagged)
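
The #ifdef above swaps the push order of receiver and arguments. Under V8_REVERSE_JSARGS the arguments go on the stack first (last argument deepest) and the receiver is pushed last, so it sits at rsp[0]; classically the receiver is pushed first and the last argument ends up on top. A stand-alone sketch of the two resulting layouts, where a std::vector models the stack (names are illustrative):

    #include <cassert>
    #include <string>
    #include <vector>

    // stack.back() models the value at rsp[0] (top of stack).
    std::vector<std::string> BuildArgArea(const std::vector<std::string>& args,
                                          bool reverse_jsargs) {
      std::vector<std::string> stack;
      if (reverse_jsargs) {
        // Arguments first (last argument deepest), receiver pushed last.
        for (auto it = args.rbegin(); it != args.rend(); ++it)
          stack.push_back(*it);
        stack.push_back("receiver");
      } else {
        // Receiver first (deepest), arguments after it (last argument on top).
        stack.push_back("receiver");
        for (const std::string& a : args) stack.push_back(a);
      }
      return stack;
    }

    int main() {
      assert(BuildArgArea({"a1", "a2"}, false).back() == "a2");
      assert(BuildArgArea({"a1", "a2"}, true).back() == "receiver");
    }
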
@@ -238,28 +237,34 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(rdx);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
+ // Push the allocated receiver to the stack.
__ Push(rax);
+
+#ifdef V8_REVERSE_JSARGS
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments; we save it in r8
+  // since rax needs to hold the number of arguments before invoking the
+  // function.
+ __ movq(r8, rax);
+
+ // Set up pointer to first argument (skip receiver).
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
+#else
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver.
__ Push(rax);
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize] implicit receiver
- // -- sp[1*kSystemPointerSize] implicit receiver
- // -- sp[2*kSystemPointerSize] padding
- // -- sp[3*kSystemPointerSize] constructor function
- // -- sp[4*kSystemPointerSize] number of arguments (tagged)
- // -- sp[5*kSystemPointerSize] context
- // -----------------------------------
+ // Set up pointer to last argument.
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+#endif
// Restore constructor function and argument count.
__ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
- // Set up pointer to last argument.
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
// Check if we have enough stack space to push all arguments.
// Argument count in rax. Clobbers rcx.
Label enough_stack_space, stack_overflow;
@@ -275,27 +280,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&enough_stack_space);
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- // ----------- S t a t e -------------
- // -- rax: number of arguments (untagged)
- // -- rdx: new target
- // -- rbx: pointer to last argument
- // -- rcx: counter (tagged)
- // -- sp[0*kSystemPointerSize]: implicit receiver
- // -- sp[1*kSystemPointerSize]: implicit receiver
- // -- sp[2*kSystemPointerSize]: padding
- // -- rdi and sp[3*kSystemPointerSize]: constructor function
- // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[5*kSystemPointerSize]: context
- // -----------------------------------
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ Push(Operand(rbx, rcx, times_system_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop, Label::kNear);
+ // Copy arguments to the expression stack.
+ __ PushArray(rbx, rax, rcx);
+
+#ifdef V8_REVERSE_JSARGS
+ // Push implicit receiver.
+ __ Push(r8);
+#endif
// Call the function.
__ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
@@ -606,9 +597,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
IsolateAddressId::kContextAddress, masm->isolate());
__ movq(rsi, masm->ExternalReferenceAsOperand(context_address));
- // Push the function and the receiver onto the stack.
+ // Push the function onto the stack.
__ Push(rdi);
+
+#ifndef V8_REVERSE_JSARGS
+ // Push the receiver onto the stack.
__ Push(arg_reg_4);
+#endif
#ifdef V8_TARGET_OS_WIN
// Load the previous frame pointer to access C arguments on stack
@@ -620,18 +615,25 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, r8);
__ movq(rbx, r9);
+#ifdef V8_REVERSE_JSARGS
+ __ movq(r9, arg_reg_4); // Temporarily saving the receiver.
+#endif
#endif // V8_TARGET_OS_WIN
- // Current stack contents:
- // [rsp + 2 * kSystemPointerSize ... ] : Internal frame
- // [rsp + kSystemPointerSize] : function
- // [rsp] : receiver
+ // Current stack contents if V8_REVERSE_JSARGS:
+ // [rsp + kSystemPointerSize] : Internal frame
+ // [rsp] : function
+ // Current stack contents if not V8_REVERSE_JSARGS:
+ // [rsp + 2 * kSystemPointerSize] : Internal frame
+ // [rsp + kSystemPointerSize] : function
+ // [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
// rsi : context
// rdi : function
// rdx : new.target
+ // r9 : receiver, if V8_REVERSE_JSARGS
// Check if we have enough stack space to push all arguments.
// Argument count in rax. Clobbers rcx.
@@ -649,6 +651,20 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
+#ifdef V8_REVERSE_JSARGS
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry, Label::kNear);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rbx, rcx, times_system_pointer_size, 0));
+ __ Push(Operand(kScratchRegister, 0)); // dereference handle
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop, Label::kNear);
+
+ // Push the receiver.
+ __ Push(r9);
+#else
Label loop, entry;
__ Set(rcx, 0); // Set loop variable to 0.
__ jmp(&entry, Label::kNear);
@@ -659,6 +675,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&entry);
__ cmpq(rcx, rax);
__ j(not_equal, &loop, Label::kNear);
+#endif
// Invoke the builtin code.
Handle<Code> builtin = is_construct
@@ -745,22 +762,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ cmpq(rsp, RealStackLimitAsOperand(masm));
+ __ cmpq(rsp, StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
__ j(below, &stack_overflow);
// Pop return address.
__ PopReturnAddressTo(rax);
+#ifndef V8_REVERSE_JSARGS
// Push receiver.
__ PushTaggedPointerField(
FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), decompr_scratch1);
+#endif
// ----------- S t a t e -------------
// -- rax : return address
// -- rdx : the JSGeneratorObject to resume
// -- rdi : generator function
// -- rsi : generator context
- // -- rsp[0] : generator receiver
+ // -- rsp[0] : generator receiver, if V8_REVERSE_JSARGS is not set
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -773,6 +792,27 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
+#ifdef V8_REVERSE_JSARGS
+ {
+ Label done_loop, loop;
+ __ movq(r9, rcx);
+
+ __ bind(&loop);
+ __ decq(r9);
+ __ j(less, &done_loop, Label::kNear);
+ __ PushTaggedAnyField(
+ FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
+ decompr_scratch1);
+ __ jmp(&loop);
+
+ __ bind(&done_loop);
+ }
+
+ // Push the receiver.
+ __ PushTaggedPointerField(
+ FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
+ decompr_scratch1);
+#else
Label done_loop, loop;
__ Set(r9, 0);
@@ -786,6 +826,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(&loop);
__ bind(&done_loop);
+#endif
}
// Underlying function needs to have bytecode available.
@@ -963,15 +1004,25 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not
+// advance the bytecode offset if the current bytecode is a JumpLoop; instead
+// it re-executes the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Label* if_return) {
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
- DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+
+  // The bytecode offset value will be increased by one in wide and extra wide
+  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+  // will restore the original bytecode offset. To simplify the code, we keep
+  // a backup of it.
+ Register original_bytecode_offset = scratch2;
+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+ bytecode_size_table, original_bytecode_offset));
+
+ __ movq(original_bytecode_offset, bytecode_offset);
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address());
@@ -1013,9 +1064,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ cmpb(bytecode,
+ Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ j(not_equal, &not_jump_loop, Label::kNear);
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ movq(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end, Label::kNear);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ addl(bytecode_offset,
Operand(bytecode_size_table, bytecode, times_int_size, 0));
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
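
The JumpLoop special case above can be summarized in plain C++. This sketch (bytecode names and sizes are illustrative, not V8's tables) keeps the backed-up offset and either restores it for a JumpLoop or adds the bytecode's size:

    #include <cassert>

    enum class Bytecode { kWide, kJumpLoop, kAdd };

    int BytecodeSize(Bytecode b) { return b == Bytecode::kWide ? 1 : 2; }

    // 'offset' may already have been bumped past a wide/extra-wide prefix;
    // 'original_offset' is the backup taken at the top of the helper.
    int AdvanceOrLoop(Bytecode bytecode, int offset, int original_offset) {
      if (bytecode == Bytecode::kJumpLoop) {
        // Re-execute the JumpLoop (including any prefix) so the backward
        // jump to the loop head is taken again.
        return original_offset;
      }
      return offset + BytecodeSize(bytecode);  // normal advance
    }

    int main() {
      // A JumpLoop behind a wide prefix: the offset was bumped from 10 to 11.
      assert(AdvanceOrLoop(Bytecode::kJumpLoop, 11, 10) == 10);
      assert(AdvanceOrLoop(Bytecode::kAdd, 11, 10) == 13);
    }
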
@@ -1127,7 +1192,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ movq(rax, rsp);
__ subq(rax, rcx);
- __ cmpq(rax, RealStackLimitAsOperand(masm));
+ __ cmpq(rax, StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
__ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1156,6 +1221,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movq(Operand(rbp, rcx, times_system_pointer_size, 0), rdx);
__ bind(&no_incoming_new_target_or_generator_register);
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ __ cmpq(rsp, StackLimitAsOperand(masm, StackLimitKind::kInterruptStackLimit));
+ __ j(below, &stack_check_interrupt);
+ __ bind(&after_stack_check_interrupt);
+
// The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
@@ -1188,7 +1260,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
- &do_return);
+ r11, &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1196,6 +1268,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ Move(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ movq(kInterpreterBytecodeArrayRegister,
+ Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ movq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+  __ SmiTag(rcx, kInterpreterBytecodeOffsetRegister);
+ __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rcx);
+
+ __ jmp(&after_stack_check_interrupt);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
@@ -1224,21 +1318,19 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register num_args,
Register start_address,
Register scratch) {
- // Find the address of the last argument.
- __ Move(scratch, num_args);
- __ shlq(scratch, Immediate(kSystemPointerSizeLog2));
+ // Find the argument with lowest address.
+ __ movq(scratch, num_args);
__ negq(scratch);
- __ addq(scratch, start_address);
-
+ __ leaq(start_address,
+ Operand(start_address, scratch, times_system_pointer_size,
+ kSystemPointerSize));
// Push the arguments.
- Label loop_header, loop_check;
- __ j(always, &loop_check, Label::kNear);
- __ bind(&loop_header);
- __ Push(Operand(start_address, 0));
- __ subq(start_address, Immediate(kSystemPointerSize));
- __ bind(&loop_check);
- __ cmpq(start_address, scratch);
- __ j(above, &loop_header, Label::kNear);
+#ifdef V8_REVERSE_JSARGS
+ __ PushArray(start_address, num_args, scratch,
+ TurboAssembler::PushArrayOrder::kReverse);
+#else
+ __ PushArray(start_address, num_args, scratch);
+#endif
}
// static
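
The rewritten Generate_InterpreterPushArgs computes the lowest-addressed argument slot with a single leaq instead of the old shl/neg/add sequence. A sketch of the address arithmetic, assuming 8-byte stack slots:

    #include <cassert>
    #include <cstdint>

    // leaq start, [start + (-num_args) * ptr_size + ptr_size]
    uintptr_t LowestArgAddress(uintptr_t last_arg_addr, uint64_t num_args,
                               uint64_t ptr_size = 8) {
      return last_arg_addr - (num_args - 1) * ptr_size;
    }

    int main() {
      // Three 8-byte slots whose highest-addressed slot is at 0x1010.
      assert(LowestArgAddress(0x1010, 3) == 0x1000);
    }
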
@@ -1255,7 +1347,13 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
- // Number of values to be pushed.
+#ifdef V8_REVERSE_JSARGS
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ decl(rax);
+ }
+#endif
+
__ leal(rcx, Operand(rax, 1)); // Add one for receiver.
// Add a stack check before pushing arguments.
@@ -1264,6 +1362,27 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
+#ifdef V8_REVERSE_JSARGS
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Don't copy receiver.
+ __ decq(rcx);
+ }
+
+ // rbx and rdx will be modified.
+ Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
+
+ // Push "undefined" as the receiver arg if we need to.
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ PushRoot(RootIndex::kUndefinedValue);
+ }
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register rbx.
+    // rbx already points to the penultimate argument; the spread
+    // is below that.
+ __ movq(rbx, Operand(rbx, -kSystemPointerSize));
+ }
+#else
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ PushRoot(RootIndex::kUndefinedValue);
@@ -1277,6 +1396,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ Pop(rbx); // Pass the spread in a register
__ decl(rax); // Subtract one for spread
}
+#endif
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
@@ -1319,16 +1439,33 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
+#ifdef V8_REVERSE_JSARGS
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ decl(rax);
+ }
+
+ // rcx and r8 will be modified.
+ Generate_InterpreterPushArgs(masm, rax, rcx, r8);
+
+ // Push slot for the receiver to be constructed.
+ __ Push(Immediate(0));
+#else
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
// rcx and r8 will be modified.
Generate_InterpreterPushArgs(masm, rax, rcx, r8);
+#endif
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+#ifdef V8_REVERSE_JSARGS
+ // Pass the spread in the register rbx.
+ __ movq(rbx, Operand(rcx, -kSystemPointerSize));
+#else
__ Pop(rbx); // Pass the spread in a register
__ decl(rax); // Subtract one for spread
-
+#endif
// Push return address in preparation for the tail-call.
__ PushReturnAddressFrom(kScratchRegister);
} else {
@@ -1423,6 +1560,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmpq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ j(greater_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
// Dispatch to the target bytecode.
__ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
@@ -1439,6 +1585,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ Label enter_bytecode, function_entry_bytecode;
+ __ cmpq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ j(equal, &function_entry_bytecode);
+
// Load the current bytecode.
__ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
@@ -1447,8 +1599,9 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
- &if_return);
+ r11, &if_return);
+ __ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(kInterpreterBytecodeOffsetRegister);
__ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
@@ -1456,6 +1609,15 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ movq(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&enter_bytecode);
+
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
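
The sketch below models the round trip the two hunks above implement: the frame's bytecode offset is parked at an out-of-range "function entry" marker around the StackGuard call, and InterpreterEnterBytecodeAdvance detects that marker and resumes at the first real bytecode. The constants are illustrative, not V8's actual values:

    #include <cassert>

    constexpr int kHeaderSize = 8;   // stand-in: offset of the first bytecode
    constexpr int kFunctionEntryBytecodeOffset = -1;
    constexpr int kFunctionEntryMarker =
        kHeaderSize + kFunctionEntryBytecodeOffset;

    // Mirrors InterpreterEnterBytecodeAdvance: the marker is not a valid
    // bytecode offset, so detect it and resume at the first real bytecode.
    int ResumeOffset(int stored_offset) {
      if (stored_offset == kFunctionEntryMarker) return kHeaderSize;
      return stored_offset + 1;  // stand-in for AdvanceBytecodeOffsetOrReturn
    }

    int main() {
      assert(ResumeOffset(kFunctionEntryMarker) == kHeaderSize);
      assert(ResumeOffset(kHeaderSize) == kHeaderSize + 1);
    }
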
@@ -1544,9 +1706,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // -- rsp[8] : argArray
- // -- rsp[16] : thisArg
- // -- rsp[24] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : thisArg
+ // -- args[2] : argArray
// -----------------------------------
// 1. Load receiver into rdi, argArray into rbx (if present), remove all
@@ -1554,17 +1717,17 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// present) instead.
{
Label no_arg_array, no_this_arg;
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ LoadRoot(rdx, RootIndex::kUndefinedValue);
__ movq(rbx, rdx);
- __ movq(rdi, args.GetReceiverOperand());
+ __ movq(rdi, args[0]);
__ testq(rax, rax);
__ j(zero, &no_this_arg, Label::kNear);
{
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movq(rdx, args[1]);
__ cmpq(rax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
- __ movq(rbx, args.GetArgumentOperand(2));
+ __ movq(rbx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
@@ -1606,6 +1769,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
// static
+// TODO(victor): merge steps 1, 2 and 3 when V8_REVERSE_JSARGS is set.
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
@@ -1613,17 +1777,23 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// rsp[8] : Argument n
// rsp[16] : Argument n-1
// ...
// rsp[8 * n] : Argument 1
- // rsp[8 * (n + 1)] : Receiver (callable to call)
- //
+ // rsp[8 * (n + 1)] : Argument 0 (receiver: callable to call)
+  // NOTE: The order of args is reversed if V8_REVERSE_JSARGS is set.
// rax contains the number of arguments, n, not counting the receiver.
- //
+
// 1. Make sure we have at least one argument.
{
Label done;
__ testq(rax, rax);
__ j(not_zero, &done, Label::kNear);
__ PopReturnAddressTo(rbx);
+#ifdef V8_REVERSE_JSARGS
+ __ Pop(kScratchRegister); // Pop the receiver.
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(kScratchRegister);
+#else
__ PushRoot(RootIndex::kUndefinedValue);
+#endif
__ PushReturnAddressFrom(rbx);
__ incq(rax);
__ bind(&done);
@@ -1631,25 +1801,34 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the callable to call (passed as receiver) from the stack.
{
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ movq(rdi, args.GetReceiverOperand());
}
+#ifdef V8_REVERSE_JSARGS
+ // 3. Shift return address one slot down on the stack (overwriting the
+ // original receiver), making the original first argument the new receiver.
+ {
+ __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
+ __ decq(rax); // One fewer argument (first argument is new receiver).
+ }
+#else
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
{
Label loop;
__ movq(rcx, rax);
- StackArgumentsAccessor args(rsp, rcx);
+ StackArgumentsAccessor args(rcx);
__ bind(&loop);
- __ movq(rbx, args.GetArgumentOperand(1));
- __ movq(args.GetArgumentOperand(0), rbx);
+ __ movq(rbx, args[1]);
+ __ movq(args[0], rbx);
__ decq(rcx);
__ j(not_zero, &loop); // While non-zero.
__ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
__ decq(rax); // One fewer argument (first argument is new receiver).
}
+#endif
// 4. Call the callable.
// Since we did not create a frame for Function.prototype.call() yet,
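
With reversed arguments, step 3 of Function.prototype.call no longer shifts every argument: the old receiver sits directly under the return address, so dropping that single slot makes the original first argument the new receiver. A vector-based sketch (illustrative names):

    #include <cassert>
    #include <string>
    #include <vector>

    // stack.back() is the top of stack (the return address).
    void DropReceiverUnderReturnAddress(std::vector<std::string>& stack) {
      // Layout, top to bottom: retaddr, receiver, arg1, ..., argN.
      std::string ret = stack.back();
      stack.pop_back();        // PopReturnAddressTo
      stack.pop_back();        // drop the old receiver; arg1 becomes receiver
      stack.push_back(ret);    // PushReturnAddressFrom
    }

    int main() {
      std::vector<std::string> stack{"arg2", "arg1", "receiver", "retaddr"};
      DropReceiverUnderReturnAddress(stack);
      // arg1 now occupies the receiver slot; no per-argument shifting needed.
      assert((stack == std::vector<std::string>{"arg2", "arg1", "retaddr"}));
    }
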
@@ -1661,10 +1840,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // -- rsp[8] : argumentsList
- // -- rsp[16] : thisArgument
- // -- rsp[24] : target
- // -- rsp[32] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : target
+ // -- args[2] : thisArgument
+ // -- args[3] : argumentsList
// -----------------------------------
// 1. Load target into rdi (if present), argumentsList into rbx (if present),
@@ -1672,18 +1852,18 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// thisArgument (if present) instead.
{
Label done;
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
__ cmpq(rax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ movq(rdi, args.GetArgumentOperand(1)); // target
+ __ movq(rdi, args[1]); // target
__ j(equal, &done, Label::kNear);
- __ movq(rdx, args.GetArgumentOperand(2)); // thisArgument
+ __ movq(rdx, args[2]); // thisArgument
__ cmpq(rax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ movq(rbx, args.GetArgumentOperand(3)); // argumentsList
+ __ movq(rbx, args[3]); // argumentsList
__ bind(&done);
__ PopReturnAddressTo(rcx);
__ leaq(rsp,
@@ -1712,10 +1892,11 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // -- rsp[8] : new.target (optional)
- // -- rsp[16] : argumentsList
- // -- rsp[24] : target
- // -- rsp[32] : receiver
+ // The order of args depends on V8_REVERSE_JSARGS
+ // -- args[0] : receiver
+ // -- args[1] : target
+ // -- args[2] : argumentsList
+ // -- args[3] : new.target (optional)
// -----------------------------------
// 1. Load target into rdi (if present), argumentsList into rbx (if present),
@@ -1724,19 +1905,19 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// (if present) instead.
{
Label done;
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
__ cmpq(rax, Immediate(1));
__ j(below, &done, Label::kNear);
- __ movq(rdi, args.GetArgumentOperand(1)); // target
+ __ movq(rdi, args[1]); // target
__ movq(rdx, rdi); // new.target defaults to target
__ j(equal, &done, Label::kNear);
- __ movq(rbx, args.GetArgumentOperand(2)); // argumentsList
+ __ movq(rbx, args[2]); // argumentsList
__ cmpq(rax, Immediate(3));
__ j(below, &done, Label::kNear);
- __ movq(rdx, args.GetArgumentOperand(3)); // new.target
+ __ movq(rdx, args[3]); // new.target
__ bind(&done);
__ PopReturnAddressTo(rcx);
__ leaq(rsp,
@@ -1809,14 +1990,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -----------------------------------
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpq(rbx, Immediate(kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+
+#ifndef V8_REVERSE_JSARGS
+ // This optimization is disabled when the arguments are reversed.
__ testl(
FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
__ j(not_zero, &skip_adapt_arguments);
+#endif
// -------------------------------------------
// Adapt arguments.
@@ -1834,7 +2019,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
+#ifdef V8_REVERSE_JSARGS
+ __ leaq(r8, Operand(rbp, rbx, times_system_pointer_size, offset));
+#else
__ leaq(r8, Operand(rbp, rax, times_system_pointer_size, offset));
+#endif
__ Set(rax, -1); // account for receiver
Label copy;
@@ -1850,6 +2039,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Too few parameters: Actual < expected.
__ bind(&under_application);
{
+#ifdef V8_REVERSE_JSARGS
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
+ __ movq(r8, rbx);
+ __ subq(r8, rax);
+ __ bind(&fill);
+ __ Push(kScratchRegister);
+ __ decq(r8);
+ __ j(greater, &fill);
+
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ leaq(r9, Operand(rbp, rax, times_system_pointer_size, offset));
@@ -1863,6 +2063,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmpq(r8, rax);
__ j(less, &copy);
+ // Update actual number of arguments.
+ __ movq(rax, rbx);
+#else // !V8_REVERSE_JSARGS
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ leaq(r9, Operand(rbp, rax, times_system_pointer_size, offset));
+ __ Set(r8, -1); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ incq(r8);
+ __ Push(Operand(r9, 0));
+ __ subq(r9, Immediate(kSystemPointerSize));
+ __ cmpq(r8, rax);
+ __ j(less, &copy);
+
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
@@ -1871,6 +2087,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Push(kScratchRegister);
__ cmpq(rax, rbx);
__ j(less, &fill);
+#endif // !V8_REVERSE_JSARGS
}
// Call the entry point.
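
Under-application in the reversed-args adaptor pushes the undefined padding first, so it lands in the deepest slots, i.e. the missing trailing parameters; the actual arguments and receiver follow. A minimal model of the resulting frame (names illustrative):

    #include <cassert>
    #include <string>
    #include <vector>

    // back() is the top of stack.
    std::vector<std::string> AdaptReversed(const std::vector<std::string>& actual,
                                           size_t expected) {
      std::vector<std::string> stack;
      for (size_t i = actual.size(); i < expected; ++i)
        stack.push_back("undefined");                    // the fill loop
      for (auto it = actual.rbegin(); it != actual.rend(); ++it)
        stack.push_back(*it);                            // the copy loop
      stack.push_back("receiver");
      return stack;
    }

    int main() {
      auto frame = AdaptReversed({"a1"}, 3);
      assert((frame == std::vector<std::string>{"undefined", "undefined", "a1",
                                                "receiver"}));
    }
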
@@ -1981,6 +2198,56 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Generate_StackOverflowCheck(masm, rcx, r8, &stack_overflow, Label::kNear);
// Push additional arguments onto the stack.
+#ifdef V8_REVERSE_JSARGS
+ // Move the arguments already in the stack,
+ // including the receiver and the return address.
+ {
+ Label copy, check;
+ Register src = r8, dest = rsp, num = r9, current = r11;
+ __ movq(src, rsp);
+ __ movq(kScratchRegister, rcx);
+ __ negq(kScratchRegister);
+ __ leaq(rsp, Operand(rsp, kScratchRegister, times_system_pointer_size,
+ 0)); // Update stack pointer.
+ __ leaq(num, Operand(rax, 2)); // Number of words to copy.
+ // +2 for receiver and return address.
+ __ Set(current, 0);
+ __ jmp(&check);
+ __ bind(&copy);
+ __ movq(kScratchRegister,
+ Operand(src, current, times_system_pointer_size, 0));
+ __ movq(Operand(dest, current, times_system_pointer_size, 0),
+ kScratchRegister);
+ __ incq(current);
+ __ bind(&check);
+ __ cmpq(current, num);
+ __ j(less, &copy);
+ __ leaq(r8, Operand(rsp, num, times_system_pointer_size, 0));
+ }
+
+ // Copy the additional arguments onto the stack.
+ {
+ Register value = scratch;
+ Register src = rbx, dest = r8, num = rcx, current = r9;
+ __ Set(current, 0);
+ Label done, push, loop;
+ __ bind(&loop);
+ __ cmpl(current, num);
+ __ j(equal, &done, Label::kNear);
+ // Turn the hole into undefined as we go.
+ __ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(value, RootIndex::kTheHoleValue);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(value, RootIndex::kUndefinedValue);
+ __ bind(&push);
+ __ movq(Operand(dest, current, times_system_pointer_size, 0), value);
+ __ incl(current);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ addq(rax, current);
+ }
+#else // !V8_REVERSE_JSARGS
{
Register value = scratch;
__ PopReturnAddressTo(r8);
@@ -2003,6 +2270,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ PushReturnAddressFrom(r8);
__ addq(rax, r9);
}
+#endif
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
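
The reversed-args branch of Generate_CallOrConstructVarargs cannot simply push the spread elements: they must end up below the existing receiver and return address. The code therefore grows the stack, slides the existing words to the new top, and writes the new arguments into the gap. A self-contained model of that shuffle, where a vector's back() is the stack top (names are illustrative):

    #include <cassert>
    #include <string>
    #include <vector>

    // 'existing' counts the words to slide: retaddr + receiver + fixed args.
    void InsertArgsBelowExisting(std::vector<std::string>& stack,
                                 const std::vector<std::string>& extra,
                                 size_t existing) {
      size_t old_top = stack.size();
      stack.resize(old_top + extra.size());
      // Slide the existing words to the new top (the first copy loop).
      for (size_t i = 0; i < existing; ++i)
        stack[stack.size() - 1 - i] = stack[old_top - 1 - i];
      // Write the new arguments into the gap (the second loop); the first
      // extra argument ends up closest to the existing ones.
      for (size_t i = 0; i < extra.size(); ++i)
        stack[old_top - existing + i] = extra[extra.size() - 1 - i];
    }

    int main() {
      std::vector<std::string> stack{"fixed2", "fixed1", "recv", "ret"};
      InsertArgsBelowExisting(stack, {"spread1", "spread2"}, 4);
      assert((stack == std::vector<std::string>{"spread2", "spread1", "fixed2",
                                                "fixed1", "recv", "ret"}));
    }
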
@@ -2076,9 +2344,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ PopReturnAddressTo(rcx);
__ bind(&loop);
{
- StackArgumentsAccessor args(rbx, r8, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ Push(args.GetArgumentOperand(0));
__ decl(r8);
+ __ Push(Operand(rbx, r8, times_system_pointer_size,
+ kFPOnStackSize + kPCOnStackSize));
__ j(not_zero, &loop);
}
__ PushReturnAddressFrom(rcx);
@@ -2101,7 +2369,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ AssertFunction(rdi);
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
@@ -2233,6 +2501,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
// -----------------------------------
+ // TODO(victor): Use Generate_StackOverflowCheck here.
// Check the stack for overflow.
{
Label done;
@@ -2242,7 +2511,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// We are not trying to catch interruptions (i.e. debug break and
// preemption) here, so check the "real stack limit".
- __ cmpq(kScratchRegister, RealStackLimitAsOperand(masm));
+ __ cmpq(kScratchRegister,
+ StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
__ j(above_equal, &done, Label::kNear);
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2252,6 +2522,35 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+#ifdef V8_REVERSE_JSARGS
+ // Save Return Address and Receiver into registers.
+ __ Pop(r8);
+ __ Pop(r10);
+
+ // Push [[BoundArguments]] to the stack.
+ {
+ Label loop;
+ __ LoadTaggedPointerField(
+ rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ addq(rax, rbx); // Adjust effective number of arguments.
+ __ bind(&loop);
+ // Instead of doing decl(rbx) here subtract kTaggedSize from the header
+ // offset in order to be able to move decl(rbx) right before the loop
+ // condition. This is necessary in order to avoid flags corruption by
+ // pointer decompression code.
+ __ LoadAnyTaggedField(
+ r12, FieldOperand(rcx, rbx, times_tagged_size,
+ FixedArray::kHeaderSize - kTaggedSize));
+ __ Push(r12);
+ __ decl(rbx);
+ __ j(greater, &loop);
+ }
+
+ // Recover Receiver and Return Address.
+ __ Push(r10);
+ __ Push(r8);
+#else // !V8_REVERSE_JSARGS
// Reserve stack space for the [[BoundArguments]].
__ movq(kScratchRegister, rbx);
__ AllocateStackSpace(kScratchRegister);
@@ -2282,7 +2581,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ bind(&loop);
// Instead of doing decl(rbx) here subtract kTaggedSize from the header
- // offset in order to move be able to move decl(rbx) right before the loop
+    // offset in order to be able to move decl(rbx) right before the loop
// condition. This is necessary in order to avoid flags corruption by
// pointer decompression code.
__ LoadAnyTaggedField(
@@ -2298,6 +2597,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// arguments from the call plus return address plus the number of
// [[BoundArguments]]), so we need to subtract one for the return address.
__ decl(rax);
+#endif // !V8_REVERSE_JSARGS
}
__ bind(&no_bound_arguments);
}
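
The reversed-args path of Generate_PushBoundArguments splices [[BoundArguments]] between the receiver and the call's own arguments by briefly popping the return address and receiver. A vector sketch of the splice (illustrative names; back() is the stack top):

    #include <cassert>
    #include <string>
    #include <vector>

    void PushBoundArguments(std::vector<std::string>& stack,
                            const std::vector<std::string>& bound) {
      std::string ret = stack.back();  stack.pop_back();   // Pop(r8)
      std::string recv = stack.back(); stack.pop_back();   // Pop(r10)
      // The push loop: last bound argument first, so it lands deepest.
      for (auto it = bound.rbegin(); it != bound.rend(); ++it)
        stack.push_back(*it);
      stack.push_back(recv);   // Push(r10)
      stack.push_back(ret);    // Push(r8)
    }

    int main() {
      // Top to bottom before: ret, recv, call_arg1; bound args are b1, b2.
      std::vector<std::string> stack{"call_arg1", "recv", "ret"};
      PushBoundArguments(stack, {"b1", "b2"});
      // Bound args now precede the call's own argument under the receiver.
      assert((stack == std::vector<std::string>{"call_arg1", "b2", "b1",
                                                "recv", "ret"}));
    }
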
@@ -2313,7 +2613,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(rdi);
// Patch the receiver to [[BoundThis]].
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
__ LoadAnyTaggedField(rbx,
FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
__ movq(args.GetReceiverOperand(), rbx);
@@ -2334,7 +2634,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the target to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
Label non_callable;
__ JumpIfSmi(rdi, &non_callable);
@@ -2439,7 +2739,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(rsp, rax);
+ StackArgumentsAccessor args(rax);
// Check if target is a Smi.
Label non_constructor;
@@ -2549,7 +2849,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
offset += kSimd128Size;
}
- // Push the WASM instance as an explicit argument to WasmCompileLazy.
+ // Push the Wasm instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
__ Push(r11);
@@ -2575,6 +2875,49 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ jmp(r11);
}
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ for (int reg_code : base::bits::IterateBitsBackwards(
+ WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Push(Register::from_code(reg_code));
+ }
+
+ constexpr int kFpStackSize =
+ kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
+ __ AllocateStackSpace(kFpStackSize);
+ int offset = kFpStackSize;
+ for (int reg_code : base::bits::IterateBitsBackwards(
+ WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ offset -= kSimd128Size;
+ __ movdqu(Operand(rsp, offset), DoubleRegister::from_code(reg_code));
+ }
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ for (int reg_code :
+ base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
+ __ movdqu(DoubleRegister::from_code(reg_code), Operand(rsp, offset));
+ offset += kSimd128Size;
+ }
+ __ addq(rsp, Immediate(kFpStackSize));
+ for (int reg_code :
+ base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
+ __ Pop(Register::from_code(reg_code));
+ }
+ }
+
+ __ ret(0);
+}
+
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
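
Generate_WasmDebugBreak saves registers by walking the pushed-register bit set from the highest code down and restores them walking back up. A standard-C++ sketch of the two iteration orders (IterateBitsBackwards / IterateBits are modeled here, not called):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    std::vector<int> BitsBackwards(uint32_t regs) {   // push order
      std::vector<int> codes;
      for (int code = 31; code >= 0; --code)
        if (regs & (1u << code)) codes.push_back(code);
      return codes;
    }

    std::vector<int> BitsForwards(uint32_t regs) {    // pop order
      std::vector<int> codes;
      for (int code = 0; code < 32; ++code)
        if (regs & (1u << code)) codes.push_back(code);
      return codes;
    }

    int main() {
      uint32_t pushed = (1u << 0) | (1u << 3) | (1u << 5);
      assert((BitsBackwards(pushed) == std::vector<int>{5, 3, 0}));
      assert((BitsForwards(pushed) == std::vector<int>{0, 3, 5}));
    }
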
@@ -2979,11 +3322,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- rbx : call data
// -- rdi : holder
// -- rsp[0] : return address
- // -- rsp[8] : last argument
+ // -- rsp[8] : argument argc
// -- ...
- // -- rsp[argc * 8] : first argument
- // -- rsp[(argc + 1) * 8] : receiver
+ // -- rsp[argc * 8] : argument 1
+ // -- rsp[(argc + 1) * 8] : argument 0 (receiver)
// -----------------------------------
+  // NOTE: The order of args is reversed if V8_REVERSE_JSARGS is set.
Register api_function_address = rdx;
Register argc = rcx;
@@ -3042,8 +3386,13 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
+#ifdef V8_REVERSE_JSARGS
+ __ leaq(scratch,
+ Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
+#else
__ leaq(scratch, Operand(scratch, argc, times_system_pointer_size,
(FCA::kArgsLength - 1) * kSystemPointerSize));
+#endif
__ movq(StackSpaceOperand(1), scratch);
// FunctionCallbackInfo::length_.
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 8846258784..7b3ad8d1e0 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -1,6 +1,7 @@
bbudge@chromium.org
bmeurer@chromium.org
clemensb@chromium.org
+delphick@chromium.org
gdeepti@chromium.org
ishell@chromium.org
jarin@chromium.org
@@ -12,6 +13,7 @@ mvstanton@chromium.org
neis@chromium.org
rmcilroy@chromium.org
sigurds@chromium.org
+solanes@chromium.org
tebbi@chromium.org
titzer@chromium.org
mythria@chromium.org
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 46ad405489..349c8dc29e 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -852,13 +852,6 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
add(fp, sp, Operand(offset));
}
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that lowest encodings are closest to the stack pointer.
- DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return reg_code;
-}
-
void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
@@ -2373,15 +2366,25 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
- if (isolate() != nullptr) {
- Register scratch = r4;
- Push(scratch);
+ Register addr_scratch = r4;
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ str(pc,
+ MemOperand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()));
+ str(fp,
+ MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Push(addr_scratch);
+
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ str(pc, MemOperand(addr_scratch));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ str(fp, MemOperand(addr_scratch));
- Move(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
- str(pc, MemOperand(scratch));
- Move(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- str(fp, MemOperand(scratch));
- Pop(scratch);
+ Pop(addr_scratch);
}
// Just call directly. The function called cannot cause a GC, or
@@ -2389,19 +2392,25 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// stays correct.
Call(function);
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- Register scratch1 = r4;
- Register scratch2 = r5;
- Push(scratch1);
- Push(scratch2);
- Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- mov(scratch2, Operand::Zero());
- str(scratch2, MemOperand(scratch1));
- Pop(scratch2);
- Pop(scratch1);
+ // We don't unset the PC; the FP is the source of truth.
+ Register zero_scratch = r5;
+ Push(zero_scratch);
+ mov(zero_scratch, Operand::Zero());
+
+ if (root_array_available()) {
+ str(zero_scratch,
+ MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Push(addr_scratch);
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ str(zero_scratch, MemOperand(addr_scratch));
+ Pop(addr_scratch);
}
+ Pop(zero_scratch);
+
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (ActivationFrameAlignment() > kPointerSize) {
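
The ARM change above mirrors the x64 scheme: before a fast C call, the caller's pc and fp are recorded in the isolate data (via the root register when available, otherwise through an external reference), and afterwards only the fp slot is cleared. A stand-alone model with assumed field names:

    #include <cassert>
    #include <cstdint>

    struct IsolateDataModel {                 // hypothetical stand-in
      uintptr_t fast_c_call_caller_pc = 0;
      uintptr_t fast_c_call_caller_fp = 0;
    };

    template <typename Fn>
    void CallCFunction(IsolateDataModel& data, uintptr_t pc, uintptr_t fp,
                       Fn fn) {
      data.fast_c_call_caller_pc = pc;  // str pc, [root, pc_offset]
      data.fast_c_call_caller_fp = fp;  // str fp, [root, fp_offset]
      fn();
      // We don't unset the PC; the FP is the source of truth.
      data.fast_c_call_caller_fp = 0;
    }

    int main() {
      IsolateDataModel data;
      CallCFunction(data, 0x40, 0x80, [] {});
      assert(data.fast_c_call_caller_pc == 0x40);
      assert(data.fast_c_call_caller_fp == 0);
    }
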
@@ -2452,7 +2461,9 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
+ Label* exit, DeoptimizeKind kind) {
+ USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in r10 (we don't need the roots array from now on).
@@ -2473,6 +2484,7 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
}
void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index d0d9b1c655..9ec1bafb58 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -254,6 +254,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFromFloatResult(DwVfpRegister dst);
void Trap() override;
+ void DebugBreak() override;
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug-code to enable.
@@ -315,7 +316,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This should only be used when assembling a deoptimizer call because of
// the CheckConstPool invocation, which is only needed for deoptimization.
- void CallForDeoptimization(Address target, int deopt_id);
+ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
+ DeoptimizeKind kind);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -531,6 +533,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
private:
// Compare single values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const SwVfpRegister src1,
@@ -799,13 +811,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, Label* done,
InvokeFlag flag);
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h
index 2130bdf0a4..0d453ec03e 100644
--- a/deps/v8/src/codegen/arm/register-arm.h
+++ b/deps/v8/src/codegen/arm/register-arm.h
@@ -63,7 +63,6 @@ const int kR9Available = 1; // 1 if available to us, 0 if reserved
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 16;
// Caller-saved/arguments registers
const RegList kJSCallerSaved = 1 << 0 | // r0 a1
@@ -96,17 +95,6 @@ const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
const int kNumDoubleCalleeSaved = 8;
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-const int kNumSafepointRegisters = 16;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index 8416defe3c..f3c3e55975 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -463,19 +463,6 @@ bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
-Operand MemOperand::OffsetAsOperand() const {
- if (IsImmediateOffset()) {
- return offset();
- } else {
- DCHECK(IsRegisterOffset());
- if (extend() == NO_EXTEND) {
- return Operand(regoffset(), shift(), shift_amount());
- } else {
- return Operand(regoffset(), extend(), shift_amount());
- }
- }
-}
-
void Assembler::Unreachable() { debug("UNREACHABLE", __LINE__, BREAK); }
Address Assembler::target_pointer_address_at(Address pc) {
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index c3bae988f6..d5a0295934 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -83,18 +83,6 @@ CPURegister CPURegList::PopHighestIndex() {
return CPURegister::Create(index, size_, type_);
}
-void CPURegList::RemoveCalleeSaved() {
- if (type() == CPURegister::kRegister) {
- Remove(GetCalleeSaved(RegisterSizeInBits()));
- } else if (type() == CPURegister::kVRegister) {
- Remove(GetCalleeSavedV(RegisterSizeInBits()));
- } else {
- DCHECK_EQ(type(), CPURegister::kNoRegister);
- DCHECK(IsEmpty());
- // The list must already be empty, so do nothing.
- }
-}
-
void CPURegList::Align() {
// Use padreg, if necessary, to maintain stack alignment.
if (Count() % 2 != 0) {
@@ -109,7 +97,7 @@ void CPURegList::Align() {
}
CPURegList CPURegList::GetCalleeSaved(int size) {
- return CPURegList(CPURegister::kRegister, size, 19, 29);
+ return CPURegList(CPURegister::kRegister, size, 19, 28);
}
CPURegList CPURegList::GetCalleeSavedV(int size) {
@@ -118,9 +106,8 @@ CPURegList CPURegList::GetCalleeSavedV(int size) {
CPURegList CPURegList::GetCallerSaved(int size) {
// x18 is the platform register and is reserved for the use of platform ABIs.
- // Registers x0-x17 and lr (x30) are caller-saved.
+ // Registers x0-x17 are caller-saved.
CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 17);
- list.Combine(lr);
return list;
}
@@ -131,34 +118,6 @@ CPURegList CPURegList::GetCallerSavedV(int size) {
return list;
}
-// This function defines the list of registers which are associated with a
-// safepoint slot. Safepoint register slots are saved contiguously on the stack.
-// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
-// code to index in the safepoint register slots. Any change here can affect
-// this mapping.
-CPURegList CPURegList::GetSafepointSavedRegisters() {
- CPURegList list = CPURegList::GetCalleeSaved();
- list.Combine(
- CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));
-
- // Note that unfortunately we can't use symbolic names for registers and have
- // to directly use register codes. This is because this function is used to
- // initialize some static variables and we can't rely on register variables
- // to be initialized due to static initialization order issues in C++.
-
- // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
- // preserved outside of the macro assembler.
- list.Remove(16);
- list.Remove(17);
-
- // x18 is the platform register and is reserved for the use of platform ABIs.
-
- // Add the link register (x30) to the safepoint list.
- list.Combine(30);
-
- return list;
-}
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -292,31 +251,6 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
return !RelocInfo::IsNone(rmode);
}
-MemOperand::PairResult MemOperand::AreConsistentForPair(
- const MemOperand& operandA, const MemOperand& operandB,
- int access_size_log2) {
- DCHECK_GE(access_size_log2, 0);
- DCHECK_LE(access_size_log2, 3);
- // Step one: check that they share the same base, that the mode is Offset
- // and that the offset is a multiple of access size.
- if (operandA.base() != operandB.base() || (operandA.addrmode() != Offset) ||
- (operandB.addrmode() != Offset) ||
- ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
- return kNotPair;
- }
- // Step two: check that the offsets are contiguous and that the range
- // is OK for ldp/stp.
- if ((operandB.offset() == operandA.offset() + (1LL << access_size_log2)) &&
- is_int7(operandA.offset() >> access_size_log2)) {
- return kPairAB;
- }
- if ((operandA.offset() == operandB.offset() + (1LL << access_size_log2)) &&
- is_int7(operandB.offset() >> access_size_log2)) {
- return kPairBA;
- }
- return kNotPair;
-}
-
// Assembler
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
@@ -1184,6 +1118,30 @@ void Assembler::autia1716() { Emit(AUTIA1716); }
void Assembler::paciasp() { Emit(PACIASP); }
void Assembler::autiasp() { Emit(AUTIASP); }
+void Assembler::bti(BranchTargetIdentifier id) {
+ SystemHint op;
+ switch (id) {
+ case BranchTargetIdentifier::kBti:
+ op = BTI;
+ break;
+ case BranchTargetIdentifier::kBtiCall:
+ op = BTI_c;
+ break;
+ case BranchTargetIdentifier::kBtiJump:
+ op = BTI_j;
+ break;
+ case BranchTargetIdentifier::kBtiJumpCall:
+ op = BTI_jc;
+ break;
+ case BranchTargetIdentifier::kNone:
+ case BranchTargetIdentifier::kPaciasp:
+ // We always want to generate a BTI instruction here, so disallow
+ // skipping its generation or generating a PACIASP instead.
+ UNREACHABLE();
+ }
+ hint(op);
+}
+
void Assembler::ldp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src) {
LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 04fbaf3fb0..a9e8a5e85a 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -154,20 +154,6 @@ class MemOperand {
inline bool IsPreIndex() const;
inline bool IsPostIndex() const;
- // For offset modes, return the offset as an Operand. This helper cannot
- // handle indexed modes.
- inline Operand OffsetAsOperand() const;
-
- enum PairResult {
- kNotPair, // Can't use a pair instruction.
- kPairAB, // Can use a pair instruction (operandA has lower address).
- kPairBA // Can use a pair instruction (operandB has lower address).
- };
- // Check if two MemOperand are consistent for stp/ldp use.
- static PairResult AreConsistentForPair(const MemOperand& operandA,
- const MemOperand& operandB,
- int access_size_log2 = kXRegSizeLog2);
-
private:
Register base_;
Register regoffset_;
@@ -953,7 +939,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional speculation barrier.
void csdb();
- // Alias for system instructions.
+ // Branch target identification.
+ void bti(BranchTargetIdentifier id);
+
+ // No-op.
void nop() { hint(NOP); }
// Different nop operations are used by the code generator to detect certain
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index ccafae5e14..e63962993a 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -38,14 +38,10 @@ constexpr int kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
const int kNumberOfVRegisters = 32;
-// Callee saved registers are x19-x30(lr).
-const int kNumberOfCalleeSavedRegisters = 11;
-const int kFirstCalleeSavedRegisterIndex = 19;
+// Callee saved registers are x19-x28.
+const int kNumberOfCalleeSavedRegisters = 10;
// Callee saved FP registers are d8-d15.
const int kNumberOfCalleeSavedVRegisters = 8;
-const int kFirstCalleeSavedVRegisterIndex = 8;
-// Callee saved registers with no specific purpose in JS are x19-x25.
-const size_t kJSCalleeSavedRegList = 0x03f80000;
const int kWRegSizeInBits = 32;
const int kWRegSizeInBitsLog2 = 5;
const int kWRegSize = kWRegSizeInBits >> 3;
@@ -389,7 +385,36 @@ enum SystemHint {
WFI = 3,
SEV = 4,
SEVL = 5,
- CSDB = 20
+ CSDB = 20,
+ BTI = 32,
+ BTI_c = 34,
+ BTI_j = 36,
+ BTI_jc = 38
+};
+
+// In a guarded page, only BTI and PACI[AB]SP instructions are allowed to be
+// the target of indirect branches. Details on which kinds of branches each
+// instruction allows follow in the comments below:
+enum class BranchTargetIdentifier {
+ // Do not emit a BTI instruction.
+ kNone,
+
+ // Emit a BTI instruction. Cannot be the target of indirect jumps/calls.
+ kBti,
+
+ // Emit a "BTI c" instruction. Can be the target of indirect jumps (BR) with
+ // x16/x17 as the target register, or indirect calls (BLR).
+ kBtiCall,
+
+ // Emit a "BTI j" instruction. Can be the target of indirect jumps (BR).
+ kBtiJump,
+
+ // Emit a "BTI jc" instruction, which is a combination of "BTI j" and "BTI c".
+ kBtiJumpCall,
+
+ // Emit a PACIASP instruction, which acts like a "BTI c" or a "BTI jc", based
+ // on the value of SCTLR_EL1.BT0.
+ kPaciasp
};
enum BarrierDomain {
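
A small sketch of the BranchTargetIdentifier-to-hint mapping that Assembler::bti performs, using the encodings from the enum above (everything else is illustrative):

    #include <cassert>

    enum SystemHintModel { BTI = 32, BTI_c = 34, BTI_j = 36, BTI_jc = 38 };
    enum class BtiId { kBti, kBtiCall, kBtiJump, kBtiJumpCall };

    SystemHintModel HintFor(BtiId id) {
      switch (id) {
        case BtiId::kBti:         return BTI;
        case BtiId::kBtiCall:     return BTI_c;   // target of BLR (calls)
        case BtiId::kBtiJump:     return BTI_j;   // target of BR (jumps)
        case BtiId::kBtiJumpCall: return BTI_jc;  // target of either
      }
      return BTI;  // unreachable
    }

    int main() { assert(HintFor(BtiId::kBtiJumpCall) == BTI_jc); }
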
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.cc b/deps/v8/src/codegen/arm64/instructions-arm64.cc
index ab022affdd..c2224ffe34 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.cc
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.cc
@@ -310,28 +310,6 @@ void Instruction::SetImmLLiteral(Instruction* source) {
SetInstructionBits(Mask(~mask) | imm);
}
-// TODO(jbramley): We can't put this inline in the class because things like
-// xzr and Register are not defined in that header. Consider adding
-// instructions-arm64-inl.h to work around this.
-bool InstructionSequence::IsInlineData() const {
- // Inline data is encoded as a single movz instruction which writes to xzr
- // (x31).
- return IsMovz() && SixtyFourBits() && (Rd() == kZeroRegCode);
- // TODO(all): If we extend ::InlineData() to support bigger data, we need
- // to update this method too.
-}
-
-// TODO(jbramley): We can't put this inline in the class because things like
-// xzr and Register are not defined in that header. Consider adding
-// instructions-arm64-inl.h to work around this.
-uint64_t InstructionSequence::InlineData() const {
- DCHECK(IsInlineData());
- uint64_t payload = ImmMoveWide();
- // TODO(all): If we extend ::InlineData() to support bigger data, we need
- // to update this method too.
- return payload;
-}
-
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(IntegerFormatMap());
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index 7fe732e2ba..d2341b972f 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -379,6 +379,24 @@ class Instruction {
(Mask(MoveWideImmediateMask) == MOVN_w);
}
+ bool IsException() const { return Mask(ExceptionFMask) == ExceptionFixed; }
+
+ bool IsPAuth() const { return Mask(SystemPAuthFMask) == SystemPAuthFixed; }
+
+ bool IsBti() const {
+ if (Mask(SystemHintFMask) == SystemHintFixed) {
+ int imm_hint = ImmHint();
+ switch (imm_hint) {
+ case BTI:
+ case BTI_c:
+ case BTI_j:
+ case BTI_jc:
+ return true;
+ }
+ }
+ return false;
+ }
+
bool IsNop(int n) {
// A marking nop is an instruction
// mov r<n>, r<n>
@@ -445,20 +463,6 @@ class Instruction {
void SetBranchImmTarget(Instruction* target);
};
-// Where Instruction looks at instructions generated by the Assembler,
-// InstructionSequence looks at instructions sequences generated by the
-// MacroAssembler.
-class InstructionSequence : public Instruction {
- public:
- static InstructionSequence* At(Address address) {
- return reinterpret_cast<InstructionSequence*>(address);
- }
-
- // Sequences generated by MacroAssembler::InlineData().
- bool IsInlineData() const;
- uint64_t InlineData() const;
-};
-
// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
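
The IsBti() predicate added above keys off the hint immediate returned by ImmHint(). As a standalone illustration of the same decoding, here is a minimal sketch; the encoding constants are assumptions based on the ARMv8.5-BTI HINT layout, not code from this patch:

    #include <cstdint>
    #include <cstdio>

    // A64 hints encode as 0xD503201F | (imm7 << 5), with imm7 in bits [11:5].
    constexpr uint32_t kHintFixed = 0xD503201F;
    constexpr uint32_t kHintMask = 0xFFFFF01F;  // everything except imm7

    bool IsBtiBits(uint32_t instr) {
      if ((instr & kHintMask) != kHintFixed) return false;
      int imm = (instr >> 5) & 0x7F;
      return imm == 32 || imm == 34 || imm == 36 || imm == 38;  // BTI {-,c,j,jc}
    }

    int main() {
      std::printf("%d\n", IsBtiBits(0xD503245F));  // BTI c -> 1
      std::printf("%d\n", IsBtiBits(0xD503201F));  // NOP   -> 0
    }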
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index aa2df90d0f..809838bcf9 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -309,9 +309,71 @@ void MacroAssembler::Bfxil(const Register& rd, const Register& rn, unsigned lsb,
bfxil(rd, rn, lsb, width);
}
-void TurboAssembler::Bind(Label* label) {
+void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) {
DCHECK(allow_macro_instructions());
- bind(label);
+ if (id == BranchTargetIdentifier::kNone) {
+ bind(label);
+ } else {
+ // Emit this inside an InstructionAccurateScope to ensure there are no extra
+ // instructions between the bind and the target identifier instruction.
+ InstructionAccurateScope scope(this, 1);
+ bind(label);
+ if (id == BranchTargetIdentifier::kPaciasp) {
+ paciasp();
+ } else {
+ bti(id);
+ }
+ }
+}
+
+void TurboAssembler::CodeEntry() {
+ // Since `kJavaScriptCallCodeStartRegister` is the target register for tail
+ // calls, we have to allow for jumps too, with "BTI jc". We also allow the
+ // register allocator to pick the target register for calls made from
+ // WebAssembly.
+ // TODO(v8:10026): Consider changing this so that we can use CallTarget(),
+ // which maps to "BTI c", here instead.
+ JumpOrCallTarget();
+}
+
+void TurboAssembler::ExceptionHandler() { JumpTarget(); }
+
+void TurboAssembler::BindExceptionHandler(Label* label) {
+ BindJumpTarget(label);
+}
+
+void TurboAssembler::JumpTarget() {
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ bti(BranchTargetIdentifier::kBtiJump);
+#endif
+}
+
+void TurboAssembler::BindJumpTarget(Label* label) {
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Bind(label, BranchTargetIdentifier::kBtiJump);
+#else
+ Bind(label);
+#endif
+}
+
+void TurboAssembler::CallTarget() {
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ bti(BranchTargetIdentifier::kBtiCall);
+#endif
+}
+
+void TurboAssembler::JumpOrCallTarget() {
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ bti(BranchTargetIdentifier::kBtiJumpCall);
+#endif
+}
+
+void TurboAssembler::BindJumpOrCallTarget(Label* label) {
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Bind(label, BranchTargetIdentifier::kBtiJumpCall);
+#else
+ Bind(label);
+#endif
}
void TurboAssembler::Bl(Label* label) {
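
Bind() emits the landing pad inside an InstructionAccurateScope because any instruction slipped between bind() and the hint would itself become the first instruction at the branch target, defeating the check. A hedged usage sketch, assuming the usual `__` masm shorthand (not code from this patch):

    // A label reached through BR must begin with a "BTI j" landing pad when
    // V8_ENABLE_CONTROL_FLOW_INTEGRITY is enabled; BindJumpTarget pairs the
    // bind with the hint, and degrades to a plain bind otherwise.
    Label case_handler;
    __ Br(x17);                        // e.g. a jump-table dispatch
    // ...
    __ BindJumpTarget(&case_handler);  // bind + bti j, back to back
    __ Mov(x0, 1);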
@@ -1063,6 +1125,166 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
void TurboAssembler::jmp(Label* L) { B(L); }
+template <TurboAssembler::StoreLRMode lr_mode>
+void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
+ DCHECK_IMPLIES((lr_mode == kSignLR), ((src0 == lr) || (src1 == lr) ||
+ (src2 == lr) || (src3 == lr)));
+ DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr) &&
+ (src2 != lr) && (src3 != lr)));
+
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ if (lr_mode == kSignLR) {
+ Paciasp();
+ }
+#endif
+
+ int count = 1 + src1.is_valid() + src2.is_valid() + src3.is_valid();
+ int size = src0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
+
+ PushHelper(count, size, src0, src1, src2, src3);
+}
+
+template <TurboAssembler::StoreLRMode lr_mode>
+void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
+ DCHECK_IMPLIES((lr_mode == kSignLR), ((src0 == lr) || (src1 == lr)));
+ DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr)));
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ if (lr_mode == kSignLR) {
+ Paciasp();
+ }
+#endif
+
+ int size = src0.SizeInBytes() + src1.SizeInBytes();
+ DCHECK_EQ(0, size % 16);
+
+ // Reserve room for src0 and push src1.
+ str(src1, MemOperand(sp, -size, PreIndex));
+ // Fill the gap with src0.
+ str(src0, MemOperand(sp, src1.SizeInBytes()));
+}
+
+template <TurboAssembler::LoadLRMode lr_mode>
+void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ DCHECK(dst0.is_valid());
+
+ int count = 1 + dst1.is_valid() + dst2.is_valid() + dst3.is_valid();
+ int size = dst0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
+
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+
+ DCHECK_IMPLIES((lr_mode == kAuthLR), ((dst0 == lr) || (dst1 == lr) ||
+ (dst2 == lr) || (dst3 == lr)));
+ DCHECK_IMPLIES((lr_mode == kDontLoadLR), ((dst0 != lr) && (dst1 != lr)) &&
+ (dst2 != lr) && (dst3 != lr));
+
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ if (lr_mode == kAuthLR) {
+ Autiasp();
+ }
+#endif
+}
+
+template <TurboAssembler::StoreLRMode lr_mode>
+void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
+ DCHECK_IMPLIES((lr_mode == kSignLR), (src == lr));
+ DCHECK_IMPLIES((lr_mode == kDontStoreLR), (src != lr));
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ if (lr_mode == kSignLR) {
+ Paciasp();
+ }
+#endif
+
+ if (offset.IsImmediate()) {
+ DCHECK_GE(offset.ImmediateValue(), 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, AbortReason::kStackAccessBelowStackPointer);
+ }
+
+ Str(src, MemOperand(sp, offset));
+}
+
+template <TurboAssembler::LoadLRMode lr_mode>
+void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ DCHECK_GE(offset.ImmediateValue(), 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, AbortReason::kStackAccessBelowStackPointer);
+ }
+
+ Ldr(dst, MemOperand(sp, offset));
+
+ DCHECK_IMPLIES((lr_mode == kAuthLR), (dst == lr));
+ DCHECK_IMPLIES((lr_mode == kDontLoadLR), (dst != lr));
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ if (lr_mode == kAuthLR) {
+ Autiasp();
+ }
+#endif
+}
+
+template <TurboAssembler::StoreLRMode lr_mode>
+void TurboAssembler::PushCPURegList(CPURegList registers) {
+ DCHECK_IMPLIES((lr_mode == kDontStoreLR), !registers.IncludesAliasOf(lr));
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ if (lr_mode == kSignLR && registers.IncludesAliasOf(lr)) {
+ Paciasp();
+ }
+#endif
+
+ int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
+
+ // Push up to four registers at a time.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& src0 = registers.PopHighestIndex();
+ const CPURegister& src1 = registers.PopHighestIndex();
+ const CPURegister& src2 = registers.PopHighestIndex();
+ const CPURegister& src3 = registers.PopHighestIndex();
+ int count = count_before - registers.Count();
+ PushHelper(count, size, src0, src1, src2, src3);
+ }
+}
+
+template <TurboAssembler::LoadLRMode lr_mode>
+void TurboAssembler::PopCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
+
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ bool contains_lr = registers.IncludesAliasOf(lr);
+ DCHECK_IMPLIES((lr_mode == kDontLoadLR), !contains_lr);
+#endif
+
+ // Pop up to four registers at a time.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ const CPURegister& dst2 = registers.PopLowestIndex();
+ const CPURegister& dst3 = registers.PopLowestIndex();
+ int count = count_before - registers.Count();
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ }
+
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ if (lr_mode == kAuthLR && contains_lr) {
+ Autiasp();
+ }
+#endif
+}
+
void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
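
The template parameter makes link-register handling explicit at every call site instead of an implicit side effect. A hedged sketch of the intended usage, mirroring the call sites later in this patch:

    // Sign lr before it is spilled, authenticate it after it is reloaded.
    __ Push<TurboAssembler::kSignLR>(lr, fp);   // paciasp, then store
    // ... lr may be clobbered here ...
    __ Pop<TurboAssembler::kAuthLR>(fp, lr);    // load, then autiasp

    // The default modes assert that lr is *not* in the register set, so
    // ordinary pushes keep working unchanged:
    __ Push(x0, x1);                            // == Push<kDontStoreLR>(x0, x1)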
@@ -1245,12 +1467,6 @@ void TurboAssembler::TestAndBranchIfAllClear(const Register& reg,
}
}
-void MacroAssembler::InlineData(uint64_t data) {
- DCHECK(is_uint16(data));
- InstructionAccurateScope scope(this, 1);
- movz(xzr, data);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 07e308b48b..1273904c9c 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -60,7 +60,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
list.Remove(exclusion);
list.Align();
- PushCPURegList(list);
+ PushCPURegList<kDontStoreLR>(list);
int bytes = list.Count() * kXRegSizeInBits / 8;
@@ -84,7 +84,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
list.Remove(exclusion);
list.Align();
- PopCPURegList(list);
+ PopCPURegList<kDontLoadLR>(list);
bytes += list.Count() * kXRegSizeInBits / 8;
return bytes;
@@ -1046,17 +1046,6 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm,
// Abstracted stack operations.
void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
- const CPURegister& src2, const CPURegister& src3) {
- DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
-
- int count = 1 + src1.is_valid() + src2.is_valid() + src3.is_valid();
- int size = src0.SizeInBytes();
- DCHECK_EQ(0, (size * count) % 16);
-
- PushHelper(count, size, src0, src1, src2, src3);
-}
-
-void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5,
const CPURegister& src6, const CPURegister& src7) {
@@ -1071,21 +1060,6 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
}
void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
- const CPURegister& dst2, const CPURegister& dst3) {
- // It is not valid to pop into the same register more than once in one
- // instruction, not even into the zero register.
- DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
- DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- DCHECK(dst0.is_valid());
-
- int count = 1 + dst1.is_valid() + dst2.is_valid() + dst3.is_valid();
- int size = dst0.SizeInBytes();
- DCHECK_EQ(0, (size * count) % 16);
-
- PopHelper(count, size, dst0, dst1, dst2, dst3);
-}
-
-void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5,
const CPURegister& dst6, const CPURegister& dst7) {
@@ -1103,48 +1077,6 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
}
-void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
- int size = src0.SizeInBytes() + src1.SizeInBytes();
- DCHECK_EQ(0, size % 16);
-
- // Reserve room for src0 and push src1.
- str(src1, MemOperand(sp, -size, PreIndex));
- // Fill the gap with src0.
- str(src0, MemOperand(sp, src1.SizeInBytes()));
-}
-
-void TurboAssembler::PushCPURegList(CPURegList registers) {
- int size = registers.RegisterSizeInBytes();
- DCHECK_EQ(0, (size * registers.Count()) % 16);
-
- // Push up to four registers at a time.
- while (!registers.IsEmpty()) {
- int count_before = registers.Count();
- const CPURegister& src0 = registers.PopHighestIndex();
- const CPURegister& src1 = registers.PopHighestIndex();
- const CPURegister& src2 = registers.PopHighestIndex();
- const CPURegister& src3 = registers.PopHighestIndex();
- int count = count_before - registers.Count();
- PushHelper(count, size, src0, src1, src2, src3);
- }
-}
-
-void TurboAssembler::PopCPURegList(CPURegList registers) {
- int size = registers.RegisterSizeInBytes();
- DCHECK_EQ(0, (size * registers.Count()) % 16);
-
- // Pop up to four registers at a time.
- while (!registers.IsEmpty()) {
- int count_before = registers.Count();
- const CPURegister& dst0 = registers.PopLowestIndex();
- const CPURegister& dst1 = registers.PopLowestIndex();
- const CPURegister& dst2 = registers.PopLowestIndex();
- const CPURegister& dst3 = registers.PopLowestIndex();
- int count = count_before - registers.Count();
- PopHelper(count, size, dst0, dst1, dst2, dst3);
- }
-}
-
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(count);
@@ -1249,28 +1181,6 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
}
}
-void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
- if (offset.IsImmediate()) {
- DCHECK_GE(offset.ImmediateValue(), 0);
- } else if (emit_debug_code()) {
- Cmp(xzr, offset);
- Check(le, AbortReason::kStackAccessBelowStackPointer);
- }
-
- Str(src, MemOperand(sp, offset));
-}
-
-void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
- if (offset.IsImmediate()) {
- DCHECK_GE(offset.ImmediateValue(), 0);
- } else if (emit_debug_code()) {
- Cmp(xzr, offset);
- Check(le, AbortReason::kStackAccessBelowStackPointer);
- }
-
- Ldr(dst, MemOperand(sp, offset));
-}
-
void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
@@ -1286,50 +1196,61 @@ void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
}
void MacroAssembler::PushCalleeSavedRegisters() {
- // Ensure that the macro-assembler doesn't use any scratch registers.
- InstructionAccurateScope scope(this);
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Paciasp();
+#endif
- MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
+ {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
- stp(d14, d15, tos);
- stp(d12, d13, tos);
- stp(d10, d11, tos);
- stp(d8, d9, tos);
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
- STATIC_ASSERT(
- EntryFrameConstants::kCalleeSavedRegisterBytesPushedBeforeFpLrPair ==
- 8 * kSystemPointerSize);
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
- stp(x29, x30, tos); // fp, lr
+ STATIC_ASSERT(
+ EntryFrameConstants::kCalleeSavedRegisterBytesPushedBeforeFpLrPair ==
+ 8 * kSystemPointerSize);
+ stp(x29, x30, tos); // fp, lr
- STATIC_ASSERT(
- EntryFrameConstants::kCalleeSavedRegisterBytesPushedAfterFpLrPair ==
- 10 * kSystemPointerSize);
+ STATIC_ASSERT(
+ EntryFrameConstants::kCalleeSavedRegisterBytesPushedAfterFpLrPair ==
+ 10 * kSystemPointerSize);
- stp(x27, x28, tos);
- stp(x25, x26, tos);
- stp(x23, x24, tos);
- stp(x21, x22, tos);
- stp(x19, x20, tos);
+ stp(x27, x28, tos);
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+ }
}
void MacroAssembler::PopCalleeSavedRegisters() {
- // Ensure that the macro-assembler doesn't use any scratch registers.
- InstructionAccurateScope scope(this);
+ {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
- MemOperand tos(sp, 2 * kXRegSize, PostIndex);
+ MemOperand tos(sp, 2 * kXRegSize, PostIndex);
- ldp(x19, x20, tos);
- ldp(x21, x22, tos);
- ldp(x23, x24, tos);
- ldp(x25, x26, tos);
- ldp(x27, x28, tos);
- ldp(x29, x30, tos);
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos);
+ ldp(x29, x30, tos);
- ldp(d8, d9, tos);
- ldp(d10, d11, tos);
- ldp(d12, d13, tos);
- ldp(d14, d15, tos);
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+ }
+
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Autiasp();
+#endif
}
void TurboAssembler::AssertSpAligned() {
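
PACIASP signs lr against the current value of sp, so the matching AUTIASP in PopCalleeSavedRegisters only succeeds if sp has returned to the value it had at the sign. A hedged sketch of the invariant the pair above maintains:

    Paciasp();   // sp == S: lr is replaced by its signed form
    // ...stp sequence: sp moves, but lr only exists in memory, signed...
    // ...matching ldp sequence: sp returns to S, signed lr reloaded...
    Autiasp();   // authenticates against sp == S; a mismatch poisons lr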
@@ -1712,34 +1633,47 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
- if (isolate() != nullptr) {
- Register scratch1 = x4;
- Register scratch2 = x5;
- Push(scratch1, scratch2);
-
- Label get_pc;
- Bind(&get_pc);
- Adr(scratch2, &get_pc);
-
- Mov(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
- Str(scratch2, MemOperand(scratch1));
- Mov(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- Str(fp, MemOperand(scratch1));
-
- Pop(scratch2, scratch1);
+ Register pc_scratch = x4;
+ Register addr_scratch = x5;
+ Push(pc_scratch, addr_scratch);
+
+ Label get_pc;
+ Bind(&get_pc);
+ Adr(pc_scratch, &get_pc);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ Str(pc_scratch,
+ MemOperand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()));
+ Str(fp,
+ MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Mov(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Str(pc_scratch, MemOperand(addr_scratch));
+ Mov(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Str(fp, MemOperand(addr_scratch));
}
+ Pop(addr_scratch, pc_scratch);
+
// Call directly. The function called cannot cause a GC, or allow preemption,
// so the return address in the link register stays correct.
Call(function);
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- Register scratch = x4;
- Push(scratch, xzr);
- Mov(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- Str(xzr, MemOperand(scratch));
- Pop(xzr, scratch);
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ Str(xzr,
+ MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Push(addr_scratch, xzr);
+ Mov(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Str(xzr, MemOperand(addr_scratch));
+ Pop(xzr, addr_scratch);
}
if (num_of_reg_args > kRegisterPassedArguments) {
@@ -1800,7 +1734,7 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
namespace {
// The calculated offset is either:
-// * the 'target' input unmodified if this is a WASM call, or
+// * the 'target' input unmodified if this is a Wasm call, or
// * the offset of the target from the current PC, in instructions, for any
// other type of call.
static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
@@ -2017,18 +1951,22 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// GC, since the callee function will return to it.
UseScratchRegisterScope temps(this);
- Register scratch1 = temps.AcquireX();
+ temps.Exclude(x16, x17);
Label return_location;
- Adr(scratch1, &return_location);
- Poke(scratch1, 0);
+ Adr(x17, &return_location);
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Add(x16, sp, kSystemPointerSize);
+ Pacia1716();
+#endif
+ Poke(x17, 0);
if (emit_debug_code()) {
- // Verify that the slot below fp[kSPOffset]-8 points to the return location.
- Register scratch2 = temps.AcquireX();
- Ldr(scratch2, MemOperand(fp, ExitFrameConstants::kSPOffset));
- Ldr(scratch2, MemOperand(scratch2, -static_cast<int64_t>(kXRegSize)));
- Cmp(scratch2, scratch1);
+ // Verify that the slot below fp[kSPOffset]-8 points to the signed return
+ // location.
+ Ldr(x16, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ Ldr(x16, MemOperand(x16, -static_cast<int64_t>(kXRegSize)));
+ Cmp(x16, x17);
Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
}
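
PACIA1716 is the register-pinned variant of the PAC instructions: it signs x17 using x16 as the modifier, which is why the scratch scope must exclude exactly that pair. An annotated sketch of the pattern above (comments are explanatory and illustrative, not from the patch):

    temps.Exclude(x16, x17);           // pacia1716 only operates on x16/x17
    Adr(x17, &return_location);        // x17 = pointer to be signed
    Add(x16, sp, kSystemPointerSize);  // x16 = modifier, chosen so the
                                       // authenticating side can recompute it
    Pacia1716();                       // x17 = pac(x17, x16)
    Poke(x17, 0);                      // only the signed form reaches the stack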
@@ -2047,19 +1985,20 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
+ Label* exit, DeoptimizeKind kind) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start;
- bind(&start);
-#endif
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
DCHECK_EQ(offset % kInstrSize, 0);
offset = offset / static_cast<int>(kInstrSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
- DCHECK_EQ(SizeOfCodeGeneratedSince(&start), Deoptimizer::kDeoptExitSize);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+ USE(exit, kind);
}
void TurboAssembler::PrepareForTailCall(Register callee_args_count,
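
The deopt exit size is now checked against a label supplied by the caller rather than a locally bound one, and the expected size depends on the deoptimization kind. A hedged sketch of the calling convention this implies (`target` and `deopt_id` stand for the caller's values; not code from this patch):

    // The caller binds `exit` directly before the call, so everything emitted
    // by CallForDeoptimization is measured against the per-kind exit size.
    Label exit;
    __ Bind(&exit);
    __ CallForDeoptimization(target, deopt_id, &exit, DeoptimizeKind::kLazy);
    // internally: DCHECK_EQ(SizeOfCodeGeneratedSince(&exit),
    //                       Deoptimizer::kLazyDeoptExitSize);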
@@ -2096,8 +2035,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
- Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ RestoreFPAndLR();
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
@@ -2242,7 +2180,7 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
LoadTaggedPointerField(cp,
FieldMemOperand(function, JSFunction::kContextOffset));
// The number of arguments is stored as an int32_t, and -1 is a marker
- // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+ // (kDontAdaptArgumentsSentinel), so we need sign
// extension to correctly handle it.
LoadTaggedPointerField(
expected_parameter_count,
@@ -2301,7 +2239,8 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
- StubCallMode stub_mode) {
+ StubCallMode stub_mode,
+ LinkRegisterStatus lr_status) {
Label done;
// Try to convert the double to an int64. If successful, the bottom 32 bits
@@ -2309,7 +2248,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
TryConvertDoubleToInt64(result, double_input, &done);
// If we fell through then inline version didn't succeed - call stub instead.
- Push(lr, double_input);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push<TurboAssembler::kSignLR>(lr, double_input);
+ } else {
+ Push<TurboAssembler::kDontStoreLR>(xzr, double_input);
+ }
// DoubleToI preserves any registers it needs to clobber.
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
@@ -2322,7 +2265,13 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Ldr(result, MemOperand(sp, 0));
DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
- Pop(xzr, lr); // xzr to drop the double input on the stack.
+
+ if (lr_status == kLRHasNotBeenSaved) {
+ // Pop into xzr here to drop the double input on the stack:
+ Pop<TurboAssembler::kAuthLR>(xzr, lr);
+ } else {
+ Drop(2);
+ }
Bind(&done);
// Keep our invariant that the upper 32 bits are zero.
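
Both paths keep sp 16-byte aligned by always pushing a second slot next to the double. A hedged reading of the pairing:

    // kLRHasNotBeenSaved: lr must be preserved (and signed) across the stub:
    //   Push<kSignLR>(lr, double_input)       ...  Pop<kAuthLR>(xzr, lr)
    // kLRHasBeenSaved: lr is already safe, so xzr pads the slot instead:
    //   Push<kDontStoreLR>(xzr, double_input) ...  Drop(2)
    // Popping into xzr is the usual idiom for discarding a stack slot
    // without burning a scratch register.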
@@ -2330,18 +2279,17 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
}
void TurboAssembler::Prologue() {
- Push(lr, fp, cp, x1);
+ Push<TurboAssembler::kSignLR>(lr, fp, cp, x1);
Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
- if (type == StackFrame::INTERNAL) {
+ if (type == StackFrame::INTERNAL || type == StackFrame::WASM_DEBUG_BREAK) {
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
- // type_reg pushed twice for alignment.
- Push(lr, fp, type_reg, type_reg);
+ Push<TurboAssembler::kSignLR>(lr, fp, type_reg, padreg);
const int kFrameSize =
TypedFrameConstants::kFixedFrameSizeFromFp + kSystemPointerSize;
Add(fp, sp, kFrameSize);
@@ -2354,7 +2302,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
type == StackFrame::WASM_EXIT) {
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
- Push(lr, fp);
+ Push<TurboAssembler::kSignLR>(lr, fp);
Mov(fp, sp);
Push(type_reg, padreg);
// sp[3] : lr
@@ -2368,7 +2316,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// Users of this frame type push a context pointer after the type field,
// so do it here to keep the stack pointer aligned.
- Push(lr, fp, type_reg, cp);
+ Push<TurboAssembler::kSignLR>(lr, fp, type_reg, cp);
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
@@ -2385,7 +2333,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(sp, fp);
- Pop(fp, lr);
+ Pop<TurboAssembler::kAuthLR>(fp, lr);
}
void MacroAssembler::ExitFramePreserveFPRegs() {
@@ -2415,7 +2363,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the new stack frame.
- Push(lr, fp);
+ Push<TurboAssembler::kSignLR>(lr, fp);
Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
@@ -2498,7 +2446,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
Mov(sp, fp);
- Pop(fp, lr);
+ Pop<TurboAssembler::kAuthLR>(fp, lr);
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
@@ -2675,30 +2623,6 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
RecordComment("]");
}
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // Make sure the safepoint registers list is what we expect.
- DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6FFCFFFF);
-
- // Safepoint registers are stored contiguously on the stack, but not all the
- // registers are saved. The following registers are excluded:
- // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
- // the macro assembler.
- // - x31 (sp) because the system stack pointer doesn't need to be included
- // in safepoint registers.
- //
- // This function implements the mapping of register code to index into the
- // safepoint register slots.
- if ((reg_code >= 0) && (reg_code <= 15)) {
- return reg_code;
- } else if ((reg_code >= 18) && (reg_code <= 30)) {
- // Skip ip0 and ip1.
- return reg_code - 2;
- } else {
- // This register has no safepoint register slot.
- UNREACHABLE();
- }
-}
-
void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
UseScratchRegisterScope temps(this);
@@ -2751,25 +2675,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
void TurboAssembler::SaveRegisters(RegList registers) {
DCHECK_GT(NumRegs(registers), 0);
- CPURegList regs(lr);
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs.Combine(Register::XRegFromCode(i));
- }
- }
-
+ CPURegList regs(CPURegister::kRegister, kXRegSizeInBits, registers);
+ // If we were saving LR, we might need to sign it.
+ DCHECK(!regs.IncludesAliasOf(lr));
+ regs.Align();
PushCPURegList(regs);
}
void TurboAssembler::RestoreRegisters(RegList registers) {
DCHECK_GT(NumRegs(registers), 0);
- CPURegList regs(lr);
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- regs.Combine(Register::XRegFromCode(i));
- }
- }
-
+ CPURegList regs(CPURegister::kRegister, kXRegSizeInBits, registers);
+ // If we were saving LR, we might need to sign it.
+ DCHECK(!regs.IncludesAliasOf(lr));
+ regs.Align();
PopCPURegList(regs);
}
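
Building the CPURegList straight from the RegList bit mask replaces the old bit-by-bit Combine loop, and lr is now asserted out of the set because pushing it here would bypass the signing machinery. An annotated sketch of the new shape (comments illustrative; Align's behavior is per its own comment in register-arm64.h):

    CPURegList regs(CPURegister::kRegister, kXRegSizeInBits, registers);
    DCHECK(!regs.IncludesAliasOf(lr));  // lr spills must go through kSignLR
    regs.Align();    // pad an odd register count so sp stays 16-byte aligned
    PushCPURegList(regs);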
@@ -2924,11 +2842,11 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// Record the actual write.
if (lr_status == kLRHasNotBeenSaved) {
- Push(padreg, lr);
+ Push<TurboAssembler::kSignLR>(padreg, lr);
}
CallRecordWriteStub(object, offset, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
- Pop(lr, padreg);
+ Pop<TurboAssembler::kAuthLR>(lr, padreg);
}
Bind(&done);
@@ -2944,14 +2862,6 @@ void TurboAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
-void MacroAssembler::AssertRegisterIsRoot(Register reg, RootIndex index,
- AbortReason reason) {
- if (emit_debug_code()) {
- CompareRoot(reg, index);
- Check(eq, reason);
- }
-}
-
void TurboAssembler::Check(Condition cond, AbortReason reason) {
Label ok;
B(cond, &ok);
@@ -2961,6 +2871,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
}
void TurboAssembler::Trap() { Brk(0); }
+void TurboAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); }
void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
@@ -3182,7 +3093,7 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
// Preserve all caller-saved registers as well as NZCV.
// PushCPURegList asserts that the size of each list is a multiple of 16
// bytes.
- PushCPURegList(saved_registers);
+ PushCPURegList<kDontStoreLR>(saved_registers);
PushCPURegList(kCallerSavedV);
// We can use caller-saved registers as scratch values (except for argN).
@@ -3235,7 +3146,7 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
}
PopCPURegList(kCallerSavedV);
- PopCPURegList(saved_registers);
+ PopCPURegList<kDontLoadLR>(saved_registers);
TmpList()->set_list(old_tmp_list);
FPTmpList()->set_list(old_fp_tmp_list);
@@ -3273,6 +3184,35 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Mov(kSpeculationPoisonRegister, -1);
}
+void TurboAssembler::RestoreFPAndLR() {
+ static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize ==
+ StandardFrameConstants::kCallerPCOffset,
+ "Offsets must be consecutive for ldp!");
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ // Make sure we can use x16 and x17.
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(x16, x17);
+ // We can load the return address directly into x17.
+ Add(x16, fp, StandardFrameConstants::kCallerSPOffset);
+ Ldp(fp, x17, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ Autia1716();
+ Mov(lr, x17);
+#else
+ Ldp(fp, lr, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+#endif
+}
+
+void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(x16, x17);
+ Adr(x17, return_location);
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Add(x16, fp, WasmExitFrameConstants::kCallingPCOffset + kSystemPointerSize);
+ Pacia1716();
+#endif
+ Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+}
+
} // namespace internal
} // namespace v8
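
RestoreFPAndLR recomputes the PAC modifier rather than saving it: on both sides the modifier is the address one pointer above the PC slot, which the load side reconstructs from fp. A hedged sketch of the symmetry between the two new helpers:

    // Store side (StoreReturnAddressInWasmExitFrame):
    //   x16 = fp + kCallingPCOffset + kSystemPointerSize  ; modifier M
    //   pacia1716                                         ; sign x17 with M
    // Load side (RestoreFPAndLR):
    //   x16 = fp + kCallerSPOffset                        ; recompute M
    //   autia1716                                         ; strip/verify PAC
    // If the recomputed modifier differs, authentication poisons the pointer
    // and the eventual return traps instead of jumping.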
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index f7fbb1105d..7b1fb69e95 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -18,12 +18,6 @@
// Simulator specific helpers.
#if USE_SIMULATOR
-// TODO(all): If possible automatically prepend an indicator like
-// UNIMPLEMENTED or LOCATION.
-#define ASM_UNIMPLEMENTED(message) __ Debug(message, __LINE__, NO_PARAM)
-#define ASM_UNIMPLEMENTED_BREAK(message) \
- __ Debug(message, __LINE__, \
- FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
@@ -33,8 +27,6 @@
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
-#define ASM_UNIMPLEMENTED(message)
-#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
@@ -136,10 +128,6 @@ inline BranchType InvertBranchType(BranchType type) {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-enum TargetAddressStorageMode {
- CAN_INLINE_TARGET_ADDRESS,
- NEVER_INLINE_TARGET_ADDRESS
-};
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
// The macro assembler supports moving automatically pre-shifted immediates for
@@ -590,6 +578,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
void Trap() override;
+ void DebugBreak() override;
// Print a message to stderr and abort execution.
void Abort(AbortReason reason);
@@ -630,7 +619,31 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Returns false, otherwise.
bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
- inline void Bind(Label* label);
+ inline void Bind(Label* label,
+ BranchTargetIdentifier id = BranchTargetIdentifier::kNone);
+
+ // Control-flow integrity:
+
+ // Define a function entrypoint.
+ inline void CodeEntry();
+ // Define an exception handler.
+ inline void ExceptionHandler();
+ // Define an exception handler and bind a label.
+ inline void BindExceptionHandler(Label* label);
+
+ // Control-flow integrity:
+
+ // Define a jump (BR) target.
+ inline void JumpTarget();
+ // Define a jump (BR) target and bind a label.
+ inline void BindJumpTarget(Label* label);
+ // Define a call (BLR) target. The target also allows tail calls (via BR)
+ // when the target is x16 or x17.
+ inline void CallTarget();
+ // Define a jump/call target.
+ inline void JumpOrCallTarget();
+ // Define a jump/call target and bind a label.
+ inline void BindJumpOrCallTarget(Label* label);
static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
@@ -781,20 +794,33 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The stack pointer must be aligned to 16 bytes on entry and the total size
// of the specified registers must also be a multiple of 16 bytes.
//
- // Other than the registers passed into Pop, the stack pointer and (possibly)
- // the system stack pointer, these methods do not modify any other registers.
+ // Other than the registers passed into Pop, the stack pointer, (possibly)
+ // the system stack pointer and (possibly) the link register, these methods
+ // do not modify any other registers.
+ //
+ // Some of the methods take an optional LoadLRMode or StoreLRMode template
+ // argument, which specifies whether we need to sign the link register at the
+ // start of the operation, or authenticate it at the end of the operation,
+ // when control flow integrity measures are enabled.
+ // When the mode is kDontLoadLR or kDontStoreLR, LR must not be passed as an
+ // argument to the operation.
+ enum LoadLRMode { kAuthLR, kDontLoadLR };
+ enum StoreLRMode { kSignLR, kDontStoreLR };
+ template <StoreLRMode lr_mode = kDontStoreLR>
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
void Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5 = NoReg,
const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
+ template <LoadLRMode lr_mode = kDontLoadLR>
void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5 = NoReg,
const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
+ template <StoreLRMode lr_mode = kDontStoreLR>
void Push(const Register& src0, const VRegister& src1);
// This is a convenience method for pushing a single Handle<Object>.
@@ -836,7 +862,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// kSRegSizeInBits are supported.
//
// Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
+ //
+ // The methods take an optional LoadLRMode or StoreLRMode template argument.
+ // When control flow integrity measures are enabled and the link register is
+ // included in 'registers', passing kSignLR to PushCPURegList will sign the
+ // link register before pushing the list, and passing kAuthLR to
+ // PopCPURegList will authenticate it after popping the list.
+ template <StoreLRMode lr_mode = kDontStoreLR>
void PushCPURegList(CPURegList registers);
+ template <LoadLRMode lr_mode = kDontLoadLR>
void PopCPURegList(CPURegList registers);
// Calculate how much stack space (in bytes) are required to store caller
@@ -941,7 +975,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id);
+ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
+ DeoptimizeKind kind);
// Calls a C function.
// The called function is not allowed to trigger a
@@ -958,7 +993,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
- DoubleRegister double_input, StubCallMode stub_mode);
+ DoubleRegister double_input, StubCallMode stub_mode,
+ LinkRegisterStatus lr_status);
inline void Mul(const Register& rd, const Register& rn, const Register& rm);
@@ -1040,10 +1076,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
// be 16 byte aligned.
+ // When the optional template argument is kSignLR and control flow integrity
+ // measures are enabled, we sign the link register before poking it onto the
+ // stack. 'src' must be lr in this case.
+ template <StoreLRMode lr_mode = kDontStoreLR>
void Poke(const CPURegister& src, const Operand& offset);
// Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
// The stack pointer must be aligned to 16 bytes.
+ // When the optional template argument is kAuthLR and control flow integrity
+ // measures are enabled, we authenticate the link register after peeking the
+ // value. 'dst' must be lr in this case.
+ template <LoadLRMode lr_mode = kDontLoadLR>
void Peek(const CPURegister& dst, const Operand& offset);
// Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
@@ -1295,6 +1339,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
+ // Restore FP and LR from the values stored in the current frame. This will
+ // authenticate the LR when pointer authentication is enabled.
+ void RestoreFPAndLR();
+
+ void StoreReturnAddressInWasmExitFrame(Label* return_location);
+
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@@ -1623,21 +1673,27 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
tbx(vd, vn, vn2, vn3, vn4, vm);
}
+ // For the 'lr_mode' template argument of the following methods, see
+ // PushCPURegList/PopCPURegList.
+ template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushSizeRegList(
RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
- PushCPURegList(CPURegList(type, reg_size, registers));
+ PushCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
}
+ template <LoadLRMode lr_mode = kDontLoadLR>
inline void PopSizeRegList(
RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
- PopCPURegList(CPURegList(type, reg_size, registers));
+ PopCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
}
+ template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushXRegList(RegList regs) {
- PushSizeRegList(regs, kXRegSizeInBits);
+ PushSizeRegList<lr_mode>(regs, kXRegSizeInBits);
}
+ template <LoadLRMode lr_mode = kDontLoadLR>
inline void PopXRegList(RegList regs) {
- PopSizeRegList(regs, kXRegSizeInBits);
+ PopSizeRegList<lr_mode>(regs, kXRegSizeInBits);
}
inline void PushWRegList(RegList regs) {
PushSizeRegList(regs, kWRegSizeInBits);
@@ -1667,11 +1723,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
- // Insert one or more instructions into the instruction stream that encode
- // some caller-defined data. The instructions used will be executable with no
- // side effects.
- inline void InlineData(uint64_t data);
-
// Preserve the callee-saved registers (as defined by AAPCS64).
//
// Higher-numbered registers are pushed before lower-numbered registers, and
@@ -1679,6 +1730,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Floating-point registers are pushed before general-purpose registers, and
// thus get higher addresses.
//
+ // When control flow integrity measures are enabled, this method signs the
+ // link register before pushing it.
+ //
// Note that registers are not checked for invalid values. Use this method
// only if you know that the GC won't try to examine the values on the stack.
void PushCalleeSavedRegisters();
@@ -1689,12 +1743,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
// Floating-point registers are popped after general-purpose registers, and
// thus come from higher addresses.
+ //
+ // When control flow integrity measures are enabled, this method
+ // authenticates the link register after popping it.
void PopCalleeSavedRegisters();
// Helpers ------------------------------------------------------------------
- static int SafepointRegisterStackIndex(int reg_code);
-
template <typename Field>
void DecodeField(Register dst, Register src) {
static const int shift = Field::kShift;
@@ -1925,25 +1980,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Debugging.
- void AssertRegisterIsRoot(
- Register reg, RootIndex index,
- AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);
-
void LoadNativeContextSlot(int index, Register dst);
- // Far branches resolving.
- //
- // The various classes of branch instructions with immediate offsets have
- // different ranges. While the Assembler will fail to assemble a branch
- // exceeding its range, the MacroAssembler offers a mechanism to resolve
- // branches to too distant targets, either by tweaking the generated code to
- // use branch instructions with wider ranges or generating veneers.
- //
- // Currently branches to distant targets are resolved using unconditional
-// branch instructions with a range of +-128MB. If that becomes too little
- // (!), the mechanism can be extended to generate special veneers for really
- // far targets.
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
@@ -2043,7 +2081,7 @@ class UseScratchRegisterScope {
CPURegList list(reg1, reg2);
Include(list);
}
- void Exclude(const Register& reg1, const Register& reg2) {
+ void Exclude(const Register& reg1, const Register& reg2 = NoReg) {
CPURegList list(reg1, reg2);
Exclude(list);
}
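
Giving Exclude() a NoReg default allows excluding a single register without passing a dummy second one; the new CFI paths use the two-argument form for the x16/x17 pair. A trivial hedged sketch (label name illustrative):

    UseScratchRegisterScope temps(this);
    temps.Exclude(x17);          // keep the scratch allocator away from x17
    Adr(x17, &return_location);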
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 756e3dcaf3..03a5900b5a 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -71,23 +71,6 @@ namespace internal {
constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
-const int kNumRegs = kNumberOfRegisters;
-// Registers x0-x17 are caller-saved.
-const int kNumJSCallerSaved = 18;
-const RegList kJSCallerSaved = 0x3ffff;
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of eight.
-// TODO(all): Refine this number.
-const int kNumSafepointRegisters = 32;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
-#define kNumSafepointSavedRegisters \
- CPURegList::GetSafepointSavedRegisters().Count()
-
// Some CPURegister methods can return Register and VRegister types, so we
// need to declare them in advance.
class Register;
@@ -539,8 +522,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
@@ -602,10 +583,6 @@ class V8_EXPORT_PRIVATE CPURegList {
void Combine(int code);
void Remove(int code);
- // Remove all callee-saved registers from the list. This can be useful when
- // preparing registers for an AAPCS64 function call, for example.
- void RemoveCalleeSaved();
-
// Align the list to 16 bytes.
void Align();
@@ -622,9 +599,6 @@ class V8_EXPORT_PRIVATE CPURegList {
static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
static CPURegList GetCallerSavedV(int size = kDRegSizeInBits);
- // Registers saved as safepoints.
- static CPURegList GetSafepointSavedRegisters();
-
bool IsEmpty() const {
return list_ == 0;
}
diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h
index cab4cbfc3b..d56b372504 100644
--- a/deps/v8/src/codegen/assembler-arch.h
+++ b/deps/v8/src/codegen/assembler-arch.h
@@ -15,7 +15,7 @@
#include "src/codegen/arm64/assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/codegen/arm/assembler-arm.h"
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/assembler-mips.h"
diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h
index fd08a38555..8c81315d50 100644
--- a/deps/v8/src/codegen/assembler-inl.h
+++ b/deps/v8/src/codegen/assembler-inl.h
@@ -15,7 +15,7 @@
#include "src/codegen/arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/codegen/arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/assembler-ppc-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/assembler-mips-inl.h"
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index bc30192a7c..4bda1260a9 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -70,8 +70,7 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
#endif
options.inline_offheap_trampolines &= !generating_embedded_builtin;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
- const base::AddressRegion& code_range =
- isolate->heap()->memory_allocator()->code_range();
+ const base::AddressRegion& code_range = isolate->heap()->code_range();
DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
options.code_range_start = code_range.begin();
#endif
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 92925a5bf9..1c287222e9 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -143,7 +143,7 @@ enum class CodeObjectRequired { kNo, kYes };
struct V8_EXPORT_PRIVATE AssemblerOptions {
// Recording reloc info for external references and off-heap targets is
- // needed whenever code is serialized, e.g. into the snapshot or as a WASM
+ // needed whenever code is serialized, e.g. into the snapshot or as a Wasm
// module. This flag allows this reloc info to be disabled for code that
// will not survive process destruction.
bool record_reloc_info_for_serialization = true;
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 702a64d091..0464faea3b 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -23,6 +23,7 @@
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-cell.h"
+#include "src/roots/roots.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -116,8 +117,7 @@ void CodeStubAssembler::Check(const NodeGenerator<BoolT>& condition_body,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
BranchGenerator branch = [=](Label* ok, Label* not_ok) {
- Node* condition = condition_body();
- DCHECK_NOT_NULL(condition);
+ TNode<BoolT> condition = condition_body();
Branch(condition, ok, not_ok);
};
@@ -146,7 +146,8 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrToParameter<IntPtrT>(
void CodeStubAssembler::CollectCallableFeedback(
TNode<Object> maybe_target, TNode<Context> context,
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id,
+ CallableFeedbackMode mode) {
Label extra_checks(this, Label::kDeferred), done(this);
// Check if we have monomorphic {target} feedback already.
@@ -176,7 +177,47 @@ void CodeStubAssembler::CollectCallableFeedback(
// If the weak reference is cleared, we have a new chance to become
// monomorphic.
Comment("check if weak reference is cleared");
- Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
+ GotoIf(IsCleared(feedback), &initialize);
+ GotoIf(TaggedIsSmi(maybe_target), &mark_megamorphic);
+
+ if (mode == CallableFeedbackMode::kDontCollectFeedbackCell) {
+ Goto(&mark_megamorphic);
+ } else {
+ Label try_transition_to_feedback_cell(this);
+
+ // Check if {target} is a JSFunction.
+ Comment("check if target is a JSFunction");
+ TNode<HeapObject> target = CAST(maybe_target);
+ GotoIfNot(IsJSFunction(target), &mark_megamorphic);
+
+ // Check if {target}'s feedback vector cell matches the {feedback_value}.
+ TNode<HeapObject> feedback_value = GetHeapObjectAssumeWeak(feedback);
+ TNode<Object> target_feedback_cell =
+ LoadObjectField(target, JSFunction::kFeedbackCellOffset);
+ Branch(TaggedEqual(feedback_value, target_feedback_cell), &done,
+ &try_transition_to_feedback_cell);
+
+ BIND(&try_transition_to_feedback_cell);
+ {
+ // Check if {target} and {feedback_value} are both JSFunctions with
+ // the same feedback vector cell, and that those functions were
+ // actually compiled already.
+ GotoIfNot(IsJSFunction(feedback_value), &mark_megamorphic);
+ TNode<HeapObject> feedback_cell = CAST(
+ LoadObjectField(feedback_value, JSFunction::kFeedbackCellOffset));
+ GotoIfNot(TaggedEqual(feedback_cell, target_feedback_cell),
+ &mark_megamorphic);
+ GotoIfNot(IsFeedbackCell(feedback_cell), &mark_megamorphic);
+
+ // Record the feedback vector cell.
+ Comment("transition to polymorphic");
+ StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
+ feedback_cell);
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "Call:FeedbackVectorCell");
+ Goto(&done);
+ }
+ }
BIND(&initialize);
{
@@ -255,7 +296,8 @@ void CodeStubAssembler::CollectCallFeedback(
IncrementCallCount(feedback_vector, slot_id);
// Collect the callable {target} feedback.
- CollectCallableFeedback(maybe_target, context, feedback_vector, slot_id);
+ CollectCallableFeedback(maybe_target, context, feedback_vector, slot_id,
+ CallableFeedbackMode::kCollectFeedbackCell);
Goto(&feedback_done);
BIND(&feedback_done);
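
CollectCallableFeedback now has a middle state between monomorphic and megamorphic: when the new target is a different closure that shares the recorded function's feedback cell, the slot records the cell instead of giving up, and kDontCollectFeedbackCell skips that middle state entirely. A hedged sketch of the lattice the code above walks (illustrative types, not V8's):

    // Transitions only move downward; kMegamorphic is terminal.
    enum class CallFeedbackState {
      kUninitialized,      // sentinel: first call records the target weakly
      kMonomorphicTarget,  // weak ref to a single JSFunction
      kFeedbackCell,       // weak ref to the closures' shared FeedbackCell
      kMegamorphic         // sentinel: stop collecting
    };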
@@ -718,6 +760,49 @@ TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
return Int32TrueConstant();
}
+TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr(
+ TNode<TaggedIndex> value) {
+ return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
+ IntPtrConstant(kSmiTagSize)));
+}
+
+TNode<TaggedIndex> CodeStubAssembler::IntPtrToTaggedIndex(
+ TNode<IntPtrT> value) {
+ return ReinterpretCast<TaggedIndex>(
+ BitcastWordToTaggedSigned(WordShl(value, IntPtrConstant(kSmiTagSize))));
+}
+
+TNode<Smi> CodeStubAssembler::TaggedIndexToSmi(TNode<TaggedIndex> value) {
+ if (SmiValuesAre32Bits()) {
+ DCHECK_EQ(kSmiShiftSize, 31);
+ return BitcastWordToTaggedSigned(
+ WordShl(BitcastTaggedToWordForTagAndSmiBits(value),
+ IntPtrConstant(kSmiShiftSize)));
+ }
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK_EQ(kSmiShiftSize, 0);
+ return ReinterpretCast<Smi>(value);
+}
+
+TNode<TaggedIndex> CodeStubAssembler::SmiToTaggedIndex(TNode<Smi> value) {
+ if (kSystemPointerSize == kInt32Size) {
+ return ReinterpretCast<TaggedIndex>(value);
+ }
+ if (SmiValuesAre32Bits()) {
+ DCHECK_EQ(kSmiShiftSize, 31);
+ return ReinterpretCast<TaggedIndex>(BitcastWordToTaggedSigned(
+ WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
+ IntPtrConstant(kSmiShiftSize))));
+ }
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK_EQ(kSmiShiftSize, 0);
+ // Just sign-extend the lower 32 bits.
+ TNode<Int32T> raw =
+ TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(value));
+ return ReinterpretCast<TaggedIndex>(
+ BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw)));
+}
+
TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) {
if (COMPRESS_POINTERS_BOOL) {
TNode<Int32T> raw =
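
The TaggedIndex conversions above are pure shifts: on a 64-bit build with 32-bit smis, a TaggedIndex keeps its payload in bits 63:1 (tag bit 0 clear) while a Smi keeps it in bits 63:32. A standalone sketch of the arithmetic, assuming that configuration (kSmiTagSize = 1, kSmiShiftSize = 31):

    #include <cstdint>
    #include <cstdio>

    constexpr int kSmiTagSize = 1;
    constexpr int kSmiShiftSize = 31;

    uint64_t TaggedIndexOf(int64_t v) {        // value -> TaggedIndex bits
      return static_cast<uint64_t>(v) << kSmiTagSize;
    }
    uint64_t TaggedIndexToSmi(uint64_t ti) {   // shift payload up to bits 63:32
      return ti << kSmiShiftSize;
    }
    uint64_t SmiToTaggedIndex(uint64_t smi) {  // arithmetic shift back down
      return static_cast<uint64_t>(static_cast<int64_t>(smi) >> kSmiShiftSize);
    }

    int main() {
      uint64_t ti = TaggedIndexOf(-5);
      uint64_t smi = TaggedIndexToSmi(ti);
      std::printf("%lld\n", static_cast<long long>(smi) >> 32);  // -5
      std::printf("%d\n", SmiToTaggedIndex(smi) == ti);          // 1
    }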
@@ -1497,8 +1582,7 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(SloppyTNode<Object> value,
TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
TNode<RawPtrT> frame_pointer = LoadParentFramePointer();
- return CAST(
- Load(MachineType::AnyTagged(), frame_pointer, IntPtrConstant(offset)));
+ return LoadFullTagged(frame_pointer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
@@ -1521,8 +1605,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
#endif
- return ChangeInt32ToIntPtr(
- LoadObjectField(object, offset, MachineType::Int32()));
+ return ChangeInt32ToIntPtr(LoadObjectField<Int32T>(object, offset));
} else {
return SmiToIntPtr(
LoadObjectField(object, offset, MachineType::TaggedSigned()));
@@ -1551,9 +1634,10 @@ TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
object, HeapNumber::kValueOffset, MachineType::Float64()));
}
-TNode<Map> CodeStubAssembler::GetStructMap(InstanceType instance_type) {
- Handle<Map> map_handle(Map::GetStructMap(isolate(), instance_type),
- isolate());
+TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) {
+ Handle<Map> map_handle(
+ Map::GetInstanceTypeMap(ReadOnlyRoots(isolate()), instance_type),
+ isolate());
return HeapConstant(map_handle);
}
@@ -1743,8 +1827,8 @@ TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- return ChangeInt32ToIntPtr(LoadObjectField(
- map, Map::kInstanceSizeInWordsOffset, MachineType::Uint8()));
+ return ChangeInt32ToIntPtr(
+ LoadObjectField<Uint8T>(map, Map::kInstanceSizeInWordsOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
@@ -1752,9 +1836,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetInObjectPropertiesStartInWords() for details.
CSA_ASSERT(this, IsJSObjectMap(map));
- return ChangeInt32ToIntPtr(LoadObjectField(
- map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
- MachineType::Uint8()));
+ return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
+ map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
@@ -1762,9 +1845,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetConstructorFunctionIndex() for details.
CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
- return ChangeInt32ToIntPtr(LoadObjectField(
- map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
- MachineType::Uint8()));
+ return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
+ map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset));
}
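
The two loads above read the same overloaded byte. A standalone sketch (not V8 code, names invented) of that overloading, assuming the byte holds the in-object-properties start for JSObject maps and the constructor function index for primitive maps:

#include <cassert>
#include <cstdint>

struct MapSketch {
  bool is_js_object_map;
  uint8_t start_or_ctor_index;  // the overloaded byte
};

uint8_t InObjectPropertiesStartInWords(const MapSketch& m) {
  assert(m.is_js_object_map);   // only meaningful for JSObject maps
  return m.start_or_ctor_index;
}

uint8_t ConstructorFunctionIndex(const MapSketch& m) {
  assert(!m.is_js_object_map);  // only meaningful for primitive maps
  return m.start_or_ctor_index;
}

int main() {
  MapSketch object_map{true, 3}, primitive_map{false, 7};
  assert(InObjectPropertiesStartInWords(object_map) == 3);
  assert(ConstructorFunctionIndex(primitive_map) == 7);
  return 0;
}
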
TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
@@ -2443,7 +2525,7 @@ TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
}
template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
- TNode<FeedbackVector> feedback_vector, TNode<Smi> slot,
+ TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
int additional_offset);
template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
TNode<FeedbackVector> feedback_vector, TNode<IntPtrT> slot,
@@ -2854,8 +2936,9 @@ void CodeStubAssembler::StoreObjectFieldRoot(TNode<HeapObject> object,
}
void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
- Node* object, Node* index_node, Node* value, WriteBarrierMode barrier_mode,
- int additional_offset, ParameterMode parameter_mode) {
+ TNode<UnionT<FixedArray, PropertyArray>> object, Node* index_node,
+ TNode<Object> value, WriteBarrierMode barrier_mode, int additional_offset,
+ ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(
this, Word32Or(IsFixedArraySubclass(object), IsPropertyArray(object)));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
@@ -2964,8 +3047,9 @@ TNode<Int32T> CodeStubAssembler::EnsureArrayPushable(TNode<Context> context,
}
void CodeStubAssembler::PossiblyGrowElementsCapacity(
- ParameterMode mode, ElementsKind kind, Node* array, Node* length,
- Variable* var_elements, Node* growth, Label* bailout) {
+ ParameterMode mode, ElementsKind kind, TNode<HeapObject> array,
+ Node* length, TVariable<FixedArrayBase>* var_elements, Node* growth,
+ Label* bailout) {
Label fits(this, var_elements);
Node* capacity =
TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value()), mode);
@@ -2974,9 +3058,8 @@ void CodeStubAssembler::PossiblyGrowElementsCapacity(
Node* new_length = IntPtrOrSmiAdd(growth, length, mode);
GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
- var_elements->Bind(GrowElementsCapacity(array, var_elements->value(), kind,
- kind, capacity, new_capacity, mode,
- bailout));
+ *var_elements = GrowElementsCapacity(array, var_elements->value(), kind, kind,
+ capacity, new_capacity, mode, bailout);
Goto(&fits);
BIND(&fits);
}
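
A standalone sketch (not V8 code) of the growth step above, assuming the usual V8 capacity formula new = old + old/2 + 16 (see CalculateNewElementsCapacity); capacity grows only when the appended length no longer fits:

#include <cassert>

int CalculateNewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;  // assumed growth formula
}

int GrowIfNeeded(int capacity, int length, int growth) {
  int new_length = length + growth;
  return new_length > capacity ? CalculateNewCapacity(new_length) : capacity;
}

int main() {
  assert(GrowIfNeeded(8, 8, 1) == 9 + 4 + 16);  // must grow
  assert(GrowIfNeeded(32, 8, 1) == 32);         // still fits, unchanged
  return 0;
}
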
@@ -3035,17 +3118,19 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
ParameterMode mode, Label* bailout,
- Node* elements, Node* index,
- Node* value) {
+ TNode<FixedArrayBase> elements,
+ Node* index, TNode<Object> value) {
if (IsSmiElementsKind(kind)) {
GotoIf(TaggedIsNotSmi(value), bailout);
} else if (IsDoubleElementsKind(kind)) {
- GotoIfNotNumber(CAST(value), bailout);
+ GotoIfNotNumber(value, bailout);
}
if (IsDoubleElementsKind(kind)) {
- value = ChangeNumberToFloat64(CAST(value));
+ StoreElement(elements, kind, index, ChangeNumberToFloat64(CAST(value)),
+ mode);
+ } else {
+ StoreElement(elements, kind, index, value, mode);
}
- StoreElement(elements, kind, index, value, mode);
}
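
A standalone sketch (not V8 code, types invented) of the dispatch above: Smi kinds bail out on non-Smi values, double kinds bail out on non-numbers and store the untagged float64, and all other kinds store the tagged value unchanged:

#include <variant>

enum class Kind { kSmi, kDouble, kTagged };
struct Smi { int value; };
struct Number { double value; };
using Tagged = std::variant<Smi, Number>;

// Returns false to model the bailout label.
bool TryStore(Kind kind, const Tagged& value, double* double_slot,
              Tagged* tagged_slot) {
  if (kind == Kind::kSmi && !std::holds_alternative<Smi>(value)) return false;
  if (kind == Kind::kDouble) {
    if (const Number* n = std::get_if<Number>(&value)) *double_slot = n->value;
    else if (const Smi* s = std::get_if<Smi>(&value)) *double_slot = s->value;
    else return false;  // unreachable here, kept for the general shape
    return true;
  }
  *tagged_slot = value;  // Smi or tagged kinds store the value as-is
  return true;
}

int main() {
  double d = 0;
  Tagged slot = Smi{0};
  bool smi_store = TryStore(Kind::kSmi, Tagged{Number{1.5}}, &d, &slot);
  bool dbl_store = TryStore(Kind::kDouble, Tagged{Smi{7}}, &d, &slot);
  return (!smi_store && dbl_store && d == 7.0) ? 0 : 1;
}
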
void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
@@ -3321,7 +3406,6 @@ TNode<String> CodeStubAssembler::AllocateSlicedTwoByteString(
offset);
}
-
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
int at_least_space_for) {
return AllocateNameDictionary(IntPtrConstant(at_least_space_for));
@@ -3426,7 +3510,7 @@ TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTable() {
const ElementsKind elements_kind = HOLEY_ELEMENTS;
TNode<IntPtrT> length_intptr = IntPtrConstant(kFixedArrayLength);
TNode<Map> fixed_array_map =
- CAST(LoadRoot(CollectionType::GetMapRootIndex()));
+ HeapConstant(CollectionType::GetMap(ReadOnlyRoots(isolate())));
TNode<CollectionType> table =
CAST(AllocateFixedArray(elements_kind, length_intptr,
kAllowLargeObjectAllocation, fixed_array_map));
@@ -3557,9 +3641,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
  // The object still has in-object slack, therefore the |used_or_unused|
  // field contains the "used" value.
- TNode<IntPtrT> used_size = Signed(TimesTaggedSize(ChangeUint32ToWord(
- LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
- MachineType::Uint8()))));
+ TNode<IntPtrT> used_size =
+ Signed(TimesTaggedSize(ChangeUint32ToWord(LoadObjectField<Uint8T>(
+ map, Map::kUsedOrUnusedInstanceSizeInWordsOffset))));
Comment("iInitialize filler fields");
InitializeFieldsWithRoot(object, used_size, instance_size,
@@ -3948,7 +4032,8 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
Node* capacity, SloppyTNode<Map> source_map, ElementsKind from_kind,
AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
ParameterMode parameter_mode, HoleConversionMode convert_holes,
- TVariable<BoolT>* var_holes_converted, Node* source_elements_kind) {
+ TVariable<BoolT>* var_holes_converted,
+ base::Optional<TNode<Int32T>> source_elements_kind) {
DCHECK_NE(first, nullptr);
DCHECK_NE(count, nullptr);
DCHECK_NE(capacity, nullptr);
@@ -4070,9 +4155,9 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
// Try to use memcpy if we don't need to convert holes to undefined.
if (convert_holes == HoleConversionMode::kDontConvert &&
- source_elements_kind != nullptr) {
+ source_elements_kind) {
// Only try memcpy if we're not copying object pointers.
- GotoIfNot(IsFastSmiElementsKind(source_elements_kind),
+ GotoIfNot(IsFastSmiElementsKind(*source_elements_kind),
&copy_one_by_one);
const ElementsKind to_smi_kind = PACKED_SMI_ELEMENTS;
@@ -4116,8 +4201,8 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
}
TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
- Node* from_array, Node* first, Node* count, Node* capacity,
- Node* fixed_array_map, TVariable<BoolT>* var_holes_converted,
+ TNode<FixedArrayBase> from_array, Node* first, Node* count, Node* capacity,
+ TNode<Map> fixed_array_map, TVariable<BoolT>* var_holes_converted,
AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
ParameterMode mode) {
DCHECK_NE(first, nullptr);
@@ -4169,7 +4254,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
IntPtrSub(var_from_offset.value(), IntPtrConstant(kDoubleSize));
var_from_offset = from_offset;
- Node* to_offset = from_offset;
+ TNode<IntPtrT> to_offset = from_offset;
Label if_hole(this);
@@ -4202,9 +4287,10 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
}
TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
- Node* source, Node* first, Node* count, Node* capacity,
+ TNode<FixedArrayBase> source, Node* first, Node* count, Node* capacity,
ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode,
- TVariable<BoolT>* var_holes_converted, Node* source_runtime_kind) {
+ TVariable<BoolT>* var_holes_converted,
+ base::Optional<TNode<Int32T>> source_runtime_kind) {
DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays ||
extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays);
// If we want to replace holes, ExtractFixedArrayFlag::kDontCopyCOW should not
@@ -4282,7 +4368,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
kind, capacity, parameter_mode, allocation_flags, source_map);
FillFixedArrayWithValue(kind, to_elements, count, capacity,
RootIndex::kTheHoleValue, parameter_mode);
- CopyElements(kind, to_elements, IntPtrConstant(0), CAST(source),
+ CopyElements(kind, to_elements, IntPtrConstant(0), source,
ParameterToIntPtr(first, parameter_mode),
ParameterToIntPtr(count, parameter_mode));
var_result = to_elements;
@@ -4317,9 +4403,8 @@ void CodeStubAssembler::InitializePropertyArrayLength(
ParameterToTagged(length, mode));
}
-Node* CodeStubAssembler::AllocatePropertyArray(Node* capacity_node,
- ParameterMode mode,
- AllocationFlags flags) {
+TNode<PropertyArray> CodeStubAssembler::AllocatePropertyArray(
+ Node* capacity_node, ParameterMode mode, AllocationFlags flags) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
IntPtrOrSmiConstant(0, mode), mode));
@@ -4330,17 +4415,16 @@ Node* CodeStubAssembler::AllocatePropertyArray(Node* capacity_node,
RootIndex map_index = RootIndex::kPropertyArrayMap;
DCHECK(RootsTable::IsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
- InitializePropertyArrayLength(CAST(array), capacity_node, mode);
- return array;
+ TNode<PropertyArray> property_array = CAST(array);
+ InitializePropertyArrayLength(property_array, capacity_node, mode);
+ return property_array;
}
-void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
- Node* from_node,
- Node* to_node,
- ParameterMode mode) {
+void CodeStubAssembler::FillPropertyArrayWithUndefined(
+ TNode<PropertyArray> array, Node* from_node, Node* to_node,
+ ParameterMode mode) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
- CSA_SLOW_ASSERT(this, IsPropertyArray(array));
ElementsKind kind = PACKED_ELEMENTS;
TNode<Oddball> value = UndefinedConstant();
BuildFastFixedArrayForEach(
@@ -4352,7 +4436,8 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
mode);
}
-void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, Node* array,
+void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
+ TNode<FixedArrayBase> array,
Node* from_node, Node* to_node,
RootIndex value_root_index,
ParameterMode mode) {
@@ -4641,10 +4726,11 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
}
void CodeStubAssembler::CopyFixedArrayElements(
- ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
- Node* to_array, Node* first_element, Node* element_count, Node* capacity,
- WriteBarrierMode barrier_mode, ParameterMode mode,
- HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted) {
+ ElementsKind from_kind, TNode<FixedArrayBase> from_array,
+ ElementsKind to_kind, TNode<FixedArrayBase> to_array, Node* first_element,
+ Node* element_count, Node* capacity, WriteBarrierMode barrier_mode,
+ ParameterMode mode, HoleConversionMode convert_holes,
+ TVariable<BoolT>* var_holes_converted) {
DCHECK_IMPLIES(var_holes_converted != nullptr,
convert_holes == HoleConversionMode::kConvertToUndefined);
CSA_SLOW_ASSERT(this, MatchesParameterMode(element_count, mode));
@@ -4719,10 +4805,10 @@ void CodeStubAssembler::CopyFixedArrayElements(
var_holes_converted != nullptr ? arraysize(vars) : arraysize(vars) - 1;
Label decrement(this, num_vars, vars);
- Node* to_array_adjusted =
+ TNode<IntPtrT> to_array_adjusted =
element_offset_matches
? IntPtrSub(BitcastTaggedToWord(to_array), first_from_element_offset)
- : to_array;
+ : ReinterpretCast<IntPtrT>(to_array);
Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
@@ -4825,8 +4911,8 @@ TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
return UncheckedCast<FixedArray>(base);
}
-void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
- Node* to_array,
+void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
+ TNode<PropertyArray> to_array,
Node* property_count,
WriteBarrierMode barrier_mode,
ParameterMode mode,
@@ -4834,7 +4920,6 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
CSA_SLOW_ASSERT(this, MatchesParameterMode(property_count, mode));
CSA_SLOW_ASSERT(this, Word32Or(IsPropertyArray(from_array),
IsEmptyFixedArray(from_array)));
- CSA_SLOW_ASSERT(this, IsPropertyArray(to_array));
Comment("[ CopyPropertyArrayValues");
bool needs_write_barrier = barrier_mode == UPDATE_WRITE_BARRIER;
@@ -4871,7 +4956,8 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
if (destroy_source == DestroySource::kYes) {
Label did_zap(this);
GotoIf(IsEmptyFixedArray(from_array), &did_zap);
- FillPropertyArrayWithUndefined(from_array, start, property_count, mode);
+ FillPropertyArrayWithUndefined(CAST(from_array), start, property_count,
+ mode);
Goto(&did_zap);
BIND(&did_zap);
@@ -4920,11 +5006,9 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
}
TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
- Node* object, Node* elements, ElementsKind kind, Node* key,
- Label* bailout) {
- CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
+ TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
+ TNode<Smi> key, Label* bailout) {
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
- CSA_SLOW_ASSERT(this, TaggedIsSmi(key));
TNode<Smi> capacity = LoadFixedArrayBaseLength(elements);
ParameterMode mode = OptimalParameterMode();
@@ -4934,10 +5018,9 @@ TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
}
TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
- Node* object, Node* elements, ElementsKind kind, Node* key, Node* capacity,
- ParameterMode mode, Label* bailout) {
+ TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
+ Node* key, Node* capacity, ParameterMode mode, Label* bailout) {
Comment("TryGrowElementsCapacity");
- CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(key, mode));
@@ -4955,10 +5038,10 @@ TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
}
TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
- Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind,
- Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) {
+ TNode<HeapObject> object, TNode<FixedArrayBase> elements,
+ ElementsKind from_kind, ElementsKind to_kind, Node* capacity,
+ Node* new_capacity, ParameterMode mode, Label* bailout) {
Comment("[ GrowElementsCapacity");
- CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, from_kind));
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(new_capacity, mode));
@@ -4977,12 +5060,12 @@ TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
// Copy the elements from the old elements store to the new.
// The size-check above guarantees that the |new_elements| is allocated
// in new space so we can skip the write barrier.
- CopyFixedArrayElements(from_kind, CAST(elements), to_kind, new_elements,
+ CopyFixedArrayElements(from_kind, elements, to_kind, new_elements,
UncheckedCast<IntPtrT>(capacity),
UncheckedCast<IntPtrT>(new_capacity),
SKIP_WRITE_BARRIER, mode);
- StoreObjectField(CAST(object), JSObject::kElementsOffset, new_elements);
+ StoreObjectField(object, JSObject::kElementsOffset, new_elements);
Comment("] GrowElementsCapacity");
return new_elements;
}
@@ -5616,6 +5699,10 @@ TNode<BoolT> CodeStubAssembler::IsCallableMap(SloppyTNode<Map> map) {
return IsSetWord32<Map::Bits1::IsCallableBit>(LoadMapBitField(map));
}
+TNode<BoolT> CodeStubAssembler::IsCoverageInfo(TNode<HeapObject> object) {
+ return IsCoverageInfoMap(LoadMap(object));
+}
+
TNode<BoolT> CodeStubAssembler::IsDebugInfo(TNode<HeapObject> object) {
return HasInstanceType(object, DEBUG_INFO_TYPE);
}
@@ -6037,13 +6124,13 @@ TNode<BoolT> CodeStubAssembler::IsPromiseFulfillReactionJobTask(
// TODO(jgruber): It might be worth creating an empty_double_array constant to
// simplify this case.
TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKindOrEmpty(
- SloppyTNode<HeapObject> object, ElementsKind kind) {
+ SloppyTNode<FixedArrayBase> object, ElementsKind kind) {
Label out(this);
TVARIABLE(BoolT, var_result, Int32TrueConstant());
GotoIf(IsFixedArrayWithKind(object, kind), &out);
- const TNode<Smi> length = LoadFixedArrayBaseLength(CAST(object));
+ const TNode<Smi> length = LoadFixedArrayBaseLength(object);
GotoIf(SmiEqual(length, SmiConstant(0)), &out);
var_result = Int32FalseConstant();
@@ -6151,13 +6238,35 @@ TNode<BoolT> CodeStubAssembler::IsUniqueName(TNode<HeapObject> object) {
[=] { return IsSymbolInstanceType(instance_type); });
}
+// Semantics: guaranteed not to be an integer index (i.e. contains non-digit
+// characters, or is outside MAX_SAFE_INTEGER/size_t range). Note that for
+// non-TypedArray receivers, there are additional strings that must be treated
+// as named property keys, namely the range [0xFFFFFFFF, MAX_SAFE_INTEGER].
TNode<BoolT> CodeStubAssembler::IsUniqueNameNoIndex(TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return Select<BoolT>(
IsInternalizedStringInstanceType(instance_type),
[=] {
return IsSetWord32(LoadNameHashField(CAST(object)),
- Name::kIsNotArrayIndexMask);
+ Name::kIsNotIntegerIndexMask);
+ },
+ [=] { return IsSymbolInstanceType(instance_type); });
+}
+
+// Semantics: {object} is a Symbol, or a String that doesn't have a cached
+// index. This returns {true} for strings containing representations of
+// integers in the range above 9999999 (per kMaxCachedArrayIndexLength)
+// and below MAX_SAFE_INTEGER. For CSA_ASSERTs ensuring correct usage, this is
+// better than no checking, and we don't have a good/fast way to accurately
+// check such strings for being within "array index" (uint32_t) range.
+TNode<BoolT> CodeStubAssembler::IsUniqueNameNoCachedIndex(
+ TNode<HeapObject> object) {
+ TNode<Uint16T> instance_type = LoadInstanceType(object);
+ return Select<BoolT>(
+ IsInternalizedStringInstanceType(instance_type),
+ [=] {
+ return IsSetWord32(LoadNameHashField(CAST(object)),
+ Name::kDoesNotContainCachedArrayIndexMask);
},
[=] { return IsSymbolInstanceType(instance_type); });
}
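
A standalone sketch (not V8 code) of the cached-index cutoff the comments above describe, assuming kMaxCachedArrayIndexLength == 7, so only integer strings of at most seven digits get a cached index in the hash field:

#include <cassert>
#include <cctype>
#include <string>

constexpr size_t kMaxCachedArrayIndexLength = 7;  // assumed, per the comment

bool HasCachedArrayIndex(const std::string& s) {
  if (s.empty() || s.size() > kMaxCachedArrayIndexLength) return false;
  if (s.size() > 1 && s[0] == '0') return false;  // no leading zeros
  for (char c : s)
    if (!std::isdigit(static_cast<unsigned char>(c))) return false;
  return true;
}

int main() {
  assert(HasCachedArrayIndex("123"));        // cached, so "NoCachedIndex" fails
  assert(!HasCachedArrayIndex("12345678"));  // 8 digits: an index, but uncached
  assert(!HasCachedArrayIndex("foo"));       // not an index at all
  return 0;
}
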
@@ -6418,9 +6527,8 @@ TNode<BoolT> CodeStubAssembler::IsNumberArrayIndex(TNode<Number> number) {
[=] { return IsHeapNumberUint32(CAST(number)); });
}
-Node* CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(Node* element_count,
- int base_size,
- ParameterMode mode) {
+TNode<BoolT> CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(
+ Node* element_count, int base_size, ParameterMode mode) {
int max_newspace_elements =
(kMaxRegularHeapObjectSize - base_size) / kTaggedSize;
return IntPtrOrSmiGreaterThan(
@@ -6826,6 +6934,7 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
}
BIND(&if_inputisbigint);
+ CSA_ASSERT(this, Word32And(TaggedIsNotSmi(context), IsContext(context)));
if (mode == Object::Conversion::kToNumeric) {
var_result = CAST(input);
Goto(&end);
@@ -6850,6 +6959,7 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
BIND(&if_inputisreceiver);
{
+ CSA_ASSERT(this, Word32And(TaggedIsNotSmi(context), IsContext(context)));
// The {input} is a JSReceiver, we need to convert it to a Primitive first
// using the ToPrimitive type conversion, preferably yielding a Number.
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
@@ -6880,6 +6990,7 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
BIND(&if_inputisother);
{
+ CSA_ASSERT(this, Word32And(TaggedIsNotSmi(context), IsContext(context)));
// The {input} is something else (e.g. Symbol), let the runtime figure
// out the correct exception.
// Note: We cannot tail call to the runtime here, as js-to-wasm
@@ -7208,82 +7319,6 @@ TNode<Number> CodeStubAssembler::ToLength_Inline(SloppyTNode<Context> context,
[=] { return CAST(CallBuiltin(Builtins::kToLength, context, input)); });
}
-TNode<Number> CodeStubAssembler::ToInteger_Inline(
- SloppyTNode<Context> context, SloppyTNode<Object> input,
- ToIntegerTruncationMode mode) {
- Builtins::Name builtin = (mode == kNoTruncation)
- ? Builtins::kToInteger
- : Builtins::kToInteger_TruncateMinusZero;
- return Select<Number>(
- TaggedIsSmi(input), [=] { return CAST(input); },
- [=] { return CAST(CallBuiltin(builtin, context, input)); });
-}
-
-TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
- SloppyTNode<Object> input,
- ToIntegerTruncationMode mode) {
- // We might need to loop once for ToNumber conversion.
- TVARIABLE(Object, var_arg, input);
- Label loop(this, &var_arg), out(this);
- Goto(&loop);
- BIND(&loop);
- {
- // Shared entry points.
- Label return_zero(this, Label::kDeferred);
-
- // Load the current {arg} value.
- TNode<Object> arg = var_arg.value();
-
- // Check if {arg} is a Smi.
- GotoIf(TaggedIsSmi(arg), &out);
-
- // Check if {arg} is a HeapNumber.
- Label if_argisheapnumber(this),
- if_argisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumber(CAST(arg)), &if_argisheapnumber,
- &if_argisnotheapnumber);
-
- BIND(&if_argisheapnumber);
- {
- TNode<HeapNumber> arg_hn = CAST(arg);
- // Load the floating-point value of {arg}.
- TNode<Float64T> arg_value = LoadHeapNumberValue(arg_hn);
-
- // Check if {arg} is NaN.
- GotoIfNot(Float64Equal(arg_value, arg_value), &return_zero);
-
- // Truncate {arg} towards zero.
- TNode<Float64T> value = Float64Trunc(arg_value);
-
- if (mode == kTruncateMinusZero) {
- // Truncate -0.0 to 0.
- GotoIf(Float64Equal(value, Float64Constant(0.0)), &return_zero);
- }
-
- var_arg = ChangeFloat64ToTagged(value);
- Goto(&out);
- }
-
- BIND(&if_argisnotheapnumber);
- {
- // Need to convert {arg} to a Number first.
- var_arg = UncheckedCast<Object>(
- CallBuiltin(Builtins::kNonNumberToNumber, context, arg));
- Goto(&loop);
- }
-
- BIND(&return_zero);
- var_arg = SmiConstant(0);
- Goto(&out);
- }
-
- BIND(&out);
- if (mode == kTruncateMinusZero) {
- CSA_ASSERT(this, IsNumberNormalized(CAST(var_arg.value())));
- }
- return CAST(var_arg.value());
-}
-
TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
uint32_t shift, uint32_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
@@ -7419,7 +7454,7 @@ void CodeStubAssembler::TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
&if_has_cached_index);
// No cached array index. If the string knows that it contains an index,
// then it must be an uncacheable index. Handle this case in the runtime.
- GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout);
+ GotoIf(IsClearWord32(hash, Name::kIsNotIntegerIndexMask), if_bailout);
GotoIf(InstanceTypeEqual(var_instance_type.value(), THIN_STRING_TYPE),
&if_thinstring);
@@ -7742,7 +7777,7 @@ void CodeStubAssembler::NumberDictionaryLookup(
TNode<IntPtrT> mask = IntPtrSub(capacity, IntPtrConstant(1));
TNode<UintPtrT> hash = ChangeUint32ToWord(ComputeSeededHash(intptr_index));
- Node* key_as_float64 = RoundIntPtrToFloat64(intptr_index);
+ TNode<Float64T> key_as_float64 = RoundIntPtrToFloat64(intptr_index);
// See Dictionary::FirstProbe().
TNode<IntPtrT> count = IntPtrConstant(0);
@@ -8411,7 +8446,7 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
Label* if_not_found) {
CSA_ASSERT(this, IsSimpleObjectMap(map));
- CSA_ASSERT(this, IsUniqueNameNoIndex(unique_name));
+ CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
Label if_isfastmap(this), if_isslowmap(this);
@@ -8436,7 +8471,7 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
}
void CodeStubAssembler::TryLookupProperty(
- SloppyTNode<JSReceiver> object, SloppyTNode<Map> map,
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<Int32T> instance_type, SloppyTNode<Name> unique_name,
Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
@@ -8460,7 +8495,7 @@ void CodeStubAssembler::TryLookupProperty(
Map::Bits1::IsAccessCheckNeededBit::kMask;
GotoIf(IsSetWord32(bit_field, mask), if_bailout);
- TNode<GlobalDictionary> dictionary = CAST(LoadSlowProperties(object));
+ TNode<GlobalDictionary> dictionary = CAST(LoadSlowProperties(CAST(object)));
*var_meta_storage = dictionary;
NameDictionaryLookup<GlobalDictionary>(
@@ -8468,13 +8503,14 @@ void CodeStubAssembler::TryLookupProperty(
}
}
-void CodeStubAssembler::TryHasOwnProperty(Node* object, Node* map,
- Node* instance_type,
- Node* unique_name, Label* if_found,
- Label* if_not_found,
+void CodeStubAssembler::TryHasOwnProperty(TNode<HeapObject> object,
+ TNode<Map> map,
+ TNode<Int32T> instance_type,
+ TNode<Name> unique_name,
+ Label* if_found, Label* if_not_found,
Label* if_bailout) {
Comment("TryHasOwnProperty");
- CSA_ASSERT(this, IsUniqueNameNoIndex(CAST(unique_name)));
+ CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_name_index);
@@ -8671,7 +8707,7 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
{
if (mode == kCallJSGetter) {
Label if_callable(this), if_function_template_info(this);
- Node* accessor_pair = value;
+ TNode<AccessorPair> accessor_pair = CAST(value);
TNode<HeapObject> getter =
CAST(LoadObjectField(accessor_pair, AccessorPair::kGetterOffset));
TNode<Map> getter_map = LoadMap(getter);
@@ -8686,8 +8722,7 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
BIND(&if_callable);
{
// Call the accessor.
- Callable callable = CodeFactory::Call(isolate());
- var_value = CallJS(callable, context, getter, receiver);
+ var_value = Call(context, getter, receiver);
Goto(&done);
}
@@ -8790,7 +8825,7 @@ void CodeStubAssembler::TryGetOwnProperty(
GetOwnPropertyMode mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Comment("TryGetOwnProperty");
- CSA_ASSERT(this, IsUniqueNameNoIndex(unique_name));
+ CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_entry);
@@ -9276,17 +9311,18 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
TNode<TIndex> index_node, ElementsKind kind, int base_size) {
// TODO(v8:9708): Remove IntPtrT variant in favor of UintPtrT.
static_assert(std::is_same<TIndex, Smi>::value ||
+ std::is_same<TIndex, TaggedIndex>::value ||
std::is_same<TIndex, IntPtrT>::value ||
std::is_same<TIndex, UintPtrT>::value,
"Only Smi, UintPtrT or IntPtrT index nodes are allowed");
int element_size_shift = ElementsKindToShiftSize(kind);
int element_size = 1 << element_size_shift;
- int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
intptr_t index = 0;
TNode<IntPtrT> intptr_index_node;
bool constant_index = false;
if (std::is_same<TIndex, Smi>::value) {
TNode<Smi> smi_index_node = ReinterpretCast<Smi>(index_node);
+ int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
element_size_shift -= kSmiShiftBits;
Smi smi_index;
constant_index = ToSmiConstant(smi_index_node, &smi_index);
@@ -9298,6 +9334,12 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
}
}
intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(smi_index_node);
+ } else if (std::is_same<TIndex, TaggedIndex>::value) {
+ TNode<TaggedIndex> tagged_index_node =
+ ReinterpretCast<TaggedIndex>(index_node);
+ element_size_shift -= kSmiTagSize;
+ intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(tagged_index_node);
+ constant_index = ToIntPtrConstant(intptr_index_node, &index);
} else {
intptr_index_node = ReinterpretCast<IntPtrT>(index_node);
constant_index = ToIntPtrConstant(intptr_index_node, &index);
@@ -9323,6 +9365,9 @@ CodeStubAssembler::ElementOffsetFromIndex<Smi>(TNode<Smi> index_node,
ElementsKind kind,
int base_size);
template V8_EXPORT_PRIVATE TNode<IntPtrT>
+CodeStubAssembler::ElementOffsetFromIndex<TaggedIndex>(
+ TNode<TaggedIndex> index_node, ElementsKind kind, int base_size);
+template V8_EXPORT_PRIVATE TNode<IntPtrT>
CodeStubAssembler::ElementOffsetFromIndex<IntPtrT>(TNode<IntPtrT> index_node,
ElementsKind kind,
int base_size);
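
A standalone sketch (not V8 code) of the shift bookkeeping in ElementOffsetFromIndex above, assuming a 64-bit build with 32-bit Smis (kSmiShiftSize + kSmiTagSize == 32): instead of untagging first, the element-size shift is adjusted by the bits the tagged payload is already shifted by:

#include <cassert>
#include <cstdint>

// Net shift for a Smi index; assumes arithmetic right shift, as the
// generated WordSar does.
int64_t OffsetFromSmi(int64_t smi_bits, int element_size_shift, int base_size) {
  int shift = element_size_shift - 32;  // kSmiShiftSize + kSmiTagSize
  int64_t scaled = shift >= 0 ? (smi_bits << shift) : (smi_bits >> -shift);
  return base_size + scaled;
}

int main() {
  int64_t smi_5 = int64_t{5} << 32;  // Smi encoding of index 5
  // 8-byte elements (shift 3), 16-byte header: 16 + 5 * 8 == 56.
  assert(OffsetFromSmi(smi_5, 3, 16) == 56);
  return 0;
}
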
@@ -9936,8 +9981,8 @@ void CodeStubAssembler::EmitElementStore(
IsNonextensibleElementsKind(elements_kind))) {
CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
} else if (IsCOWHandlingStoreMode(store_mode)) {
- elements = CAST(CopyElementsOnWrite(object, elements, elements_kind, length,
- parameter_mode, bailout));
+ elements = CopyElementsOnWrite(object, elements, elements_kind, length,
+ parameter_mode, bailout);
}
CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
@@ -9953,7 +9998,7 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
Label grow_case(this), no_grow_case(this), done(this),
grow_bailout(this, Label::kDeferred);
- Node* condition;
+ TNode<BoolT> condition;
if (IsHoleyElementsKind(kind)) {
condition = UintPtrGreaterThanOrEqual(key, length);
} else {
@@ -10011,20 +10056,19 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
return checked_elements.value();
}
-Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
- ElementsKind kind, Node* length,
- ParameterMode mode,
- Label* bailout) {
- VARIABLE(new_elements_var, MachineRepresentation::kTagged, elements);
+TNode<FixedArrayBase> CodeStubAssembler::CopyElementsOnWrite(
+ TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
+ Node* length, ParameterMode mode, Label* bailout) {
+ TVARIABLE(FixedArrayBase, new_elements_var, elements);
Label done(this);
GotoIfNot(IsFixedCOWArrayMap(LoadMap(elements)), &done);
{
Node* capacity =
TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
- Node* new_elements = GrowElementsCapacity(object, elements, kind, kind,
- length, capacity, mode, bailout);
- new_elements_var.Bind(new_elements);
+ TNode<FixedArrayBase> new_elements = GrowElementsCapacity(
+ object, elements, kind, kind, length, capacity, mode, bailout);
+ new_elements_var = new_elements;
Goto(&done);
}
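
A standalone sketch (not V8 code) of the copy-on-write discipline above: a shared (COW) backing store must be replaced by a private copy before the first write:

#include <cassert>
#include <memory>
#include <vector>

using Elements = std::vector<int>;

// Replace a shared (COW) store with a private copy before writing.
void EnsureWritable(std::shared_ptr<Elements>& elements) {
  if (elements.use_count() > 1)
    elements = std::make_shared<Elements>(*elements);
}

int main() {
  auto a = std::make_shared<Elements>(Elements{1, 2, 3});
  auto b = a;          // b shares a's backing store
  EnsureWritable(b);   // copy-on-write: b now owns a private copy
  (*b)[0] = 42;
  assert((*a)[0] == 1 && (*b)[0] == 42);
  return 0;
}
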
@@ -12135,9 +12179,7 @@ TNode<Oddball> CodeStubAssembler::InstanceOf(TNode<Object> object,
GotoIf(IsUndefined(inst_of_handler), &if_nohandler);
// Call the {inst_of_handler} for {callable} and {object}.
- Node* result = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, inst_of_handler, callable, object);
+ TNode<Object> result = Call(context, inst_of_handler, callable, object);
// Convert the {result} to a Boolean.
BranchIfToBooleanIsTrue(result, &return_true, &return_false);
@@ -12428,11 +12470,6 @@ TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
return Construct(context, constructor, len);
}
-TNode<BoolT> CodeStubAssembler::IsDetachedBuffer(TNode<JSArrayBuffer> buffer) {
- TNode<Uint32T> buffer_bit_field = LoadJSArrayBufferBitField(buffer);
- return IsSetWord32<JSArrayBuffer::WasDetachedBit>(buffer_bit_field);
-}
-
void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
SloppyTNode<Context> context, TNode<JSArrayBuffer> array_buffer,
const char* method_name) {
@@ -12492,30 +12529,52 @@ CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
argc_(argc),
base_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
+#ifdef V8_REVERSE_JSARGS
+ TNode<IntPtrT> offset = assembler_->IntPtrConstant(
+ (StandardFrameConstants::kFixedSlotCountAboveFp + 1) *
+ kSystemPointerSize);
+#else
TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
argc_, SYSTEM_POINTER_ELEMENTS,
(StandardFrameConstants::kFixedSlotCountAboveFp - 1) *
kSystemPointerSize);
+#endif
+  // base_ points to the first argument, not to the receiver, whether or not
+  // a receiver is present.
base_ = assembler_->RawPtrAdd(fp_, offset);
}
TNode<Object> CodeStubArguments::GetReceiver() const {
DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
- return assembler_->UncheckedCast<Object>(assembler_->LoadFullTagged(
- base_, assembler_->IntPtrConstant(kSystemPointerSize)));
+#ifdef V8_REVERSE_JSARGS
+ intptr_t offset = -kSystemPointerSize;
+#else
+ intptr_t offset = kSystemPointerSize;
+#endif
+ return assembler_->LoadFullTagged(base_, assembler_->IntPtrConstant(offset));
}
void CodeStubArguments::SetReceiver(TNode<Object> object) const {
DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
+#ifdef V8_REVERSE_JSARGS
+ intptr_t offset = -kSystemPointerSize;
+#else
+ intptr_t offset = kSystemPointerSize;
+#endif
assembler_->StoreFullTaggedNoWriteBarrier(
- base_, assembler_->IntPtrConstant(kSystemPointerSize), object);
+ base_, assembler_->IntPtrConstant(offset), object);
}
TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
+#ifdef V8_REVERSE_JSARGS
+ TNode<IntPtrT> offset =
+ assembler_->ElementOffsetFromIndex(index, SYSTEM_POINTER_ELEMENTS, 0);
+#else
TNode<IntPtrT> negated_index =
assembler_->IntPtrOrSmiSub(assembler_->IntPtrConstant(0), index);
TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
negated_index, SYSTEM_POINTER_ELEMENTS, 0);
+#endif
return assembler_->RawPtrAdd(base_, offset);
}
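
A standalone sketch (not V8 code) of the two stack layouts the V8_REVERSE_JSARGS blocks above select between, assuming 8-byte slots: base_ always points at the first argument, and only the traversal direction and the receiver's side of base_ differ:

#include <cassert>
#include <cstdint>

constexpr intptr_t kSlot = 8;  // kSystemPointerSize on a 64-bit target

// Byte offset of argument i relative to base_.
intptr_t ArgOffset(intptr_t i, bool reverse_jsargs) {
  return reverse_jsargs ? i * kSlot     // arguments grow upward from base_
                        : -i * kSlot;   // arguments grow downward from base_
}

// Byte offset of the receiver relative to base_.
intptr_t ReceiverOffset(bool reverse_jsargs) {
  return reverse_jsargs ? -kSlot : kSlot;
}

int main() {
  assert(ArgOffset(0, true) == 0 && ArgOffset(0, false) == 0);
  assert(ArgOffset(2, true) == 16 && ArgOffset(2, false) == -16);
  assert(ReceiverOffset(true) == -kSlot && ReceiverOffset(false) == kSlot);
  return 0;
}
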
@@ -12579,18 +12638,20 @@ void CodeStubArguments::ForEach(
if (last == nullptr) {
last = argc_;
}
- TNode<RawPtrT> start = assembler_->RawPtrSub(
- base_,
- assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS));
- TNode<RawPtrT> end = assembler_->RawPtrSub(
- base_, assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS));
+ TNode<RawPtrT> start = AtIndexPtr(first);
+ TNode<RawPtrT> end = AtIndexPtr(last);
+#ifdef V8_REVERSE_JSARGS
+ const int increment = kSystemPointerSize;
+#else
+ const int increment = -kSystemPointerSize;
+#endif
assembler_->BuildFastLoop<RawPtrT>(
vars, start, end,
[&](TNode<RawPtrT> current) {
- TNode<Object> arg = assembler_->Load<Object>(current);
+ TNode<Object> arg = assembler_->LoadFullTagged(current);
body(arg);
},
- -kSystemPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
+ increment, CodeStubAssembler::IndexAdvanceMode::kPost);
}
void CodeStubArguments::PopAndReturn(TNode<Object> value) {
@@ -12852,53 +12913,50 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
return CAST(fun);
}
-void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
- Node* receiver_map,
+void CodeStubAssembler::CheckPrototypeEnumCache(TNode<JSReceiver> receiver,
+ TNode<Map> receiver_map,
Label* if_fast,
Label* if_slow) {
- VARIABLE(var_object, MachineRepresentation::kTagged, receiver);
- VARIABLE(var_object_map, MachineRepresentation::kTagged, receiver_map);
+ TVARIABLE(JSReceiver, var_object, receiver);
+ TVARIABLE(Map, object_map, receiver_map);
- Label loop(this, {&var_object, &var_object_map}), done_loop(this);
+ Label loop(this, {&var_object, &object_map}), done_loop(this);
Goto(&loop);
BIND(&loop);
{
- // Check that there are no elements on the current {object}.
+ // Check that there are no elements on the current {var_object}.
Label if_no_elements(this);
- Node* object = var_object.value();
- Node* object_map = var_object_map.value();
// The following relies on the elements only aliasing with JSProxy::target,
- // which is a Javascript value and hence cannot be confused with an elements
+ // which is a JavaScript value and hence cannot be confused with an elements
// backing store.
STATIC_ASSERT(static_cast<int>(JSObject::kElementsOffset) ==
static_cast<int>(JSProxy::kTargetOffset));
TNode<Object> object_elements =
- LoadObjectField(object, JSObject::kElementsOffset);
+ LoadObjectField(var_object.value(), JSObject::kElementsOffset);
GotoIf(IsEmptyFixedArray(object_elements), &if_no_elements);
GotoIf(IsEmptySlowElementDictionary(object_elements), &if_no_elements);
// It might still be an empty JSArray.
- GotoIfNot(IsJSArrayMap(object_map), if_slow);
- TNode<Number> object_length = LoadJSArrayLength(CAST(object));
+ GotoIfNot(IsJSArrayMap(object_map.value()), if_slow);
+ TNode<Number> object_length = LoadJSArrayLength(CAST(var_object.value()));
Branch(TaggedEqual(object_length, SmiConstant(0)), &if_no_elements,
if_slow);
- // Continue with the {object}s prototype.
+ // Continue with {var_object}'s prototype.
BIND(&if_no_elements);
- object = LoadMapPrototype(object_map);
+ TNode<HeapObject> object = LoadMapPrototype(object_map.value());
GotoIf(IsNull(object), if_fast);
// For all {object}s but the {receiver}, check that the cache is empty.
- var_object.Bind(object);
+ var_object = CAST(object);
object_map = LoadMap(object);
- var_object_map.Bind(object_map);
- TNode<WordT> object_enum_length = LoadMapEnumLength(object_map);
+ TNode<WordT> object_enum_length = LoadMapEnumLength(object_map.value());
Branch(WordEqual(object_enum_length, IntPtrConstant(0)), &loop, if_slow);
}
}
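
A standalone sketch (not V8 code, types invented) of the walk above: the fast for..in path requires that no object on the chain has elements and that every prototype other than the receiver has an empty enum cache:

struct ObjectSketch {
  bool has_elements;
  int enum_length;  // 0 models an empty enum cache
  const ObjectSketch* prototype;
};

bool SupportsFastForIn(const ObjectSketch* receiver) {
  for (const ObjectSketch* o = receiver; o != nullptr; o = o->prototype) {
    if (o->has_elements) return false;                       // if_slow
    if (o != receiver && o->enum_length != 0) return false;  // if_slow
  }
  return true;                                               // if_fast
}

int main() {
  ObjectSketch proto{false, 0, nullptr};
  ObjectSketch receiver{false, 5, &proto};  // receiver's own cache may be full
  return SupportsFastForIn(&receiver) ? 0 : 1;
}
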
-TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<HeapObject> receiver,
+TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<JSReceiver> receiver,
Label* if_empty,
Label* if_runtime) {
Label if_fast(this), if_cache(this), if_no_cache(this, Label::kDeferred);
@@ -12916,7 +12974,7 @@ TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<HeapObject> receiver,
// Avoid runtime-call for empty dictionary receivers.
GotoIfNot(IsDictionaryMap(receiver_map), if_runtime);
TNode<HashTableBase> properties =
- UncheckedCast<HashTableBase>(LoadSlowProperties(CAST(receiver)));
+ UncheckedCast<HashTableBase>(LoadSlowProperties(receiver));
CSA_ASSERT(this, Word32Or(IsNameDictionary(properties),
IsGlobalDictionary(properties)));
STATIC_ASSERT(static_cast<int>(NameDictionary::kNumberOfElementsIndex) ==
@@ -13018,6 +13076,62 @@ void CodeStubAssembler::InitializeSyntheticFunctionContext(
UndefinedConstant());
}
+TNode<Object> CodeStubAssembler::CallApiCallback(
+ TNode<Object> context, TNode<RawPtrT> callback, TNode<IntPtrT> argc,
+ TNode<Object> data, TNode<Object> holder, TNode<Object> receiver) {
+ Callable callable = CodeFactory::CallApiCallback(isolate());
+ return CallStub(callable, context, callback, argc, data, holder, receiver);
+}
+
+TNode<Object> CodeStubAssembler::CallApiCallback(
+ TNode<Object> context, TNode<RawPtrT> callback, TNode<IntPtrT> argc,
+ TNode<Object> data, TNode<Object> holder, TNode<Object> receiver,
+ TNode<Object> value) {
+  // CallApiCallback receives the first four arguments (callback, argc, data
+  // and holder) in registers; the remaining arguments are passed on the
+  // stack in JS ordering. See ApiCallbackDescriptor.
+ Callable callable = CodeFactory::CallApiCallback(isolate());
+#ifdef V8_REVERSE_JSARGS
+ return CallStub(callable, context, callback, argc, data, holder, value,
+ receiver);
+#else
+ return CallStub(callable, context, callback, argc, data, holder, receiver,
+ value);
+#endif
+}
+
+TNode<Object> CodeStubAssembler::CallRuntimeNewArray(
+ TNode<Context> context, TNode<Object> receiver, TNode<Object> length,
+ TNode<Object> new_target, TNode<Object> allocation_site) {
+  // Runtime_NewArray receives its arguments in JS order (to avoid an
+  // unnecessary copy), except for the last two (new_target and
+  // allocation_site), which are added on top of the stack later.
+#ifdef V8_REVERSE_JSARGS
+ return CallRuntime(Runtime::kNewArray, context, length, receiver, new_target,
+ allocation_site);
+#else
+ return CallRuntime(Runtime::kNewArray, context, receiver, length, new_target,
+ allocation_site);
+#endif
+}
+
+void CodeStubAssembler::TailCallRuntimeNewArray(TNode<Context> context,
+ TNode<Object> receiver,
+ TNode<Object> length,
+ TNode<Object> new_target,
+ TNode<Object> allocation_site) {
+  // Runtime_NewArray receives its arguments in JS order (to avoid an
+  // unnecessary copy), except for the last two (new_target and
+  // allocation_site), which are added on top of the stack later.
+#ifdef V8_REVERSE_JSARGS
+ return TailCallRuntime(Runtime::kNewArray, context, length, receiver,
+ new_target, allocation_site);
+#else
+ return TailCallRuntime(Runtime::kNewArray, context, receiver, length,
+ new_target, allocation_site);
+#endif
+}
+
TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
TNode<Number> length) {
TVARIABLE(JSArray, array);
@@ -13040,8 +13154,8 @@ TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> array_function =
CAST(LoadContextElement(native_context, Context::ARRAY_FUNCTION_INDEX));
- array = CAST(CallRuntime(Runtime::kNewArray, context, array_function,
- length, array_function, UndefinedConstant()));
+ array = CAST(CallRuntimeNewArray(context, array_function, length,
+ array_function, UndefinedConstant()));
Goto(&done);
}
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 6eb9baef69..618481ff47 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -18,6 +18,7 @@
#include "src/objects/promise.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
+#include "src/objects/tagged-index.h"
#include "src/roots/roots.h"
#include "torque-generated/exported-macros-assembler-tq.h"
@@ -52,6 +53,9 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(TypedArraySpeciesProtector, typed_array_species_protector, \
TypedArraySpeciesProtector)
+#define TORQUE_INTERNAL_CLASS_LIST_CSA_ADAPTER(V, NAME, Name, name) \
+ V(Name##Map, name##_map, Name##Map)
+
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
@@ -71,6 +75,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(ConsOneByteStringMap, cons_one_byte_string_map, ConsOneByteStringMap) \
V(ConsStringMap, cons_string_map, ConsStringMap) \
V(constructor_string, constructor_string, ConstructorString) \
+ V(CoverageInfoMap, coverage_info_map, CoverageInfoMap) \
V(date_to_string, date_to_string, DateToString) \
V(default_string, default_string, DefaultString) \
V(EmptyByteArray, empty_byte_array, EmptyByteArray) \
@@ -172,7 +177,9 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(UndefinedValue, undefined_value, Undefined) \
V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \
V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap) \
- V(zero_string, zero_string, ZeroString)
+ V(zero_string, zero_string, ZeroString) \
+ TORQUE_INTERNAL_CLASS_LIST_GENERATOR(TORQUE_INTERNAL_CLASS_LIST_CSA_ADAPTER, \
+ V)
#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
@@ -273,6 +280,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
public TorqueGeneratedExportedMacrosAssembler {
public:
using Node = compiler::Node;
+ using ScopedExceptionHandler = compiler::ScopedExceptionHandler;
template <typename T>
using LazyNode = std::function<TNode<T>()>;
@@ -384,6 +392,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#error Unknown architecture.
#endif
+ TNode<IntPtrT> TaggedIndexToIntPtr(TNode<TaggedIndex> value);
+ TNode<TaggedIndex> IntPtrToTaggedIndex(TNode<IntPtrT> value);
+  // TODO(v8:10047): Get rid of these conversions eventually.
+ TNode<Smi> TaggedIndexToSmi(TNode<TaggedIndex> value);
+ TNode<TaggedIndex> SmiToTaggedIndex(TNode<Smi> value);
+
// Pointer compression specific. Returns true if the upper 32 bits of a Smi
// contain the sign of a lower 32 bits (i.e. not corrupted) so that the Smi
// can be directly used as an index in element offset computation.
@@ -878,6 +892,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
callable, receiver, args...));
}
+ TNode<Object> CallApiCallback(TNode<Object> context, TNode<RawPtrT> callback,
+ TNode<IntPtrT> argc, TNode<Object> data,
+ TNode<Object> holder, TNode<Object> receiver);
+
+ TNode<Object> CallApiCallback(TNode<Object> context, TNode<RawPtrT> callback,
+ TNode<IntPtrT> argc, TNode<Object> data,
+ TNode<Object> holder, TNode<Object> receiver,
+ TNode<Object> value);
+
+ TNode<Object> CallRuntimeNewArray(TNode<Context> context,
+ TNode<Object> receiver,
+ TNode<Object> length,
+ TNode<Object> new_target,
+ TNode<Object> allocation_site);
+
+ void TailCallRuntimeNewArray(TNode<Context> context, TNode<Object> receiver,
+ TNode<Object> length, TNode<Object> new_target,
+ TNode<Object> allocation_site);
+
template <class... TArgs>
TNode<JSReceiver> ConstructWithTarget(TNode<Context> context,
TNode<JSReceiver> target,
@@ -1036,9 +1069,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> LoadFromParentFrame(int offset);
// Load an object pointer from a buffer that isn't in the heap.
- template <typename T = Object>
- TNode<T> LoadBufferObject(TNode<RawPtrT> buffer, int offset) {
- return CAST(Load(MachineTypeOf<T>::value, buffer, IntPtrConstant(offset)));
+ TNode<Object> LoadBufferObject(TNode<RawPtrT> buffer, int offset) {
+ return LoadFullTagged(buffer, IntPtrConstant(offset));
}
template <typename T>
TNode<T> LoadBufferData(TNode<RawPtrT> buffer, int offset) {
@@ -1049,7 +1081,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return LoadBufferData<RawPtrT>(buffer, offset);
}
TNode<Smi> LoadBufferSmi(TNode<RawPtrT> buffer, int offset) {
- return LoadBufferObject<Smi>(buffer, offset);
+ return CAST(LoadBufferObject(buffer, offset));
}
// Load a field from an object on the heap.
template <class T, typename std::enable_if<
@@ -1625,12 +1657,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
CheckBounds::kDebugOnly);
}
- void StoreFixedArrayOrPropertyArrayElement(
- Node* array, Node* index, Node* value,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
-
void StoreFixedArrayElement(
TNode<FixedArray> array, Node* index, SloppyTNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
@@ -1741,8 +1767,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* bailout);
void TryStoreArrayElement(ElementsKind kind, ParameterMode mode,
- Label* bailout, Node* elements, Node* index,
- Node* value);
+ Label* bailout, TNode<FixedArrayBase> elements,
+ Node* index, TNode<Object> value);
// Consumes args into the array, and returns tagged new length.
TNode<Smi> BuildAppendJSArray(ElementsKind kind, TNode<JSArray> array,
CodeStubArguments* args,
@@ -1939,7 +1965,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_bailout);
TNode<Object> GetConstructor(TNode<Map> map);
- TNode<Map> GetStructMap(InstanceType instance_type);
+ TNode<Map> GetInstanceTypeMap(InstanceType instance_type);
TNode<FixedArray> AllocateUninitializedFixedArray(intptr_t capacity) {
return UncheckedCast<FixedArray>(AllocateFixedArray(
@@ -1981,9 +2007,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return result;
}
- Node* AllocatePropertyArray(Node* capacity,
- ParameterMode mode = INTPTR_PARAMETERS,
- AllocationFlags flags = kNone);
+ TNode<PropertyArray> AllocatePropertyArray(
+ Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
+ AllocationFlags flags = kNone);
// Perform CreateArrayIterator (ES #sec-createarrayiterator).
TNode<JSArrayIterator> CreateArrayIterator(TNode<Context> context,
@@ -2004,8 +2030,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> originalArray,
TNode<Number> len);
- void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
- Node* to_index, RootIndex value_root_index,
+ void FillFixedArrayWithValue(ElementsKind kind, TNode<FixedArrayBase> array,
+ Node* from_index, Node* to_index,
+ RootIndex value_root_index,
ParameterMode mode = INTPTR_PARAMETERS);
// Uses memset to effectively initialize the given FixedArray with zeroes.
@@ -2014,18 +2041,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void FillFixedDoubleArrayWithZero(TNode<FixedDoubleArray> array,
TNode<IntPtrT> length);
- void FillPropertyArrayWithUndefined(Node* array, Node* from_index,
- Node* to_index,
+ void FillPropertyArrayWithUndefined(TNode<PropertyArray> array,
+ Node* from_index, Node* to_index,
ParameterMode mode = INTPTR_PARAMETERS);
enum class DestroySource { kNo, kYes };
// Collect the callable |maybe_target| feedback for either a CALL_IC or
- // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
+ // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|. There are
+ // two modes for feedback collection:
+ //
+ // kCollectFeedbackCell - collect JSFunctions, but devolve to the
+ // FeedbackCell as long as all JSFunctions
+ // seen share the same one.
+ // kDontCollectFeedbackCell - collect JSFunctions without devolving
+ // to the FeedbackCell in case a
+ // different JSFunction appears. Go directly
+ // to the Megamorphic sentinel value in this
+ // case.
+ enum class CallableFeedbackMode {
+ kCollectFeedbackCell,
+ kDontCollectFeedbackCell
+ };
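
A standalone sketch (not V8 code, names invented) of the transitions the two modes above imply; the real logic lives in CollectCallableFeedback:

#include <cassert>

enum class Mode { kCollectFeedbackCell, kDontCollectFeedbackCell };
enum class State { kUninitialized, kMonomorphicFunction, kFeedbackCell, kMegamorphic };

State Transition(State s, bool same_function, bool same_feedback_cell, Mode mode) {
  switch (s) {
    case State::kUninitialized:
      return State::kMonomorphicFunction;
    case State::kMonomorphicFunction:
      if (same_function) return s;
      if (mode == Mode::kCollectFeedbackCell && same_feedback_cell)
        return State::kFeedbackCell;  // devolve to the shared cell
      return State::kMegamorphic;     // go straight to the sentinel
    case State::kFeedbackCell:
      return same_feedback_cell ? s : State::kMegamorphic;
    case State::kMegamorphic:
      return s;
  }
  return s;
}

int main() {
  State s = Transition(State::kUninitialized, false, false, Mode::kCollectFeedbackCell);
  s = Transition(s, false, true, Mode::kCollectFeedbackCell);
  assert(s == State::kFeedbackCell);
  s = Transition(State::kMonomorphicFunction, false, true, Mode::kDontCollectFeedbackCell);
  assert(s == State::kMegamorphic);
  return 0;
}
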
void CollectCallableFeedback(TNode<Object> maybe_target,
TNode<Context> context,
TNode<FeedbackVector> feedback_vector,
- TNode<UintPtrT> slot_id);
+ TNode<UintPtrT> slot_id,
+ CallableFeedbackMode mode);
// Collect CALL_IC feedback for |maybe_target| function in the
// |feedback_vector| at |slot_id|, and the call counts in
@@ -2046,7 +2088,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Otherwise, specify DestroySource::kNo for operations where an Object is
// being cloned, to ensure that mutable HeapNumbers are unique between the
// source and cloned object.
- void CopyPropertyArrayValues(Node* from_array, Node* to_array, Node* length,
+ void CopyPropertyArrayValues(TNode<HeapObject> from_array,
+ TNode<PropertyArray> to_array, Node* length,
WriteBarrierMode barrier_mode,
ParameterMode mode,
DestroySource destroy_source);
@@ -2054,7 +2097,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Copies all elements from |from_array| of |length| size to
// |to_array| of the same size respecting the elements kind.
void CopyFixedArrayElements(
- ElementsKind kind, Node* from_array, Node* to_array, Node* length,
+ ElementsKind kind, TNode<FixedArrayBase> from_array,
+ TNode<FixedArrayBase> to_array, Node* length,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode mode = INTPTR_PARAMETERS) {
CopyFixedArrayElements(kind, from_array, kind, to_array,
@@ -2066,9 +2110,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  // zero to |to_array| of |capacity| size, respecting both arrays' elements
// kinds.
void CopyFixedArrayElements(
- ElementsKind from_kind, TNode<Object> from_array, ElementsKind to_kind,
- TNode<Object> to_array, TNode<IntPtrT> element_count,
- TNode<IntPtrT> capacity,
+ ElementsKind from_kind, TNode<FixedArrayBase> from_array,
+ ElementsKind to_kind, TNode<FixedArrayBase> to_array,
+ TNode<IntPtrT> element_count, TNode<IntPtrT> capacity,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode mode = INTPTR_PARAMETERS) {
CopyFixedArrayElements(from_kind, from_array, to_kind, to_array,
@@ -2085,8 +2129,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// HoleConversionMode::kConvertToUndefined, then it must not be the case that
// IsDoubleElementsKind(to_kind).
void CopyFixedArrayElements(
- ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
- Node* to_array, Node* first_element, Node* element_count, Node* capacity,
+ ElementsKind from_kind, TNode<FixedArrayBase> from_array,
+ ElementsKind to_kind, TNode<FixedArrayBase> to_array, Node* first_element,
+ Node* element_count, Node* capacity,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode mode = INTPTR_PARAMETERS,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
@@ -2187,13 +2232,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  // runtime elements kind of source to make the copy faster. More specifically, it
// can skip write barriers.
TNode<FixedArrayBase> ExtractFixedArray(
- Node* source, Node* first, Node* count = nullptr,
+ TNode<FixedArrayBase> source, Node* first, Node* count = nullptr,
Node* capacity = nullptr,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
TVariable<BoolT>* var_holes_converted = nullptr,
- Node* source_elements_kind = nullptr);
+ base::Optional<TNode<Int32T>> source_elements_kind = base::nullopt);
TNode<FixedArrayBase> ExtractFixedArray(
TNode<FixedArrayBase> source, TNode<Smi> first, TNode<Smi> count,
@@ -2250,7 +2295,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ParameterMode parameter_mode = INTPTR_PARAMETERS,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert,
TVariable<BoolT>* var_holes_converted = nullptr,
- Node* source_runtime_kind = nullptr);
+ base::Optional<TNode<Int32T>> source_runtime_kind = base::nullopt);
// Attempt to copy a FixedDoubleArray to another FixedDoubleArray. In the case
// where the source array has a hole, produce a FixedArray instead where holes
@@ -2271,8 +2316,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// * |parameter_mode| determines the parameter mode of |first|, |count| and
// |capacity|.
TNode<FixedArrayBase> ExtractFixedDoubleArrayFillingHoles(
- Node* source, Node* first, Node* count, Node* capacity, Node* source_map,
- TVariable<BoolT>* var_holes_converted, AllocationFlags allocation_flags,
+ TNode<FixedArrayBase> source, Node* first, Node* count, Node* capacity,
+ TNode<Map> source_map, TVariable<BoolT>* var_holes_converted,
+ AllocationFlags allocation_flags,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
@@ -2318,34 +2364,34 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
- TNode<FixedArrayBase> TryGrowElementsCapacity(Node* object, Node* elements,
- ElementsKind kind, Node* key,
- Label* bailout);
+ TNode<FixedArrayBase> TryGrowElementsCapacity(TNode<HeapObject> object,
+ TNode<FixedArrayBase> elements,
+ ElementsKind kind,
+ TNode<Smi> key, Label* bailout);
// Tries to grow the |capacity|-length |elements| array of given |object|
// to store the |key| or bails out if the growing gap is too big. Returns
// new elements.
- TNode<FixedArrayBase> TryGrowElementsCapacity(Node* object, Node* elements,
+ TNode<FixedArrayBase> TryGrowElementsCapacity(TNode<HeapObject> object,
+ TNode<FixedArrayBase> elements,
ElementsKind kind, Node* key,
Node* capacity,
ParameterMode mode,
Label* bailout);
// Grows elements capacity of given object. Returns new elements.
- TNode<FixedArrayBase> GrowElementsCapacity(Node* object, Node* elements,
- ElementsKind from_kind,
- ElementsKind to_kind,
- Node* capacity, Node* new_capacity,
- ParameterMode mode,
- Label* bailout);
+ TNode<FixedArrayBase> GrowElementsCapacity(
+ TNode<HeapObject> object, TNode<FixedArrayBase> elements,
+ ElementsKind from_kind, ElementsKind to_kind, Node* capacity,
+ Node* new_capacity, ParameterMode mode, Label* bailout);
// Given a need to grow by |growth|, allocate an appropriate new capacity
// if necessary, and return a new elements FixedArray object. Label |bailout|
// is followed for allocation failure.
void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind,
- Node* array, Node* length,
- Variable* var_elements, Node* growth,
- Label* bailout);
+ TNode<HeapObject> array, Node* length,
+ TVariable<FixedArrayBase>* var_elements,
+ Node* growth, Label* bailout);
// Allocation site manipulation
void InitializeAllocationMemento(TNode<HeapObject> base,
@@ -2474,6 +2520,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsConsStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsConstructorMap(SloppyTNode<Map> map);
TNode<BoolT> IsConstructor(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsCoverageInfo(TNode<HeapObject> object);
TNode<BoolT> IsDebugInfo(TNode<HeapObject> object);
TNode<BoolT> IsDeprecatedMap(SloppyTNode<Map> map);
TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
@@ -2488,7 +2535,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsFixedArraySubclass(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFixedArrayWithKind(SloppyTNode<HeapObject> object,
ElementsKind kind);
- TNode<BoolT> IsFixedArrayWithKindOrEmpty(SloppyTNode<HeapObject> object,
+ TNode<BoolT> IsFixedArrayWithKindOrEmpty(SloppyTNode<FixedArrayBase> object,
ElementsKind kind);
TNode<BoolT> IsFixedDoubleArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFunctionWithPrototypeSlotMap(SloppyTNode<Map> map);
@@ -2583,6 +2630,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsInternalizedStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsUniqueName(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoIndex(TNode<HeapObject> object);
+ TNode<BoolT> IsUniqueNameNoCachedIndex(TNode<HeapObject> object);
TNode<BoolT> IsUndetectableMap(SloppyTNode<Map> map);
TNode<BoolT> IsNotWeakFixedArraySubclass(SloppyTNode<HeapObject> object);
TNode<BoolT> IsZeroOrContext(SloppyTNode<Object> object);
@@ -2637,7 +2685,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// [0, 2^32-1).
TNode<BoolT> IsNumberArrayIndex(TNode<Number> number);
- Node* FixedArraySizeDoesntFitInNewSpace(
+ TNode<BoolT> FixedArraySizeDoesntFitInNewSpace(
Node* element_count, int base_size = FixedArray::kHeaderSize,
ParameterMode mode = INTPTR_PARAMETERS);
@@ -2725,23 +2773,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSReceiver> ToObject_Inline(TNode<Context> context,
TNode<Object> input);
- enum ToIntegerTruncationMode {
- kNoTruncation,
- kTruncateMinusZero,
- };
-
// ES6 7.1.15 ToLength, but with inlined fast path.
TNode<Number> ToLength_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input);
- // ES6 7.1.4 ToInteger ( argument )
- TNode<Number> ToInteger_Inline(SloppyTNode<Context> context,
- SloppyTNode<Object> input,
- ToIntegerTruncationMode mode = kNoTruncation);
- TNode<Number> ToInteger(SloppyTNode<Context> context,
- SloppyTNode<Object> input,
- ToIntegerTruncationMode mode = kNoTruncation);
-
// Returns a node that contains a decoded (unsigned!) value of a bit
  // field |BitField| in |word32|. Returns result as a uint32 node.
template <typename BitField>
@@ -3091,9 +3126,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* bailout);
// Tries to check if {object} has own {unique_name} property.
- void TryHasOwnProperty(Node* object, Node* map, Node* instance_type,
- Node* unique_name, Label* if_found,
- Label* if_not_found, Label* if_bailout);
+ void TryHasOwnProperty(TNode<HeapObject> object, TNode<Map> map,
+ TNode<Int32T> instance_type, TNode<Name> unique_name,
+ Label* if_found, Label* if_not_found,
+ Label* if_bailout);
// Operating mode for TryGetOwnProperty and CallGetterIfAccessor
// kReturnAccessorPair is used when we're only getting the property descriptor
@@ -3192,7 +3228,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
// Note: this code does not check if the global dictionary points to deleted
// entry! This has to be done by the caller.
- void TryLookupProperty(SloppyTNode<JSReceiver> object, SloppyTNode<Map> map,
+ void TryLookupProperty(SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<Int32T> instance_type,
SloppyTNode<Name> unique_name, Label* if_found_fast,
Label* if_found_dict, Label* if_found_global,
@@ -3350,8 +3386,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<UintPtrT> length, TNode<IntPtrT> key,
Label* bailout);
- Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind,
- Node* length, ParameterMode mode, Label* bailout);
+ TNode<FixedArrayBase> CopyElementsOnWrite(TNode<HeapObject> object,
+ TNode<FixedArrayBase> elements,
+ ElementsKind kind, Node* length,
+ ParameterMode mode, Label* bailout);
void TransitionElementsKind(TNode<JSObject> object, TNode<Map> map,
ElementsKind from_kind, ElementsKind to_kind,
@@ -3558,7 +3596,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint32T> LoadJSArrayBufferBitField(TNode<JSArrayBuffer> array_buffer);
TNode<RawPtrT> LoadJSArrayBufferBackingStore(
TNode<JSArrayBuffer> array_buffer);
- TNode<BoolT> IsDetachedBuffer(TNode<JSArrayBuffer> buffer);
void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
TNode<JSArrayBuffer> array_buffer,
const char* method_name);
@@ -3611,9 +3648,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate();
// for..in helpers
- void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map,
- Label* if_fast, Label* if_slow);
- TNode<Map> CheckEnumCache(TNode<HeapObject> receiver, Label* if_empty,
+ void CheckPrototypeEnumCache(TNode<JSReceiver> receiver,
+ TNode<Map> receiver_map, Label* if_fast,
+ Label* if_slow);
+ TNode<Map> CheckEnumCache(TNode<JSReceiver> receiver, Label* if_empty,
Label* if_runtime);
TNode<Object> GetArgumentValue(TorqueStructArguments args,
@@ -3635,8 +3673,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
STATIC_ASSERT(sizeof...(TArgs) <= 3);
const TNode<Object> make_type_error = LoadContextElement(
LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX);
- return CAST(CallJS(CodeFactory::Call(isolate()), context, make_type_error,
- UndefinedConstant(), SmiConstant(message), args...));
+ return CAST(Call(context, make_type_error, UndefinedConstant(),
+ SmiConstant(message), args...));
}
void Abort(AbortReason reason) {
@@ -3649,6 +3687,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; }
bool ConstexprInt31NotEqual(int31_t a, int31_t b) { return a != b; }
bool ConstexprInt31GreaterThanEqual(int31_t a, int31_t b) { return a >= b; }
+ bool ConstexprInt32Equal(int32_t a, int32_t b) { return a == b; }
+ bool ConstexprInt32NotEqual(int32_t a, int32_t b) { return a != b; }
bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; }
uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; }
int31_t ConstexprInt31Add(int31_t a, int31_t b) {
@@ -3892,6 +3932,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> LoadRoot(RootIndex root_index) {
return CodeAssembler::LoadRoot(root_index);
}
+
+ void StoreFixedArrayOrPropertyArrayElement(
+ TNode<UnionT<FixedArray, PropertyArray>> array, Node* index,
+ TNode<Object> value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
};
// template <typename TIndex>
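The new StoreFixedArrayOrPropertyArrayElement declaration types its array parameter as TNode<UnionT<FixedArray, PropertyArray>>, letting a single store helper serve either backing store. A rough std::variant analogue of what that union type expresses — illustrative only, not how V8 represents it:

    #include <variant>

    struct FixedArray    { int slots[8]; };
    struct PropertyArray { int slots[8]; };

    // One helper accepts either backing store and dispatches on the held type.
    using Backing = std::variant<FixedArray*, PropertyArray*>;

    void StoreElement(Backing array, int index, int value) {
      std::visit([&](auto* arr) { arr->slots[index] = value; }, array);
    }

    int main() {
      FixedArray fixed{};
      PropertyArray props{};
      StoreElement(&fixed, 2, 7);   // pointer converts into the variant
      StoreElement(&props, 3, 9);
    }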
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 04e80f5a6e..595e59f551 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -11,12 +11,14 @@
#include "src/asmjs/asm-js.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
+#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/pending-optimization-table.h"
#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
@@ -24,16 +26,22 @@
#include "src/compiler/pipeline.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
+#include "src/diagnostics/code-tracer.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/isolate.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/maybe-handles.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log-inl.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/map.h"
+#include "src/objects/object-list-macros.h"
+#include "src/objects/shared-function-info.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/parsing.h"
@@ -161,6 +169,16 @@ CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob(
return UpdateState(FinalizeJobImpl(shared_info, isolate), State::kSucceeded);
}
+CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob(
+ Handle<SharedFunctionInfo> shared_info, OffThreadIsolate* isolate) {
+ DisallowHeapAccess no_heap_access;
+
+ // Delegate to the underlying implementation.
+ DCHECK_EQ(state(), State::kReadyToFinalize);
+ ScopedTimer t(&time_taken_to_finalize_);
+ return UpdateState(FinalizeJobImpl(shared_info, isolate), State::kSucceeded);
+}
+
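The overload above opens with a DisallowHeapAccess scope before delegating, documenting (and in debug builds enforcing) that finalizing on a background thread must not touch the main heap. A toy standalone analogue of such an assert scope — V8's real scopes are DEBUG-only and richer, this is just the shape:

    #include <cassert>

    thread_local bool heap_access_allowed = true;

    // RAII guard: while alive, any allocation path asserts.
    struct DisallowHeapAccess {
      bool saved_ = heap_access_allowed;  // captured before the ctor body runs
      DisallowHeapAccess() { heap_access_allowed = false; }
      ~DisallowHeapAccess() { heap_access_allowed = saved_; }
    };

    void AllocateOnMainHeap() { assert(heap_access_allowed); }

    void FinalizeJobOffThread() {
      DisallowHeapAccess no_heap_access;  // as in the overload above
      // ... only off-thread-safe work is legal here ...
    }

    int main() { FinalizeJobOffThread(); }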
void UnoptimizedCompilationJob::RecordCompilationStats(Isolate* isolate) const {
int code_size;
if (compilation_info()->has_bytecode_array()) {
@@ -208,7 +226,8 @@ CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
DisallowJavascriptExecution no_js(isolate);
if (FLAG_trace_opt && compilation_info()->IsOptimizing()) {
- StdoutStream os;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ OFStream os(scope.file());
os << "[compiling method " << Brief(*compilation_info()->closure())
<< " using " << compiler_name_;
if (compilation_info()->is_osr()) os << " OSR";
@@ -262,10 +281,11 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
double ms_optimize = time_taken_to_execute_.InMillisecondsF();
double ms_codegen = time_taken_to_finalize_.InMillisecondsF();
if (FLAG_trace_opt) {
- PrintF("[optimizing ");
- function->ShortPrint();
- PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
- ms_codegen);
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[optimizing ");
+ function->ShortPrint(scope.file());
+ PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph,
+ ms_optimize, ms_codegen);
}
if (FLAG_trace_opt_stats) {
static double compilation_time = 0.0;
@@ -442,9 +462,40 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
}
}
+void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
+ Handle<SharedFunctionInfo> shared_info,
+ ParseInfo* parse_info, OffThreadIsolate* isolate) {
+ DCHECK_EQ(shared_info->language_mode(),
+ compilation_info->literal()->language_mode());
+
+ // Update the shared function info with the scope info.
+ Handle<ScopeInfo> scope_info = compilation_info->scope()->scope_info();
+ shared_info->set_scope_info(*scope_info);
+
+ DCHECK(compilation_info->has_bytecode_array());
+ DCHECK(!shared_info->HasBytecodeArray()); // Only compiled once.
+ DCHECK(!compilation_info->has_asm_wasm_data());
+ DCHECK(!shared_info->HasFeedbackMetadata());
+
+ // If the function failed asm-wasm compilation, mark asm_wasm as broken
+ // to ensure we don't try to compile as asm-wasm.
+ if (compilation_info->literal()->scope()->IsAsmModule()) {
+ shared_info->set_is_asm_wasm_broken(true);
+ }
+
+ shared_info->set_bytecode_array(*compilation_info->bytecode_array());
+
+ Handle<FeedbackMetadata> feedback_metadata =
+ FeedbackMetadata::New(isolate, compilation_info->feedback_vector_spec());
+ shared_info->set_feedback_metadata(*feedback_metadata);
+
+ DCHECK(!compilation_info->has_coverage_info());
+}
+
+template <typename LocalIsolate>
void EnsureSharedFunctionInfosArrayOnScript(Handle<Script> script,
ParseInfo* parse_info,
- Isolate* isolate) {
+ LocalIsolate* isolate) {
DCHECK(parse_info->is_toplevel());
if (script->shared_function_infos().length() > 0) {
DCHECK_EQ(script->shared_function_infos().length(),
@@ -452,21 +503,25 @@ void EnsureSharedFunctionInfosArrayOnScript(Handle<Script> script,
return;
}
Handle<WeakFixedArray> infos(isolate->factory()->NewWeakFixedArray(
- parse_info->max_function_literal_id() + 1));
+ parse_info->max_function_literal_id() + 1, AllocationType::kOld));
script->set_shared_function_infos(*infos);
}
void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
- Handle<SharedFunctionInfo> shared_info) {
- shared_info->set_has_duplicate_parameters(
- literal->has_duplicate_parameters());
- shared_info->set_is_oneshot_iife(literal->is_oneshot_iife());
- shared_info->UpdateAndFinalizeExpectedNofPropertiesFromEstimate(literal);
+ SharedFunctionInfo shared_info) {
+ shared_info.set_has_duplicate_parameters(literal->has_duplicate_parameters());
+ shared_info.set_is_oneshot_iife(literal->is_oneshot_iife());
+ shared_info.UpdateAndFinalizeExpectedNofPropertiesFromEstimate(literal);
if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
- shared_info->DisableOptimization(literal->dont_optimize_reason());
+ shared_info.DisableOptimization(literal->dont_optimize_reason());
}
- shared_info->set_is_safe_to_skip_arguments_adaptor(
+
+ shared_info.set_class_scope_has_private_brand(
+ literal->class_scope_has_private_brand());
+ shared_info.set_is_safe_to_skip_arguments_adaptor(
literal->SafeToSkipArgumentsAdaptor());
+ shared_info.set_has_static_private_methods_or_accessors(
+ literal->has_static_private_methods_or_accessors());
}
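SetSharedFunctionFlagsFromLiteral now takes SharedFunctionInfo by value instead of Handle<SharedFunctionInfo>, and callers dereference the handle once (see the *shared_info change below). That is only safe because the function allocates nothing, so no GC can move the object mid-call. A toy illustration of the handle-versus-raw distinction under a moving collector, stand-in types only:

    #include <cstdio>

    struct Object { int flags = 0; };

    // A handle is a stable slot the GC rewrites when it moves the object;
    // raw access is safe only across code that cannot trigger GC.
    struct Handle {
      Object** slot;
      Object& operator*() const { return **slot; }
    };

    // No allocation here, so operating on the raw object is fine.
    void SetFlags(Object& raw) { raw.flags = 1; }

    int main() {
      Object obj;
      Object* location = &obj;   // a moving GC would update this slot
      Handle h{&location};
      SetFlags(*h);              // dereference once at the call boundary
      std::printf("%d\n", obj.flags);
    }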
CompilationJob::Status FinalizeUnoptimizedCompilationJob(
@@ -475,7 +530,7 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(
UnoptimizedCompilationInfo* compilation_info = job->compilation_info();
ParseInfo* parse_info = job->parse_info();
- SetSharedFunctionFlagsFromLiteral(compilation_info->literal(), shared_info);
+ SetSharedFunctionFlagsFromLiteral(compilation_info->literal(), *shared_info);
CompilationJob::Status status = job->FinalizeJob(shared_info, isolate);
if (status == CompilationJob::SUCCEEDED) {
@@ -485,7 +540,8 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(
  // background compile was started, in which case the compiled bytecode will
  // be missing source positions (for instance by enabling the cpu profiler).
  // So force source position collection now in that case.
- if (isolate->NeedsDetailedOptimizedCodeLineInfo()) {
+ if (!parse_info->collect_source_positions() &&
+ isolate->NeedsDetailedOptimizedCodeLineInfo()) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
}
@@ -503,6 +559,23 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(
return status;
}
+CompilationJob::Status FinalizeUnoptimizedCompilationJob(
+ UnoptimizedCompilationJob* job, Handle<SharedFunctionInfo> shared_info,
+ OffThreadIsolate* isolate) {
+ UnoptimizedCompilationInfo* compilation_info = job->compilation_info();
+ ParseInfo* parse_info = job->parse_info();
+
+ SetSharedFunctionFlagsFromLiteral(compilation_info->literal(), *shared_info);
+
+ CompilationJob::Status status = job->FinalizeJob(shared_info, isolate);
+ if (status == CompilationJob::SUCCEEDED) {
+ InstallUnoptimizedCode(compilation_info, shared_info, parse_info, isolate);
+
+ // TODO(leszeks): Record the function compilation and compilation stats.
+ }
+ return status;
+}
+
std::unique_ptr<UnoptimizedCompilationJob> ExecuteUnoptimizedCompileJobs(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
@@ -564,7 +637,7 @@ MaybeHandle<SharedFunctionInfo> GenerateUnoptimizedCodeForToplevel(
Isolate* isolate, Handle<Script> script, ParseInfo* parse_info,
AccountingAllocator* allocator, IsCompiledScope* is_compiled_scope) {
EnsureSharedFunctionInfosArrayOnScript(script, parse_info, isolate);
- parse_info->ast_value_factory()->Internalize(isolate->factory());
+ parse_info->ast_value_factory()->Internalize(isolate);
if (!Compiler::Analyze(parse_info)) return MaybeHandle<SharedFunctionInfo>();
DeclarationScope::AllocateScopeInfos(parse_info, isolate);
@@ -629,12 +702,14 @@ MaybeHandle<SharedFunctionInfo> GenerateUnoptimizedCodeForToplevel(
return top_level;
}
+template <typename LocalIsolate>
bool FinalizeUnoptimizedCode(
- ParseInfo* parse_info, Isolate* isolate,
+ ParseInfo* parse_info, LocalIsolate* isolate,
Handle<SharedFunctionInfo> shared_info,
UnoptimizedCompilationJob* outer_function_job,
UnoptimizedCompilationJobList* inner_function_jobs) {
- DCHECK(AllowCompilation::IsAllowed(isolate));
+ // TODO(leszeks): Re-enable.
+ // DCHECK(AllowCompilation::IsAllowed(isolate));
  // TODO(rmcilroy): Clear native context in debug once AsmJS generation doesn't
  // rely on accessing the native context during finalization.
@@ -649,6 +724,7 @@ bool FinalizeUnoptimizedCode(
}
Handle<Script> script(Script::cast(shared_info->script()), isolate);
+ parse_info->CheckFlagsForFunctionFromScript(*script);
// Finalize the inner functions' compilation jobs.
for (auto&& inner_job : *inner_function_jobs) {
@@ -752,9 +828,10 @@ bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate) {
CompilationJob::SUCCEEDED ||
job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
if (FLAG_trace_opt) {
- PrintF("[aborted optimizing ");
- compilation_info->closure()->ShortPrint();
- PrintF(" because: %s]\n",
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[aborted optimizing ");
+ compilation_info->closure()->ShortPrint(scope.file());
+ PrintF(scope.file(), " because: %s]\n",
GetBailoutReason(compilation_info->bailout_reason()));
}
return false;
@@ -838,12 +915,13 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
if (GetCodeFromOptimizedCodeCache(function, osr_offset)
.ToHandle(&cached_code)) {
if (FLAG_trace_opt) {
- PrintF("[found optimized code for ");
- function->ShortPrint();
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[found optimized code for ");
+ function->ShortPrint(scope.file());
if (!osr_offset.IsNone()) {
- PrintF(" at OSR AST id %d", osr_offset.ToInt());
+ PrintF(scope.file(), " at OSR AST id %d", osr_offset.ToInt());
}
- PrintF("]\n");
+ PrintF(scope.file(), "]\n");
}
return cached_code;
}
@@ -932,6 +1010,13 @@ bool FailWithPendingException(Isolate* isolate, Handle<Script> script,
return false;
}
+bool FailWithPendingException(OffThreadIsolate* isolate, Handle<Script> script,
+ ParseInfo* parse_info,
+ Compiler::ClearExceptionFlag flag) {
+ // TODO(leszeks): Implement.
+ UNREACHABLE();
+}
+
void FinalizeScriptCompilation(Isolate* isolate, Handle<Script> script,
ParseInfo* parse_info) {
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
@@ -954,12 +1039,19 @@ void FinalizeScriptCompilation(Isolate* isolate, Handle<Script> script,
}
}
+void FinalizeScriptCompilation(OffThreadIsolate* isolate, Handle<Script> script,
+ ParseInfo* parse_info) {
+ script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ DCHECK(!parse_info->parallel_tasks());
+}
+
+template <typename LocalIsolate>
MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
- ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
+ ParseInfo* parse_info, Handle<Script> script, LocalIsolate* isolate,
UnoptimizedCompilationJob* outer_function_job,
UnoptimizedCompilationJobList* inner_function_jobs) {
// Internalize ast values onto the heap.
- parse_info->ast_value_factory()->Internalize(isolate->factory());
+ parse_info->ast_value_factory()->Internalize(isolate);
// Create shared function infos for top level and shared function infos array
// for inner functions.
@@ -984,7 +1076,8 @@ MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
}
MaybeHandle<SharedFunctionInfo> CompileToplevel(
- ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
+ ParseInfo* parse_info, Handle<Script> script,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info, Isolate* isolate,
IsCompiledScope* is_compiled_scope) {
TimerEventScope<TimerEventCompileCode> top_level_timer(isolate);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
@@ -997,7 +1090,8 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
: RuntimeCallCounterId::kCompileScript);
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
- !parsing::ParseProgram(parse_info, script, isolate)) {
+ !parsing::ParseProgram(parse_info, script, maybe_outer_scope_info,
+ isolate)) {
return MaybeHandle<SharedFunctionInfo>();
}
// Measure how long it takes to do the compilation; only take the
@@ -1044,34 +1138,43 @@ std::unique_ptr<UnoptimizedCompilationJob> CompileOnBackgroundThread(
return outer_function_job;
}
+MaybeHandle<SharedFunctionInfo> CompileToplevel(
+ ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
+ IsCompiledScope* is_compiled_scope) {
+ return CompileToplevel(parse_info, script, kNullMaybeHandle, isolate,
+ is_compiled_scope);
+}
+
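Rather than update every existing caller for the new maybe_outer_scope_info parameter, the patch adds this thin overload that forwards kNullMaybeHandle. The keep-callers-compiling pattern in miniature, with std::optional standing in for MaybeHandle:

    #include <optional>

    // The richer signature does the work...
    int CompileToplevel(int parse_info, std::optional<int> outer_scope_info) {
      return parse_info + outer_scope_info.value_or(0);
    }

    // ...and the original signature forwards an empty value, so existing
    // call sites compile unchanged (std::nullopt ~ kNullMaybeHandle).
    int CompileToplevel(int parse_info) {
      return CompileToplevel(parse_info, std::nullopt);
    }

    int main() { return CompileToplevel(1) == 1 ? 0 : 1; }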
} // namespace
BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
Isolate* isolate)
: info_(new ParseInfo(isolate)),
+ off_thread_isolate_(FLAG_finalize_streaming_on_background
+ ? new OffThreadIsolate(isolate, info_->zone())
+ : nullptr),
stack_size_(i::FLAG_stack_size),
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
allocator_(isolate->allocator()),
- timer_(isolate->counters()->compile_script_on_background()) {
+ timer_(isolate->counters()->compile_script_on_background()),
+ collected_source_positions_(false) {
VMState<PARSER> state(isolate);
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
LOG(isolate, ScriptEvent(Logger::ScriptEventType::kStreamingCompile,
info_->script_id()));
- info_->set_toplevel();
- info_->set_allow_lazy_parsing();
- if (V8_UNLIKELY(info_->block_coverage_enabled())) {
- info_->AllocateSourceRangeMap();
- }
- LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- info_->set_language_mode(
- stricter_language_mode(info_->language_mode(), language_mode));
+ info_->SetFlagsForToplevelCompile(isolate->is_collecting_type_profile(), true,
+ construct_language_mode(FLAG_use_strict),
+ REPLMode::kNo);
+ language_mode_ = info_->language_mode();
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
streamed_data->source_stream.get(), streamed_data->encoding));
info_->set_character_stream(std::move(stream));
+
+ finalize_on_background_thread_ = FLAG_finalize_streaming_on_background;
}
BackgroundCompileTask::BackgroundCompileTask(
@@ -1084,7 +1187,10 @@ BackgroundCompileTask::BackgroundCompileTask(
stack_size_(max_stack_size),
worker_thread_runtime_call_stats_(worker_thread_runtime_stats),
allocator_(allocator),
- timer_(timer) {
+ timer_(timer),
+ language_mode_(info_->language_mode()),
+ collected_source_positions_(false),
+ finalize_on_background_thread_(false) {
DCHECK(outer_parse_info->is_toplevel());
DCHECK(!function_literal->is_toplevel());
@@ -1124,6 +1230,7 @@ class OffThreadParseInfoScope {
}
~OffThreadParseInfoScope() {
+ DCHECK_NOT_NULL(parse_info_);
parse_info_->set_stack_limit(original_stack_limit_);
parse_info_->set_runtime_call_stats(original_runtime_call_stats_);
}
@@ -1145,8 +1252,9 @@ void BackgroundCompileTask::Run() {
DisallowHeapAccess no_heap_access;
TimedHistogramScope timer(timer_);
- OffThreadParseInfoScope off_thread_scope(
- info_.get(), worker_thread_runtime_call_stats_, stack_size_);
+ base::Optional<OffThreadParseInfoScope> off_thread_scope(
+ base::in_place, info_.get(), worker_thread_runtime_call_stats_,
+ stack_size_);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"BackgroundCompileTask::Run");
RuntimeCallTimerScope runtimeTimer(
@@ -1167,6 +1275,52 @@ void BackgroundCompileTask::Run() {
// Parsing has succeeded, compile.
outer_function_job_ = CompileOnBackgroundThread(info_.get(), allocator_,
&inner_function_jobs_);
+ // Save the language mode and record whether we collected source positions.
+ language_mode_ = info_->language_mode();
+ collected_source_positions_ = info_->collect_source_positions();
+
+ if (finalize_on_background_thread_) {
+ DCHECK(info_->is_toplevel());
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeCodeBackground");
+
+ off_thread_isolate_->PinToCurrentThread();
+
+ OffThreadHandleScope handle_scope(off_thread_isolate_.get());
+
+ // We don't have the script source or the script origin yet, so use a few
+ // default values for them. These will be fixed up during the main-thread
+ // merge.
+ Handle<Script> script =
+ info_->CreateScript(off_thread_isolate_.get(),
+ off_thread_isolate_->factory()->empty_string(),
+ ScriptOriginOptions(), NOT_NATIVES_CODE);
+
+ Handle<SharedFunctionInfo> outer_function_sfi =
+ FinalizeTopLevel(info_.get(), script, off_thread_isolate_.get(),
+ outer_function_job_.get(), &inner_function_jobs_)
+ .ToHandleChecked();
+
+ parser_->HandleSourceURLComments(off_thread_isolate_.get(), script);
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeCodeBackground.Finish");
+ off_thread_isolate_->FinishOffThread();
+
+ // Off-thread handles will become invalid after the handle scope closes,
+ // so save the raw object here.
+ outer_function_sfi_ = *outer_function_sfi;
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeCodeBackground.ReleaseParser");
+ DCHECK_EQ(language_mode_, info_->language_mode());
+ off_thread_scope.reset();
+ parser_.reset();
+ info_.reset();
+ outer_function_job_.reset();
+ inner_function_jobs_.clear();
+ }
}
}
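The block above is the core of finalize-on-background streaming: the task finalizes the top-level SharedFunctionInfo inside its own OffThreadIsolate, seals that space with FinishOffThread() so objects stop moving, stashes the raw SFI for the later main-thread publish, and then frees the parser and job state without ever touching the main isolate. A toy sketch of that seal-then-publish handoff, stand-in types throughout:

    #include <memory>
    #include <string>

    struct OffThreadArena {
      std::string result;                // stands in for the off-thread SFI
      void FinishOffThread() { /* seal: no further allocation or moves */ }
    };

    struct Task {
      std::unique_ptr<OffThreadArena> arena = std::make_unique<OffThreadArena>();
      std::string* published = nullptr;  // analogue of outer_function_sfi_

      void RunOnBackground() {
        arena->result = "top-level SFI"; // FinalizeTopLevel analogue
        arena->FinishOffThread();        // objects can no longer move
        published = &arena->result;      // raw pointer is now safe to keep
        // parser_/info_/job state would be reset here, still on this thread.
      }
    };

    int main() { Task t; t.RunOnBackground(); }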
@@ -1357,7 +1511,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
// Internalize ast values onto the heap.
- parse_info.ast_value_factory()->Internalize(isolate->factory());
+ parse_info.ast_value_factory()->Internalize(isolate);
// Finalize compilation of the unoptimized bytecode or asm-js data.
if (!FinalizeUnoptimizedCode(&parse_info, isolate, shared_info,
@@ -1422,9 +1576,10 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
if (FLAG_trace_opt) {
- PrintF("[optimizing ");
- function->ShortPrint();
- PrintF(" because --always-opt]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[optimizing ");
+ function->ShortPrint(scope.file());
+ PrintF(scope.file(), " because --always-opt]\n");
}
Handle<Code> opt_code;
if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent)
@@ -1456,9 +1611,7 @@ bool Compiler::FinalizeBackgroundCompileTask(
DCHECK(!shared_info->is_compiled());
Handle<Script> script(Script::cast(shared_info->script()), isolate);
- // TODO(leszeks): We can probably remove this, the parse_info flags should
- // already match the script's.
- parse_info->SetFlagsFromScript(isolate, *script);
+ parse_info->CheckFlagsForFunctionFromScript(*script);
task->parser()->UpdateStatistics(isolate, script);
task->parser()->HandleSourceURLComments(isolate, script);
@@ -1469,7 +1622,7 @@ bool Compiler::FinalizeBackgroundCompileTask(
}
// Parsing has succeeded - finalize compilation.
- parse_info->ast_value_factory()->Internalize(isolate->factory());
+ parse_info->ast_value_factory()->Internalize(isolate);
if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
task->outer_function_job(),
task->inner_function_jobs())) {
@@ -1565,9 +1718,21 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
allow_eval_cache = true;
} else {
ParseInfo parse_info(isolate);
+ parse_info.SetFlagsForToplevelCompile(isolate->is_collecting_type_profile(),
+ true, language_mode, REPLMode::kNo);
+
+ parse_info.set_eval();
+ parse_info.set_parse_restriction(restriction);
+ parse_info.set_parameters_end_pos(parameters_end_pos);
+ DCHECK(!parse_info.is_module());
+
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info;
+ if (!context->IsNativeContext()) {
+ maybe_outer_scope_info = handle(context->scope_info(), isolate);
+ }
+
script = parse_info.CreateScript(
isolate, source, OriginOptionsForEval(outer_info->script()));
- script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
script->set_eval_from_shared(*outer_info);
if (eval_position == kNoSourcePosition) {
// If the position is missing, attempt to get the code offset by
@@ -1586,16 +1751,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
}
script->set_eval_from_position(eval_position);
- parse_info.set_eval();
- parse_info.set_language_mode(language_mode);
- parse_info.set_parse_restriction(restriction);
- parse_info.set_parameters_end_pos(parameters_end_pos);
- if (!context->IsNativeContext()) {
- parse_info.set_outer_scope_info(handle(context->scope_info(), isolate));
- }
- DCHECK(!parse_info.is_module());
-
- if (!CompileToplevel(&parse_info, script, isolate, &is_compiled_scope)
+ if (!CompileToplevel(&parse_info, script, maybe_outer_scope_info, isolate,
+ &is_compiled_scope)
.ToHandle(&shared_info)) {
return MaybeHandle<JSFunction>();
}
@@ -1981,32 +2138,51 @@ struct ScriptCompileTimerScope {
}
};
-Handle<Script> NewScript(Isolate* isolate, ParseInfo* parse_info,
- Handle<String> source,
- Compiler::ScriptDetails script_details,
- ScriptOriginOptions origin_options,
- NativesFlag natives) {
- // Create a script object describing the script to be compiled.
- Handle<Script> script = parse_info->CreateScript(
- isolate, source, origin_options, script_details.repl_mode, natives);
+void SetScriptFieldsFromDetails(Script script,
+ Compiler::ScriptDetails script_details) {
Handle<Object> script_name;
if (script_details.name_obj.ToHandle(&script_name)) {
- script->set_name(*script_name);
- script->set_line_offset(script_details.line_offset);
- script->set_column_offset(script_details.column_offset);
+ script.set_name(*script_name);
+ script.set_line_offset(script_details.line_offset);
+ script.set_column_offset(script_details.column_offset);
}
Handle<Object> source_map_url;
if (script_details.source_map_url.ToHandle(&source_map_url)) {
- script->set_source_mapping_url(*source_map_url);
+ script.set_source_mapping_url(*source_map_url);
}
Handle<FixedArray> host_defined_options;
if (script_details.host_defined_options.ToHandle(&host_defined_options)) {
- script->set_host_defined_options(*host_defined_options);
+ script.set_host_defined_options(*host_defined_options);
}
+}
+
+Handle<Script> NewScript(Isolate* isolate, ParseInfo* parse_info,
+ Handle<String> source,
+ Compiler::ScriptDetails script_details,
+ ScriptOriginOptions origin_options,
+ NativesFlag natives) {
+ // Create a script object describing the script to be compiled.
+ Handle<Script> script =
+ parse_info->CreateScript(isolate, source, origin_options, natives);
+ SetScriptFieldsFromDetails(*script, script_details);
LOG(isolate, ScriptDetails(*script));
return script;
}
+void FixUpOffThreadAllocatedScript(Isolate* isolate, Handle<Script> script,
+ Handle<String> source,
+ Compiler::ScriptDetails script_details,
+ ScriptOriginOptions origin_options,
+ NativesFlag natives) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(natives, NOT_NATIVES_CODE);
+ DCHECK_EQ(script_details.repl_mode, REPLMode::kNo);
+ script->set_origin_options(origin_options);
+ script->set_source(*source);
+ SetScriptFieldsFromDetails(*script, script_details);
+ LOG(isolate, ScriptDetails(*script));
+}
+
} // namespace
MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
@@ -2077,19 +2253,24 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
if (maybe_result.is_null()) {
- ParseInfo parse_info(isolate);
// No cache entry found compile the script.
+ ParseInfo parse_info(isolate);
+
+ parse_info.SetFlagsForToplevelCompile(
+ isolate->is_collecting_type_profile(), natives == NOT_NATIVES_CODE,
+ language_mode, script_details.repl_mode);
+
+ parse_info.set_module(origin_options.IsModule());
+ parse_info.set_extension(extension);
+ parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
+
Handle<Script> script = NewScript(isolate, &parse_info, source,
script_details, origin_options, natives);
+ DCHECK_IMPLIES(parse_info.collect_type_profile(),
+ script->IsUserJavaScript());
DCHECK_EQ(parse_info.is_repl_mode(), script->is_repl_mode());
// Compile the function and add it to the isolate cache.
- if (origin_options.IsModule()) parse_info.set_module();
- parse_info.set_extension(extension);
- parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
-
- parse_info.set_language_mode(
- stricter_language_mode(parse_info.language_mode(), language_mode));
maybe_result =
CompileToplevel(&parse_info, script, isolate, &is_compiled_scope);
Handle<SharedFunctionInfo> result;
@@ -2151,27 +2332,30 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
IsCompiledScope is_compiled_scope;
if (!maybe_result.ToHandle(&wrapped)) {
ParseInfo parse_info(isolate);
- script = NewScript(isolate, &parse_info, source, script_details,
- origin_options, NOT_NATIVES_CODE);
- script->set_wrapped_arguments(*arguments);
+ parse_info.SetFlagsForToplevelCompile(isolate->is_collecting_type_profile(),
+ true, language_mode,
+ script_details.repl_mode);
parse_info.set_eval(); // Use an eval scope as declaration scope.
parse_info.set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
- parse_info.set_wrapped_arguments(arguments);
// TODO(delphick): Remove this and instead make the wrapped and wrapper
    // functions fully non-lazy, thus preventing source positions from
// being omitted.
parse_info.set_collect_source_positions(true);
// parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
+
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info;
if (!context->IsNativeContext()) {
- parse_info.set_outer_scope_info(handle(context->scope_info(), isolate));
+ maybe_outer_scope_info = handle(context->scope_info(), isolate);
}
- parse_info.set_language_mode(
- stricter_language_mode(parse_info.language_mode(), language_mode));
+
+ script = NewScript(isolate, &parse_info, source, script_details,
+ origin_options, NOT_NATIVES_CODE);
+ script->set_wrapped_arguments(*arguments);
Handle<SharedFunctionInfo> top_level;
- maybe_result =
- CompileToplevel(&parse_info, script, isolate, &is_compiled_scope);
+ maybe_result = CompileToplevel(&parse_info, script, maybe_outer_scope_info,
+ isolate, &is_compiled_scope);
if (maybe_result.is_null()) isolate->ReportPendingMessages();
ASSIGN_RETURN_ON_EXCEPTION(isolate, top_level, maybe_result, JSFunction);
@@ -2208,59 +2392,107 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
isolate->counters()->total_compile_size()->Increment(source_length);
BackgroundCompileTask* task = streaming_data->task.get();
- ParseInfo* parse_info = task->info();
- DCHECK(parse_info->is_toplevel());
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
// Check if compile cache already holds the SFI, if so no need to finalize
// the code compiled on the background thread.
CompilationCache* compilation_cache = isolate->compilation_cache();
- MaybeHandle<SharedFunctionInfo> maybe_result =
- compilation_cache->LookupScript(
- source, script_details.name_obj, script_details.line_offset,
- script_details.column_offset, origin_options,
- isolate->native_context(), parse_info->language_mode());
- if (!maybe_result.is_null()) {
- compile_timer.set_hit_isolate_cache();
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.StreamingFinalization.CheckCache");
+ maybe_result = compilation_cache->LookupScript(
+ source, script_details.name_obj, script_details.line_offset,
+ script_details.column_offset, origin_options, isolate->native_context(),
+ task->language_mode());
+ if (!maybe_result.is_null()) {
+ compile_timer.set_hit_isolate_cache();
+ }
}
if (maybe_result.is_null()) {
- // No cache entry found, finalize compilation of the script and add it to
- // the isolate cache.
- Handle<Script> script =
- NewScript(isolate, parse_info, source, script_details, origin_options,
- NOT_NATIVES_CODE);
- task->parser()->UpdateStatistics(isolate, script);
- task->parser()->HandleSourceURLComments(isolate, script);
-
- if (parse_info->literal() == nullptr || !task->outer_function_job()) {
- // Parsing has failed - report error messages.
- FailWithPendingException(isolate, script, parse_info,
- Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
+ if (task->finalize_on_background_thread()) {
+ RuntimeCallTimerScope runtimeTimerScope(
+ isolate, RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish");
+
+ Handle<SharedFunctionInfo> sfi(task->outer_function_sfi(), isolate);
+ Handle<Script> script(Script::cast(sfi->script()), isolate);
+ task->off_thread_isolate()->factory()->Publish(isolate);
+
+ FixUpOffThreadAllocatedScript(isolate, script, source, script_details,
+ origin_options, NOT_NATIVES_CODE);
+
+ // It's possible that source position collection was enabled after the
+ // background compile was started (for instance by enabling the cpu
+ // profiler), and the compiled bytecode is missing source positions. So,
+ // walk all the SharedFunctionInfos in the script and force source
+ // position collection.
+ if (!task->collected_source_positions() &&
+ isolate->NeedsDetailedOptimizedCodeLineInfo()) {
+ Handle<WeakFixedArray> shared_function_infos(
+ script->shared_function_infos(isolate), isolate);
+ int length = shared_function_infos->length();
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i++, {
+ Object entry = shared_function_infos->Get(isolate, i)
+ .GetHeapObjectOrSmi(isolate);
+ if (entry.IsSharedFunctionInfo(isolate)) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(
+ isolate, handle(SharedFunctionInfo::cast(entry), isolate));
+ }
+ });
+ }
+
+ maybe_result = sfi;
} else {
- // Parsing has succeeded - finalize compilation.
- maybe_result = FinalizeTopLevel(parse_info, script, isolate,
- task->outer_function_job(),
- task->inner_function_jobs());
- if (maybe_result.is_null()) {
- // Finalization failed - throw an exception.
+ ParseInfo* parse_info = task->info();
+ DCHECK(parse_info->is_toplevel());
+
+ // No cache entry found, finalize compilation of the script and add it to
+ // the isolate cache.
+ Handle<Script> script =
+ NewScript(isolate, parse_info, source, script_details, origin_options,
+ NOT_NATIVES_CODE);
+ task->parser()->UpdateStatistics(isolate, script);
+ task->parser()->HandleSourceURLComments(isolate, script);
+
+ if (parse_info->literal() == nullptr || !task->outer_function_job()) {
+ // Parsing has failed - report error messages.
FailWithPendingException(isolate, script, parse_info,
Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
+ } else {
+ // Parsing has succeeded - finalize compilation.
+ maybe_result = FinalizeTopLevel(parse_info, script, isolate,
+ task->outer_function_job(),
+ task->inner_function_jobs());
+ if (maybe_result.is_null()) {
+ // Finalization failed - throw an exception.
+ FailWithPendingException(
+ isolate, script, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
+ }
}
}
// Add compiled code to the isolate cache.
Handle<SharedFunctionInfo> result;
if (maybe_result.ToHandle(&result)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.StreamingFinalization.AddToCache");
compilation_cache->PutScript(source, isolate->native_context(),
- parse_info->language_mode(), result);
+ task->language_mode(), result);
}
}
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.StreamingFinalization.Release");
streaming_data->Release();
return maybe_result;
}
+template <typename LocalIsolate>
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
- FunctionLiteral* literal, Handle<Script> script, Isolate* isolate) {
+ FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate) {
// Precondition: code has been parsed and scopes have been analyzed.
MaybeHandle<SharedFunctionInfo> maybe_existing;
@@ -2303,6 +2535,11 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
return result;
}
+template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
+ FunctionLiteral* literal, Handle<Script> script, Isolate* isolate);
+template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
+ FunctionLiteral* literal, Handle<Script> script, OffThreadIsolate* isolate);
+
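Because the templated GetSharedFunctionInfo is defined in compiler.cc rather than the header, the two explicit instantiations above are what make the Isolate and OffThreadIsolate versions linkable from other translation units. The same pattern in isolation:

    // measure.h -- declaration only; the definition stays out of the header.
    template <typename T> int Measure(T value);

    // measure.cc -- define once, then explicitly instantiate every type that
    // other translation units may link against (as compiler.cc does above).
    template <typename T> int Measure(T value) { return sizeof(value); }
    template int Measure(int);
    template int Measure(double);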
MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
BailoutId osr_offset,
JavaScriptFrame* osr_frame) {
@@ -2347,9 +2584,10 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
isolate);
InsertCodeIntoOptimizedCodeCache(compilation_info);
if (FLAG_trace_opt) {
- PrintF("[completed optimizing ");
- compilation_info->closure()->ShortPrint();
- PrintF("]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[completed optimizing ");
+ compilation_info->closure()->ShortPrint(scope.file());
+ PrintF(scope.file(), "]\n");
}
compilation_info->closure()->set_code(*compilation_info->code());
return CompilationJob::SUCCEEDED;
@@ -2358,9 +2596,10 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
if (FLAG_trace_opt) {
- PrintF("[aborted optimizing ");
- compilation_info->closure()->ShortPrint();
- PrintF(" because: %s]\n",
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[aborted optimizing ");
+ compilation_info->closure()->ShortPrint(scope.file());
+ PrintF(scope.file(), " because: %s]\n",
GetBailoutReason(compilation_info->bailout_reason()));
}
compilation_info->closure()->set_code(shared->GetCode());
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 7747d1c074..7284003de9 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -10,10 +10,11 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/logging/code-events.h"
-#include "src/utils/allocation.h"
#include "src/objects/contexts.h"
+#include "src/utils/allocation.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -176,9 +177,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
- static Handle<SharedFunctionInfo> GetSharedFunctionInfo(FunctionLiteral* node,
- Handle<Script> script,
- Isolate* isolate);
+ template <typename LocalIsolate>
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
+ FunctionLiteral* node, Handle<Script> script, LocalIsolate* isolate);
// ===========================================================================
// The following family of methods provides support for OSR. Code generated
@@ -258,6 +259,10 @@ class UnoptimizedCompilationJob : public CompilationJob {
V8_WARN_UNUSED_RESULT Status
FinalizeJob(Handle<SharedFunctionInfo> shared_info, Isolate* isolate);
+ // Finalizes the compile job. Can be called on a background thread.
+ V8_WARN_UNUSED_RESULT Status FinalizeJob(
+ Handle<SharedFunctionInfo> shared_info, OffThreadIsolate* isolate);
+
void RecordCompilationStats(Isolate* isolate) const;
void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Handle<SharedFunctionInfo> shared,
@@ -275,6 +280,8 @@ class UnoptimizedCompilationJob : public CompilationJob {
virtual Status ExecuteJobImpl() = 0;
virtual Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
Isolate* isolate) = 0;
+ virtual Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ OffThreadIsolate* isolate) = 0;
private:
uintptr_t stack_limit_;
@@ -364,7 +371,10 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
void Run();
- ParseInfo* info() { return info_.get(); }
+ ParseInfo* info() {
+ DCHECK_NOT_NULL(info_);
+ return info_.get();
+ }
Parser* parser() { return parser_.get(); }
UnoptimizedCompilationJob* outer_function_job() {
return outer_function_job_.get();
@@ -372,6 +382,18 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
UnoptimizedCompilationJobList* inner_function_jobs() {
return &inner_function_jobs_;
}
+ LanguageMode language_mode() { return language_mode_; }
+ bool collected_source_positions() { return collected_source_positions_; }
+ bool finalize_on_background_thread() {
+ return finalize_on_background_thread_;
+ }
+ OffThreadIsolate* off_thread_isolate() { return off_thread_isolate_.get(); }
+ SharedFunctionInfo outer_function_sfi() {
+ // Make sure that this is an off-thread object, so that it won't have been
+ // moved by the GC.
+ DCHECK(Heap::InOffThreadSpace(outer_function_sfi_));
+ return outer_function_sfi_;
+ }
private:
  // Data needed for parsing, and data needed to be passed between thread
@@ -384,10 +406,27 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
std::unique_ptr<UnoptimizedCompilationJob> outer_function_job_;
UnoptimizedCompilationJobList inner_function_jobs_;
+ // Data needed for merging onto the main thread after background finalization.
+ // TODO(leszeks): When these are available, the above fields are not. We
+ // should add some stricter type-safety or DCHECKs to ensure that the user of
+ // the task knows this.
+ std::unique_ptr<OffThreadIsolate> off_thread_isolate_;
+ // This is a raw pointer to the off-thread allocated SharedFunctionInfo.
+ SharedFunctionInfo outer_function_sfi_;
+
int stack_size_;
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
AccountingAllocator* allocator_;
TimedHistogram* timer_;
+ LanguageMode language_mode_;
+ bool collected_source_positions_;
+
+ // True if the background compilation should be finalized on the background
+ // thread. When this is true, the ParseInfo, Parser and compilation jobs are
+ // freed on the background thread, the outer_function_sfi holds the top-level
+ // function, and the off_thread_isolate has to be merged into the main-thread
+ // Isolate.
+ bool finalize_on_background_thread_;
DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTask);
};
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 42b2fa6e9a..1a676787df 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-#if defined(V8_TARGET_ARCH_PPC)
+#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
int double_reach_bits) {
@@ -209,7 +209,7 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
return !empty ? (assm->pc_offset() - emitted_label_.pos()) : 0;
}
-#endif // defined(V8_TARGET_ARCH_PPC)
+#endif // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
#if defined(V8_TARGET_ARCH_ARM64)
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index d2ab5641ae..581644b917 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -81,7 +81,7 @@ class ConstantPoolEntry {
enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
-#if defined(V8_TARGET_ARCH_PPC)
+#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
// -----------------------------------------------------------------------------
// Embedded constant pool support
@@ -161,7 +161,7 @@ class ConstantPoolBuilder {
PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
-#endif // defined(V8_TARGET_ARCH_PPC)
+#endif // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
#if defined(V8_TARGET_ARCH_ARM64)
diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h
index b49d2b64f2..7a222c960f 100644
--- a/deps/v8/src/codegen/constants-arch.h
+++ b/deps/v8/src/codegen/constants-arch.h
@@ -15,7 +15,7 @@
#include "src/codegen/mips/constants-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/constants-s390.h" // NOLINT
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index 9ff38bc40b..14c94ebae9 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -47,7 +47,7 @@ enum CpuFeature {
MIPSr6,
MIPS_SIMD, // MSA instructions
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
FPU,
FPR_GPR_MOV,
LWSYNC,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 3bd2f299d1..7a42e40461 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -289,6 +289,7 @@ FUNCTION_REFERENCE(wasm_word32_rol, wasm::word32_rol_wrapper)
FUNCTION_REFERENCE(wasm_word32_ror, wasm::word32_ror_wrapper)
FUNCTION_REFERENCE(wasm_word64_rol, wasm::word64_rol_wrapper)
FUNCTION_REFERENCE(wasm_word64_ror, wasm::word64_ror_wrapper)
+FUNCTION_REFERENCE(wasm_memory_init, wasm::memory_init_wrapper)
FUNCTION_REFERENCE(wasm_memory_copy, wasm::memory_copy_wrapper)
FUNCTION_REFERENCE(wasm_memory_fill, wasm::memory_fill_wrapper)
@@ -465,7 +466,7 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerARM64::CheckStackGuardState
#elif V8_TARGET_ARCH_ARM
#define re_stack_check_func RegExpMacroAssemblerARM::CheckStackGuardState
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#define re_stack_check_func RegExpMacroAssemblerPPC::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
@@ -910,6 +911,11 @@ bool operator!=(ExternalReference lhs, ExternalReference rhs) {
}
size_t hash_value(ExternalReference reference) {
+ if (FLAG_predictable) {
+ // Avoid ASLR non-determinism in predictable mode. For this, just take the
+    // lowest 12 bits, corresponding to a 4K page size.
+ return base::hash<Address>()(reference.address() & 0xfff);
+ }
return base::hash<Address>()(reference.address());
}
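With --predictable, the hash above keys on only the low 12 bits of the reference's address, i.e. its offset within a 4K page: ASLR slides mappings by whole pages, so those bits are stable across runs while the high bits are not. A quick standalone check of that invariant:

    #include <cstdint>
    #include <cstdio>
    #include <functional>

    int main() {
      // Two runs under different page-aligned ASLR slides:
      std::uintptr_t slide_a = 0x12340000, slide_b = 0x98760000;
      std::uintptr_t offset_in_page = 0xabc;  // same symbol, same page offset
      auto h = [](std::uintptr_t address) {
        return std::hash<std::uintptr_t>{}(address & 0xfff);  // as in the patch
      };
      // Prints the same value twice despite different load addresses:
      std::printf("%zu %zu\n", h(slide_a + offset_in_page),
                  h(slide_b + offset_in_page));
    }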
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 9eb95e18d9..2c5c8348f4 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -200,6 +200,7 @@ class StatsCounter;
V(wasm_word64_ror, "wasm::word64_ror") \
V(wasm_word64_ctz, "wasm::word64_ctz") \
V(wasm_word64_popcnt, "wasm::word64_popcnt") \
+ V(wasm_memory_init, "wasm::memory_init") \
V(wasm_memory_copy, "wasm::memory_copy") \
V(wasm_memory_fill, "wasm::memory_fill") \
V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 4cd713811b..246415ba67 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -1619,6 +1619,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
SSSE3_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
+ SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
#undef DECLARE_SSSE3_INSTRUCTION
#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
@@ -1656,6 +1657,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
}
+ SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE4_AVX_RM_INSTRUCTION)
SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE4_AVX_RM_INSTRUCTION)
#undef DECLARE_SSE4_AVX_RM_INSTRUCTION
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 4741cdb10d..6f5778d3ca 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -31,6 +31,20 @@
namespace v8 {
namespace internal {
+Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
+ DCHECK_GE(index, 0);
+#ifdef V8_REVERSE_JSARGS
+ // arg[0] = esp + kPCOnStackSize;
+ // arg[i] = arg[0] + i * kSystemPointerSize;
+ return Operand(esp, kPCOnStackSize + index * kSystemPointerSize);
+#else
+ // arg[0] = (esp + kPCOnStackSize) + argc * kSystemPointerSize;
+ // arg[i] = arg[0] - i * kSystemPointerSize;
+ return Operand(esp, argc_, times_system_pointer_size,
+ kPCOnStackSize - index * kSystemPointerSize);
+#endif
+}
+
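The two branches above capture the payoff of V8_REVERSE_JSARGS on ia32: with arguments pushed in reverse, argument i sits at a fixed offset from esp and no argc register is needed to reach it, whereas the legacy order must index from the far end via argc. The offset arithmetic, extracted into plain functions:

    #include <cstdio>

    constexpr int kSystemPointerSize = 4;               // ia32
    constexpr int kPCOnStackSize = kSystemPointerSize;  // return-address slot

    int ReversedOffset(int index) {          // V8_REVERSE_JSARGS branch
      return kPCOnStackSize + index * kSystemPointerSize;
    }
    int LegacyOffset(int argc, int index) {  // original branch
      return argc * kSystemPointerSize + kPCOnStackSize
             - index * kSystemPointerSize;
    }

    int main() {  // receiver (index 0) and first argument, argc == 2
      std::printf("reversed: esp+%d, esp+%d\n",
                  ReversedOffset(0), ReversedOffset(1));
      std::printf("legacy:   esp+%d, esp+%d\n",
                  LegacyOffset(2, 0), LegacyOffset(2, 1));
    }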
// -------------------------------------------------------------------------
// MacroAssembler implementation.
@@ -91,18 +105,6 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) {
}
}
-void TurboAssembler::CompareRealStackLimit(Register with) {
- CHECK(root_array_available()); // Only used by builtins.
-
- // Address through the root register. No load is needed.
- ExternalReference limit =
- ExternalReference::address_of_real_jslimit(isolate());
- DCHECK(IsAddressableThroughRootRegister(isolate(), limit));
-
- intptr_t offset = RootRegisterOffsetForExternalReference(isolate(), limit);
- cmp(with, Operand(kRootRegister, offset));
-}
-
void MacroAssembler::PushRoot(RootIndex index) {
if (root_array_available()) {
DCHECK(RootsTable::IsImmortalImmovable(index));
@@ -1107,9 +1109,14 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
}
Push(fun);
Push(fun);
+ // Arguments are located 2 words below the base pointer.
+#ifdef V8_REVERSE_JSARGS
+ Operand receiver_op = Operand(ebp, kSystemPointerSize * 2);
+#else
Operand receiver_op =
Operand(ebp, actual_parameter_count, times_system_pointer_size,
kSystemPointerSize * 2);
+#endif
Push(receiver_op);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
@@ -1776,32 +1783,38 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
- if (isolate() != nullptr) {
- // Find two caller-saved scratch registers.
- Register scratch1 = eax;
- Register scratch2 = ecx;
- if (function == eax) scratch1 = edx;
- if (function == ecx) scratch2 = edx;
- PushPC();
- pop(scratch1);
- mov(ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_pc_address(isolate()),
- scratch2),
- scratch1);
- mov(ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_fp_address(isolate()),
- scratch2),
- ebp);
- }
+ // Find two caller-saved scratch registers.
+ Register pc_scratch = eax;
+ Register scratch = ecx;
+ if (function == eax) pc_scratch = edx;
+ if (function == ecx) scratch = edx;
+ PushPC();
+ pop(pc_scratch);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ DCHECK_IMPLIES(!root_array_available(), isolate() != nullptr);
+ mov(root_array_available()
+ ? Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset())
+ : ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_pc_address(isolate()),
+ scratch),
+ pc_scratch);
+ mov(root_array_available()
+ ? Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())
+ : ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate()),
+ scratch),
+ ebp);
call(function);
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- mov(ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_fp_address(isolate()), edx),
- Immediate(0));
- }
+ // We don't unset the PC; the FP is the source of truth.
+ mov(root_array_available()
+ ? Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())
+ : ExternalReferenceAsOperand(
+ ExternalReference::fast_c_call_caller_fp_address(isolate()),
+ scratch),
+ Immediate(0));
if (base::OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kSystemPointerSize));
@@ -2028,7 +2041,9 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
+ Label* exit, DeoptimizeKind kind) {
+ USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in ebx (we don't need the roots array from now on).
mov(ebx, deopt_id);
@@ -2036,6 +2051,7 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
}
void TurboAssembler::Trap() { int3(); }
+void TurboAssembler::DebugBreak() { int3(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 4d2ea96a92..40b542f375 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -24,6 +24,26 @@ using MemOperand = Operand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+// Convenient class to access arguments below the stack pointer.
+class StackArgumentsAccessor {
+ public:
+ // argc = the number of arguments not including the receiver.
+ explicit StackArgumentsAccessor(Register argc) : argc_(argc) {
+ DCHECK_NE(argc_, no_reg);
+ }
+
+ // Argument 0 is the receiver (despite argc not including the receiver).
+ Operand operator[](int index) const { return GetArgumentOperand(index); }
+
+ Operand GetArgumentOperand(int index) const;
+ Operand GetReceiverOperand() const { return GetArgumentOperand(0); }
+
+ private:
+ const Register argc_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
+};
+
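A hypothetical use inside an ia32 builtin, assuming the usual `__` = masm-> macro convention; this is a sketch, not code from the patch:

    StackArgumentsAccessor args(eax);        // eax holds argc (no receiver)
    __ mov(edx, args.GetReceiverOperand());  // receiver is args[0]
    __ mov(ecx, args[1]);                    // first explicit argument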
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
@@ -108,8 +128,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RetpolineJump(Register reg);
void Trap() override;
+ void DebugBreak() override;
- void CallForDeoptimization(Address target, int deopt_id);
+ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
+ DeoptimizeKind kind);
  // Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
@@ -215,7 +237,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadAddress(Register destination, ExternalReference source);
- void CompareRealStackLimit(Register with);
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Register with, Register scratch, RootIndex index);
@@ -284,6 +305,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Packsswb, packsswb)
AVX_OP3_XO(Packuswb, packuswb)
AVX_OP3_XO(Paddusb, paddusb)
+ AVX_OP3_XO(Pand, pand)
AVX_OP3_XO(Pcmpeqb, pcmpeqb)
AVX_OP3_XO(Pcmpeqw, pcmpeqw)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
@@ -335,8 +357,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Minpd, minpd)
AVX_PACKED_OP3(Maxpd, maxpd)
AVX_PACKED_OP3(Cmpunordpd, cmpunordpd)
+ AVX_PACKED_OP3(Psllw, psllw)
+ AVX_PACKED_OP3(Pslld, pslld)
AVX_PACKED_OP3(Psllq, psllq)
+ AVX_PACKED_OP3(Psrlw, psrlw)
+ AVX_PACKED_OP3(Psrld, psrld)
AVX_PACKED_OP3(Psrlq, psrlq)
+ AVX_PACKED_OP3(Psraw, psraw)
+ AVX_PACKED_OP3(Psrad, psrad)
AVX_PACKED_OP3(Paddq, paddq)
AVX_PACKED_OP3(Psubq, psubq)
AVX_PACKED_OP3(Pmuludq, pmuludq)
@@ -344,7 +372,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Pavgw, pavgw)
#undef AVX_PACKED_OP3
+ AVX_PACKED_OP3_WITH_TYPE(Psllw, psllw, XMMRegister, uint8_t)
+ AVX_PACKED_OP3_WITH_TYPE(Pslld, pslld, XMMRegister, uint8_t)
AVX_PACKED_OP3_WITH_TYPE(Psllq, psllq, XMMRegister, uint8_t)
+ AVX_PACKED_OP3_WITH_TYPE(Psrlw, psrlw, XMMRegister, uint8_t)
+ AVX_PACKED_OP3_WITH_TYPE(Psrld, psrld, XMMRegister, uint8_t)
+ AVX_PACKED_OP3_WITH_TYPE(Psrlq, psrlq, XMMRegister, uint8_t)
+ AVX_PACKED_OP3_WITH_TYPE(Psraw, psraw, XMMRegister, uint8_t)
+ AVX_PACKED_OP3_WITH_TYPE(Psrad, psrad, XMMRegister, uint8_t)
#undef AVX_PACKED_OP3_WITH_TYPE
// Non-SSE2 instructions.
@@ -369,6 +404,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_XO_SSE3(Movddup, movddup)
#undef AVX_OP2_XO_SSE3
+
+#define AVX_OP2_XO_SSSE3(macro_name, name) \
+ AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSSE3) \
+ AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSSE3)
+ AVX_OP2_XO_SSSE3(Pabsb, pabsb)
+ AVX_OP2_XO_SSSE3(Pabsw, pabsw)
+ AVX_OP2_XO_SSSE3(Pabsd, pabsd)
+
+#undef AVX_OP2_XO_SSE3
+
#define AVX_OP2_XO_SSE4(macro_name, name) \
AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
@@ -494,6 +539,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Handle<Code> code_target,
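
A hypothetical usage sketch for the new StackArgumentsAccessor (not from the patch; the builtin and register choices are illustrative). With the argument count in a register, arguments are addressed relative to esp, and index 0 yields the receiver even though argc itself excludes it:

// Inside some ia32 builtin, assuming eax holds argc (receiver excluded)
// and the usual "__ masm->" shorthand is in effect.
StackArgumentsAccessor args(eax);
__ mov(edx, args.GetReceiverOperand());  // same as args[0]
__ mov(ecx, args[1]);                    // first explicit JS argument
__ mov(edi, args[2]);                    // second explicit JS argument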
diff --git a/deps/v8/src/codegen/ia32/sse-instr.h b/deps/v8/src/codegen/ia32/sse-instr.h
index c63b6aa3f3..b8a7a3c827 100644
--- a/deps/v8/src/codegen/ia32/sse-instr.h
+++ b/deps/v8/src/codegen/ia32/sse-instr.h
@@ -67,6 +67,12 @@
V(psignw, 66, 0F, 38, 09) \
V(psignd, 66, 0F, 38, 0A)
+// SSSE3 instructions whose AVX version has two operands.
+#define SSSE3_UNOP_INSTRUCTION_LIST(V) \
+ V(pabsb, 66, 0F, 38, 1C) \
+ V(pabsw, 66, 0F, 38, 1D) \
+ V(pabsd, 66, 0F, 38, 1E)
+
#define SSE4_INSTRUCTION_LIST(V) \
V(packusdw, 66, 0F, 38, 2B) \
V(pminsb, 66, 0F, 38, 38) \
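
The new SSSE3_UNOP_INSTRUCTION_LIST follows the X-macro convention of this header: each V(...) row names the mnemonic and its encoding bytes (prefix, two escape bytes, opcode), and consumers expand the list with their own definition of V. Roughly how such a list might be consumed (hypothetical macro, not from the patch):

// Declare one assembler method per listed instruction; the encoding bytes
// would drive code emission in the corresponding definition.
#define DECLARE_SSSE3_UNOP(instr, prefix, escape1, escape2, opcode) \
  void instr(XMMRegister dst, XMMRegister src);
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSSE3_UNOP)
#undef DECLARE_SSSE3_UNOP
// Expands to: void pabsb(...); void pabsw(...); void pabsd(...);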
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 345c82454e..42b45c0f33 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -380,6 +380,18 @@ void WasmMemoryGrowDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void WasmTableInitDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data,
+ kParameterCount - kStackArgumentsCount);
+}
+
+void WasmTableCopyDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data,
+ kParameterCount - kStackArgumentsCount);
+}
+
void WasmTableGetDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
@@ -401,12 +413,23 @@ void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
}
#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
-void WasmI32AtomicWaitDescriptor::InitializePlatformSpecific(
+void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
-void WasmI64AtomicWaitDescriptor::InitializePlatformSpecific(
+void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data,
+ kParameterCount - kStackArgumentsCount);
+}
+
+void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
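
The Wait32/Wait64 descriptor split mirrors the target word size: on 32-bit targets the 64-bit operands (the timeout, and for i64.atomic.wait the expected value) travel as low/high 32-bit halves, which is why the 32-bit variants register only kParameterCount - kStackArgumentsCount values. The splitting arithmetic itself is simple (a sketch, assuming a plain uint64_t timeout):

#include <cstdint>

// Split a 64-bit value into the (kTimeoutLow, kTimeoutHigh) halves that
// the *Wait32 descriptors declare as separate Uint32 parameters.
void SplitU64(uint64_t value, uint32_t* low, uint32_t* high) {
  *low = static_cast<uint32_t>(value);         // bits 0..31
  *high = static_cast<uint32_t>(value >> 32);  // bits 32..63
}
// The runtime side recombines with (uint64_t{high} << 32) | low.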
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 0701c66be5..36099d57fe 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -91,9 +91,13 @@ namespace internal {
V(Typeof) \
V(Void) \
V(WasmAtomicNotify) \
- V(WasmI32AtomicWait) \
- V(WasmI64AtomicWait) \
+ V(WasmI32AtomicWait32) \
+ V(WasmI32AtomicWait64) \
+ V(WasmI64AtomicWait32) \
+ V(WasmI64AtomicWait64) \
V(WasmMemoryGrow) \
+ V(WasmTableInit) \
+ V(WasmTableCopy) \
V(WasmTableGet) \
V(WasmTableSet) \
V(WasmThrow) \
@@ -454,18 +458,22 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
CallInterfaceDescriptorData::kAllowVarArgs, 1, ##__VA_ARGS__)
-#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override { \
- MachineType machine_types[] = {__VA_ARGS__}; \
- static_assert( \
- kReturnCount + kParameterCount == arraysize(machine_types), \
- "Parameter names definition is not consistent with parameter types"); \
- data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
- kParameterCount, machine_types, \
- arraysize(machine_types)); \
+#define DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(flag, ...) \
+ void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+ override { \
+ MachineType machine_types[] = {__VA_ARGS__}; \
+ static_assert( \
+ kReturnCount + kParameterCount == arraysize(machine_types), \
+ "Parameter names definition is not consistent with parameter types"); \
+ data->InitializePlatformIndependent( \
+ Flags(flag | kDescriptorFlags), kReturnCount, kParameterCount, \
+ machine_types, arraysize(machine_types)); \
}
+#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
+ DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG( \
+ CallInterfaceDescriptorData::kNoFlags, __VA_ARGS__)
+
#define DEFINE_PARAMETER_TYPES(...) \
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
##__VA_ARGS__)
@@ -1039,6 +1047,7 @@ class ArrayNoArgumentConstructorDescriptor
ArrayNArgumentsConstructorDescriptor)
};
+#ifdef V8_REVERSE_JSARGS
class ArraySingleArgumentConstructorDescriptor
: public ArrayNArgumentsConstructorDescriptor {
public:
@@ -1046,15 +1055,35 @@ class ArraySingleArgumentConstructorDescriptor
// ArrayNArgumentsConstructorDescriptor and it declares indices for
// JS arguments passed on the expression stack.
DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
- kFunctionParameter, kArraySizeSmiParameter)
+ kArraySizeSmiParameter, kReceiverParameter)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
MachineType::AnyTagged(), // kAllocationSite
MachineType::Int32(), // kActualArgumentsCount
- MachineType::AnyTagged(), // kFunctionParameter
+ // JS arguments on the stack
+ MachineType::AnyTagged(), // kArraySizeSmiParameter
+ MachineType::AnyTagged()) // kReceiverParameter
+ DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
+ ArrayNArgumentsConstructorDescriptor)
+};
+#else
+class ArraySingleArgumentConstructorDescriptor
+ : public ArrayNArgumentsConstructorDescriptor {
+ public:
+ // This descriptor declares the same register arguments as the parent
+ // ArrayNArgumentsConstructorDescriptor and it declares indices for
+ // JS arguments passed on the expression stack.
+ DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
+ kReceiverParameter, kArraySizeSmiParameter)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
+ MachineType::AnyTagged(), // kAllocationSite
+ MachineType::Int32(), // kActualArgumentsCount
+ // JS arguments on the stack
+ MachineType::AnyTagged(), // kReceiverParameter
MachineType::AnyTagged()) // kArraySizeSmiParameter
DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
ArrayNArgumentsConstructorDescriptor)
};
+#endif
class CompareDescriptor : public CallInterfaceDescriptor {
public:
@@ -1289,6 +1318,52 @@ class WasmMemoryGrowDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmMemoryGrowDescriptor, CallInterfaceDescriptor)
};
+class WasmTableInitDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kDestination, kSource, kSize, kTableIndex,
+ kSegmentIndex)
+ DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kDestination
+ MachineType::Int32(), // kSource
+ MachineType::Int32(), // kSize
+ MachineType::AnyTagged(), // kTableIndex
+ MachineType::AnyTagged(), // kSegmentIndex
+ )
+
+#if V8_TARGET_ARCH_IA32
+ static constexpr bool kPassLastArgOnStack = true;
+#else
+ static constexpr bool kPassLastArgOnStack = false;
+#endif
+
+ // Pass the last parameter on the stack.
+ static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
+
+ DECLARE_DESCRIPTOR(WasmTableInitDescriptor, CallInterfaceDescriptor)
+};
+
+class WasmTableCopyDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kDestination, kSource, kSize, kDestinationTable,
+ kSourceTable)
+ DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kDestination
+ MachineType::Int32(), // kSource
+ MachineType::Int32(), // kSize
+ MachineType::AnyTagged(), // kDestinationTable
+ MachineType::AnyTagged(), // kSourceTable
+ )
+
+#if V8_TARGET_ARCH_IA32
+ static constexpr bool kPassLastArgOnStack = true;
+#else
+ static constexpr bool kPassLastArgOnStack = false;
+#endif
+
+ // Pass the last parameter on the stack.
+ static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
+
+ DECLARE_DESCRIPTOR(WasmTableCopyDescriptor, CallInterfaceDescriptor)
+};
+
class WasmTableGetDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kTableIndex, kEntryIndex)
@@ -1361,27 +1436,62 @@ class WasmAtomicNotifyDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmAtomicNotifyDescriptor, CallInterfaceDescriptor)
};
-class WasmI32AtomicWaitDescriptor final : public CallInterfaceDescriptor {
+class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
+ kTimeoutHigh)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Int32(), // kExpectedValue
+ MachineType::Uint32(), // kTimeoutLow
+ MachineType::Uint32()) // kTimeoutHigh
+ DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor, CallInterfaceDescriptor)
+};
+
+class WasmI64AtomicWait32Descriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueLow, kExpectedValueHigh,
+ kTimeoutLow, kTimeoutHigh)
+
+ DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(
+ CallInterfaceDescriptorData::kNoStackScan, // allow untagged stack params
+ MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Uint32(), // kExpectedValueLow
+ MachineType::Uint32(), // kExpectedValueHigh
+ MachineType::Uint32(), // kTimeoutLow
+ MachineType::Uint32()) // kTimeoutHigh
+
+#if V8_TARGET_ARCH_IA32
+ static constexpr bool kPassLastArgOnStack = true;
+#else
+ static constexpr bool kPassLastArgOnStack = false;
+#endif
+
+ // Pass the last parameter on the stack.
+ static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
+
+ DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor, CallInterfaceDescriptor)
+};
+
+class WasmI32AtomicWait64Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Int32(), // kExpectedValue
- MachineType::Float64()) // kTimeout
- DECLARE_DESCRIPTOR(WasmI32AtomicWaitDescriptor, CallInterfaceDescriptor)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Int32(), // kExpectedValue
+ MachineType::Uint64()) // kTimeout
+ DECLARE_DESCRIPTOR(WasmI32AtomicWait64Descriptor, CallInterfaceDescriptor)
};
-class WasmI64AtomicWaitDescriptor final : public CallInterfaceDescriptor {
+class WasmI64AtomicWait64Descriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueHigh, kExpectedValueLow,
- kTimeout)
- DEFINE_RESULT_AND_PARAMETER_TYPES(
- MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint32(), // kExpectedValueHigh
- MachineType::Uint32(), // kExpectedValueLow
- MachineType::Float64()) // kTimeout
- DECLARE_DESCRIPTOR(WasmI64AtomicWaitDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Uint64(), // kExpectedValue
+ MachineType::Uint64()) // kTimeout
+ DECLARE_DESCRIPTOR(WasmI64AtomicWait64Descriptor, CallInterfaceDescriptor)
};
class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
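
WasmTableInit, WasmTableCopy and WasmI64AtomicWait32 all repeat the same ia32-only spill pattern, presumably because ia32 lacks enough free parameter registers for five values. Condensed, the pattern is:

// On ia32 the final parameter travels on the stack; everywhere else it
// stays in a register.
#if V8_TARGET_ARCH_IA32
static constexpr bool kPassLastArgOnStack = true;
#else
static constexpr bool kPassLastArgOnStack = false;
#endif

// Number of parameters the platform-specific initializer must NOT assign
// to registers; kParameterCount - kStackArgumentsCount go in registers.
static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;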
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index b89da584c1..ea05441594 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -268,10 +268,14 @@ class MachineType {
}
}
- bool LessThanOrEqualPointerSize() {
+ constexpr bool LessThanOrEqualPointerSize() const {
return ElementSizeLog2Of(this->representation()) <= kSystemPointerSizeLog2;
}
+ constexpr byte MemSize() const {
+ return 1 << i::ElementSizeLog2Of(this->representation());
+ }
+
private:
MachineRepresentation representation_;
MachineSemantic semantic_;
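
MemSize() is simply 1 << ElementSizeLog2Of(representation), now computable at compile time. A worked check under the usual sizes (log2 of 2 for 32-bit, 3 for 64-bit element representations):

#include <cstdint>

// Mirror of the MemSize computation, detached from MachineType.
constexpr uint8_t MemSizeFromLog2(int element_size_log2) {
  return static_cast<uint8_t>(1 << element_size_log2);
}
static_assert(MemSizeFromLog2(2) == 4, "32-bit element: 4 bytes");
static_assert(MemSizeFromLog2(3) == 8, "64-bit element: 8 bytes");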
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index 28b3b3a3c4..01175e585e 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -40,7 +40,7 @@ enum AllocationFlags {
#elif V8_TARGET_ARCH_ARM
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/arm/macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h"
#include "src/codegen/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/codegen/mips/constants-mips.h b/deps/v8/src/codegen/mips/constants-mips.h
index d2b3f6b08f..67d12155a3 100644
--- a/deps/v8/src/codegen/mips/constants-mips.h
+++ b/deps/v8/src/codegen/mips/constants-mips.h
@@ -89,15 +89,18 @@ const uint32_t kHoleNanLower32Offset = 4;
#error Unknown endianness
#endif
-#define IsFp64Mode() (kFpuMode == kFP64)
-#define IsFp32Mode() (kFpuMode == kFP32)
-#define IsFpxxMode() (kFpuMode == kFPXX)
+constexpr bool IsFp64Mode() { return kFpuMode == kFP64; }
+constexpr bool IsFp32Mode() { return kFpuMode == kFP32; }
+constexpr bool IsFpxxMode() { return kFpuMode == kFPXX; }
#ifndef _MIPS_ARCH_MIPS32RX
-#define IsMipsArchVariant(check) (kArchVariant == check)
+constexpr bool IsMipsArchVariant(const ArchVariants check) {
+ return kArchVariant == check;
+}
#else
-#define IsMipsArchVariant(check) \
- (CpuFeatures::IsSupported(static_cast<CpuFeature>(check)))
+bool IsMipsArchVariant(const ArchVariants check) {
+ return CpuFeatures::IsSupported(static_cast<CpuFeature>(check));
+}
#endif
#if defined(V8_TARGET_LITTLE_ENDIAN)
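
Converting the function-like macros to constexpr functions leaves every call site untouched while adding type checking on the argument and making the predicates usable in constant expressions. A minimal illustration with hypothetical variant values:

// Stand-ins for the real enum and build-time configuration.
enum ArchVariants { kMips32r1, kMips32r2, kMips32r6 };
constexpr ArchVariants kArchVariant = kMips32r2;

// Unlike the old macro, this rejects arguments of the wrong type and can
// feed a static_assert.
constexpr bool IsMipsArchVariant(ArchVariants check) {
  return kArchVariant == check;
}
static_assert(IsMipsArchVariant(kMips32r2), "variant chosen at build time");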
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
index 0a36e26577..8b8bc1b56d 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -31,19 +31,34 @@ bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
return reg.code() % 2 == 0;
}
-void WasmI32AtomicWaitDescriptor::InitializePlatformSpecific(
+void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- /* Register t4 correspond to f12 FPU register. */
- const Register default_stub_registers[] = {a0, a1, t4};
+ const Register default_stub_registers[] = {a0, a1, a2, a3};
CHECK_EQ(static_cast<size_t>(kParameterCount),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-void WasmI64AtomicWaitDescriptor::InitializePlatformSpecific(
+void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- /* Register t4 correspond to f12 FPU register. */
- const Register default_stub_registers[] = {a0, a1, a2, t4};
+ const Register default_stub_registers[] = {a0, a1, a2};
+ CHECK_EQ(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
+void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
+ CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
+ default_stub_registers);
+}
+
+void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2};
CHECK_EQ(static_cast<size_t>(kParameterCount),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 6526f48da5..6ae70798c1 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -4668,6 +4668,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// Debugging.
void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
@@ -5374,31 +5375,38 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
- if (isolate() != nullptr) {
- // 't' registers are caller-saved so this is safe as a scratch register.
- Register scratch1 = t4;
- Register scratch2 = t5;
- DCHECK(!AreAliased(scratch1, scratch2, function_base));
-
- Label get_pc;
- mov(scratch1, ra);
- Call(&get_pc);
-
- bind(&get_pc);
- mov(scratch2, ra);
- mov(ra, scratch1);
-
- li(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
- sw(scratch2, MemOperand(scratch1));
- li(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- sw(fp, MemOperand(scratch1));
+ // 't' registers are caller-saved so this is safe as a scratch register.
+ Register pc_scratch = t4;
+ Register scratch = t5;
+ DCHECK(!AreAliased(pc_scratch, scratch, function_base));
+
+ mov(scratch, ra);
+ nal();
+ mov(pc_scratch, ra);
+ mov(ra, scratch);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ sw(pc_scratch, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ sw(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ sw(pc_scratch, MemOperand(scratch));
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ sw(fp, MemOperand(scratch));
}
Call(function_base, function_offset);
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- Register scratch = t4;
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ sw(zero_reg, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
sw(zero_reg, MemOperand(scratch));
}
@@ -5469,7 +5477,9 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
+ Label* exit, DeoptimizeKind kind) {
+ USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in kRootRegister (we don't need the roots array from now
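
The nal() sequence replacing the old Call(&get_pc) dance is the standard MIPS way to read the program counter: nal ("no-op and link") writes the address two instructions ahead into ra without branching. Restated with comments (same instructions as the patch):

mov(scratch, ra);     // preserve the live return address
nal();                // writes PC + 8 (the instruction after the delay
                      // slot) into ra, with no actual branch taken
mov(pc_scratch, ra);  // pc_scratch now holds a PC near the call site
mov(ra, scratch);     // restore the original return address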
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index f916b9d101..d7441c2fcf 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -103,6 +103,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Debugging.
void Trap() override;
+ void DebugBreak() override;
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
@@ -236,7 +237,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id);
+ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
+ DeoptimizeKind kind);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -813,6 +815,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
protected:
void BranchLong(Label* L, BranchDelaySlot bdslot);
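
The empty CodeEntry()/ExceptionHandler() bodies are intentional: MIPS has no hardware control-flow integrity, so the hooks compile to nothing, while a CFI-capable port would emit a landing-pad instruction at each indirect-control-transfer target. A purely illustrative sketch of what such a port could do (modeled loosely on arm64 BTI; names assumed, not from the patch):

// Hypothetical CFI-enabled variant, NOT the MIPS implementation.
void CodeEntry() {
  // Mark this address as a valid target for indirect calls.
  bti(BranchTargetIdentifier::kBtiCall);
}
void BindExceptionHandler(Label* label) {
  bind(label);
  // Exception handlers are reached via indirect jumps.
  bti(BranchTargetIdentifier::kBtiJump);
}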
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index e32d6c6d6e..84910f1ee9 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -31,19 +31,34 @@ bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
return reg.code() % 2 == 0;
}
-void WasmI32AtomicWaitDescriptor::InitializePlatformSpecific(
+void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- /* Register t0 correspond to f12 FPU register. */
- const Register default_stub_registers[] = {a0, a1, t0};
+ const Register default_stub_registers[] = {a0, a1, a2, a3};
CHECK_EQ(static_cast<size_t>(kParameterCount),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-void WasmI64AtomicWaitDescriptor::InitializePlatformSpecific(
+void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- /* Register t0 correspond to f12 FPU register. */
- const Register default_stub_registers[] = {a0, a1, a2, t0};
+ const Register default_stub_registers[] = {a0, a1, a2};
+ CHECK_EQ(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
+void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
+ CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
+ default_stub_registers);
+}
+
+void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2};
CHECK_EQ(static_cast<size_t>(kParameterCount),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index a9bbcf54aa..78f3228f24 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -4984,6 +4984,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// Debugging.
void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
@@ -5698,31 +5699,38 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
- if (isolate() != nullptr) {
- // 't' registers are caller-saved so this is safe as a scratch register.
- Register scratch1 = t1;
- Register scratch2 = t2;
- DCHECK(!AreAliased(scratch1, scratch2, function));
-
- Label get_pc;
- mov(scratch1, ra);
- Call(&get_pc);
-
- bind(&get_pc);
- mov(scratch2, ra);
- mov(ra, scratch1);
-
- li(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
- Sd(scratch2, MemOperand(scratch1));
- li(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- Sd(fp, MemOperand(scratch1));
+ // 't' registers are caller-saved so this is safe as a scratch register.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+ DCHECK(!AreAliased(pc_scratch, scratch, function));
+
+ mov(scratch, ra);
+ nal();
+ mov(pc_scratch, ra);
+ mov(ra, scratch);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ Sd(pc_scratch, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ Sd(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Sd(pc_scratch, MemOperand(scratch));
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Sd(fp, MemOperand(scratch));
}
Call(function);
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- Register scratch = t1;
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ Sd(zero_reg, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
Sd(zero_reg, MemOperand(scratch));
}
@@ -5793,7 +5801,9 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
+ Label* exit, DeoptimizeKind kind) {
+ USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in kRootRegister (we don't need the roots array from now
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index d1f681c38a..c8b8d2876f 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -123,6 +123,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Debugging.
void Trap() override;
+ void DebugBreak() override;
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
@@ -259,7 +260,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id);
+ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
+ DeoptimizeKind kind);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -819,6 +821,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 321386af14..a305a104f0 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -36,7 +36,7 @@
#include "src/codegen/ppc/assembler-ppc.h"
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/base/bits.h"
#include "src/base/cpu.h"
@@ -1960,4 +1960,4 @@ Register UseScratchRegisterScope::Acquire() {
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 222d4c28ad..b27a4fd8fe 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -230,10 +230,6 @@ class Assembler : public AssemblerBase {
return link(L) - pc_offset();
}
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
V8_INLINE static bool IsConstantPoolLoadStart(
Address pc, ConstantPoolEntry::Access* access = nullptr);
V8_INLINE static bool IsConstantPoolLoadEnd(
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.cc b/deps/v8/src/codegen/ppc/constants-ppc.cc
index 4cee2cbcb5..ee2e19aa24 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.cc
+++ b/deps/v8/src/codegen/ppc/constants-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h"
@@ -46,4 +46,4 @@ int Registers::Number(const char* name) {
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index d8892097bd..fe7df45ae5 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -20,7 +20,7 @@
#define UNIMPLEMENTED_PPC()
#endif
-#if V8_HOST_ARCH_PPC && \
+#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \
(V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \
(!defined(_CALL_ELF) || _CALL_ELF == 1)))
#define ABI_USES_FUNCTION_DESCRIPTORS 1
@@ -28,28 +28,30 @@
#define ABI_USES_FUNCTION_DESCRIPTORS 0
#endif
-#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || V8_OS_AIX || \
+ V8_TARGET_ARCH_PPC64
#define ABI_PASSES_HANDLES_IN_REGS 1
#else
#define ABI_PASSES_HANDLES_IN_REGS 0
#endif
-#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN || \
- (defined(_CALL_ELF) && _CALL_ELF == 2)
+#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || !V8_TARGET_ARCH_PPC64 || \
+ V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
#else
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
#endif
-#if !V8_HOST_ARCH_PPC || \
- (V8_TARGET_ARCH_PPC64 && \
+#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || \
+ (V8_TARGET_ARCH_PPC64 && \
(V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)))
#define ABI_CALL_VIA_IP 1
#else
#define ABI_CALL_VIA_IP 0
#endif
-#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || V8_OS_AIX || \
+ V8_TARGET_ARCH_PPC64
#define ABI_TOC_REGISTER 2
#else
#define ABI_TOC_REGISTER 13
diff --git a/deps/v8/src/codegen/ppc/cpu-ppc.cc b/deps/v8/src/codegen/ppc/cpu-ppc.cc
index 243fa29a46..9559af7778 100644
--- a/deps/v8/src/codegen/ppc/cpu-ppc.cc
+++ b/deps/v8/src/codegen/ppc/cpu-ppc.cc
@@ -4,7 +4,7 @@
// CPU specific code for ppc independent of OS goes here.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/cpu-features.h"
@@ -47,4 +47,4 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
} // namespace v8
#undef INSTR_AND_DATA_CACHE_COHERENCY
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index 3d378d7a43..f2264b05fa 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/interface-descriptors.h"
@@ -286,4 +286,4 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 96a9058240..ca6d472c93 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -5,7 +5,7 @@
#include <assert.h> // For assert
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
@@ -673,23 +673,6 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
mtlr(r0);
}
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that lowest encodings are closest to the stack pointer.
- RegList regs = kSafepointSavedRegisters;
- int index = 0;
-
- DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
-
- for (int16_t i = 0; i < reg_code; i++) {
- if ((regs & (1 << i)) != 0) {
- index++;
- }
- }
-
- return index;
-}
-
void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN.
@@ -1914,20 +1897,32 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
- if (isolate() != nullptr) {
- Register scratch1 = r7;
- Register scratch2 = r8;
- Push(scratch1, scratch2);
+ Register addr_scratch = r7;
+ Register scratch = r8;
+ Push(scratch);
+ mflr(scratch);
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ LoadPC(r0);
+ StoreP(r0, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ StoreP(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Push(addr_scratch);
- mflr(scratch2);
- Move(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
LoadPC(r0);
- StoreP(r0, MemOperand(scratch1));
- Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreP(fp, MemOperand(scratch1));
- mtlr(scratch2);
- Pop(scratch1, scratch2);
+ StoreP(r0, MemOperand(addr_scratch));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreP(fp, MemOperand(addr_scratch));
+ Pop(addr_scratch);
}
+ mtlr(scratch);
+ Pop(scratch);
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
@@ -1947,15 +1942,21 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- Register scratch1 = r7;
- Register scratch2 = r8;
- Push(scratch1, scratch2);
- Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- mov(scratch2, Operand::Zero());
- StoreP(scratch2, MemOperand(scratch1));
- Pop(scratch1, scratch2);
+ // We don't unset the PC; the FP is the source of truth.
+ Register zero_scratch = r0;
+ mov(zero_scratch, Operand::Zero());
+
+ if (root_array_available()) {
+ StoreP(
+ zero_scratch,
+ MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Push(addr_scratch);
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreP(zero_scratch, MemOperand(addr_scratch));
+ Pop(addr_scratch);
}
// Remove the frame created in PrepareCallCFunction
@@ -3013,7 +3014,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
SizeOfCodeGeneratedSince(&start_call));
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
+ Label* exit, DeoptimizeKind kind) {
+ USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in r29 (we don't need the roots array from now on).
@@ -3036,8 +3039,9 @@ void TurboAssembler::ZeroExtWord32(Register dst, Register src) {
}
void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
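
The CallCFunctionHelper rewrites across the ports all follow one pattern: before the C call, store the current PC and FP into the isolate's fast_c_call_caller slots, directly off kRootRegister when the root array is available and through an external reference otherwise; afterwards clear only the FP, since a zero FP already marks the record as dead. Schematically (helper and field names hypothetical):

#include <cstdint>

using Address = uintptr_t;
struct FastCCallSlots {  // stand-in for the IsolateData fields
  Address caller_pc = 0;
  Address caller_fp = 0;
};

void BeforeCCall(FastCCallSlots* slots, Address pc, Address fp) {
  slots->caller_pc = pc;  // lets stack iteration bridge the JS->C boundary
  slots->caller_fp = fp;
}
void AfterCCall(FastCCallSlots* slots) {
  // The PC is left stale on purpose; the FP is the source of truth.
  slots->caller_fp = 0;
}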
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 87de0a77c8..2c46124b24 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -365,6 +365,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFromFloatResult(DoubleRegister dst);
void Trap() override;
+ void DebugBreak() override;
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
@@ -419,7 +420,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void JumpCodeObject(Register code_object) override;
void CallBuiltinByIndex(Register builtin_index) override;
- void CallForDeoptimization(Address target, int deopt_id);
+ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
+ DeoptimizeKind kind);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -638,6 +640,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -763,7 +775,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, Register expected_parameter_count,
Register actual_parameter_count, InvokeFlag flag);
- void DebugBreak();
// Frame restart support
void MaybeDropFrames();
@@ -952,13 +963,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, Label* done,
InvokeFlag flag);
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index 92f7632e14..8c89aecec7 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -59,7 +59,6 @@ namespace internal {
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 32;
// Caller-saved/arguments registers
const RegList kJSCallerSaved = 1 << 3 | // r3 a1
@@ -138,10 +137,6 @@ const RegList kCalleeSavedDoubles = 1 << 14 | // d14
const int kNumCalleeSavedDoubles = 18;
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 32;
-
// The following constants describe the stack frame linkage area as
// defined by the ABI. Note that kNumRequiredStackFrameSlots must
// satisfy alignment requirements (rounding up if required).
@@ -177,12 +172,6 @@ const int kStackFrameLRSlot = 2;
const int kStackFrameExtraParamSlot = 14;
#endif
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h
index aa668a9158..21a7233016 100644
--- a/deps/v8/src/codegen/register-arch.h
+++ b/deps/v8/src/codegen/register-arch.h
@@ -16,7 +16,7 @@
#include "src/codegen/arm64/register-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/codegen/arm/register-arm.h"
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/register-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/register-mips.h"
diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc
index c8f768e6de..5752b46339 100644
--- a/deps/v8/src/codegen/register-configuration.cc
+++ b/deps/v8/src/codegen/register-configuration.cc
@@ -60,6 +60,8 @@ static int get_num_allocatable_double_registers() {
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC
kMaxAllocatableDoubleRegisterCount;
+#elif V8_TARGET_ARCH_PPC64
+ kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_S390
kMaxAllocatableDoubleRegisterCount;
#else
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 039a6746b1..9f07978932 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -329,7 +329,7 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
return false;
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
- defined(V8_TARGET_ARCH_S390)
+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390)
return true;
#endif
}
diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index f911bdabf6..e34cfa0cbd 100644
--- a/deps/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -48,7 +48,9 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() {
+ return CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1);
+}
void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
@@ -114,7 +116,25 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
-int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+void Assembler::set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode) {
+ Assembler::set_target_address_at(
+ pc, constant_pool, static_cast<Address>(target), icache_flush_mode);
+}
+
+int RelocInfo::target_address_size() {
+ if (IsCodedSpecially()) {
+ return Assembler::kSpecialTargetSize;
+ } else {
+ return kSystemPointerSize;
+ }
+}
+
+Tagged_t Assembler::target_compressed_address_at(Address pc,
+ Address constant_pool) {
+ return static_cast<Tagged_t>(target_address_at(pc, constant_pool));
+}
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
SixByteInstr instr =
@@ -124,32 +144,58 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
- return HeapObject::cast(
- Object(Assembler::target_address_at(pc_, constant_pool_)));
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ host_.address(),
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+ }
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
- return target_object();
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ isolate,
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return target_object();
+ }
+}
+
+Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(
+ Address pc, Address const_pool) {
+ return GetEmbeddedObject(target_compressed_address_at(pc, const_pool));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
- rmode_ == FULL_EMBEDDED_OBJECT);
- if (rmode_ == FULL_EMBEDDED_OBJECT) {
+ IsEmbeddedObjectMode(rmode_));
+ if (IsCodeTarget(rmode_) || IsRelativeCodeTarget(rmode_)) {
+ return Handle<HeapObject>::cast(origin->code_target_object_handle_at(pc_));
+ } else {
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return origin->compressed_embedded_object_handle_at(pc_, constant_pool_);
+ }
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
- } else {
- return Handle<HeapObject>::cast(origin->code_target_object_handle_at(pc_));
}
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
- icache_flush_mode);
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(
+ pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
+ } else {
+ DCHECK(IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
@@ -187,13 +233,16 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
void RelocInfo::WipeOut() {
- DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
Memory<Address>(pc_) = kNullAddress;
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(pc_, constant_pool_,
+ kNullAddress);
} else if (IsInternalReferenceEncoded(rmode_) || IsOffHeapTarget(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
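
The compressed-embedded-object plumbing above, together with the Decompress* helpers added to macro-assembler-s390.cc further down, implements pointer compression: a tagged pointer is stored as its low 32 bits and rebuilt by zero-extending and adding the heap base held in kRootRegister, which is exactly what the llgf/llgfr (load logical) plus agr (add) pairs do. The arithmetic in plain C++ (a sketch; type widths as used when compression is enabled):

#include <cstdint>

using Address = uintptr_t;   // full 64-bit tagged address
using Tagged_t = uint32_t;   // compressed on-heap representation

Tagged_t Compress(Address full) {
  return static_cast<Tagged_t>(full);  // keep the low 32 bits
}
Address Decompress(Address heap_base, Tagged_t compressed) {
  // Zero-extend, then re-add the base: llgf/llgfr + agr in the patch.
  return heap_base + static_cast<Address>(compressed);
}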
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 88d2265712..d96bfd8b84 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -580,16 +580,6 @@ void Assembler::next(Label* L) {
}
}
-bool Assembler::is_near(Label* L, Condition cond) {
- DCHECK(L->is_bound());
- if (L->is_bound() == false) return false;
-
- int maxReach = ((cond == al) ? 26 : 16);
- int offset = L->pos() - pc_offset();
-
- return is_intn(offset, maxReach);
-}
-
int Assembler::link(Label* L) {
int position;
if (L->is_bound()) {
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index 933996050c..bf746df27f 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -266,27 +266,30 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Otherwise, returns the current pc_offset().
int link(Label* L);
- // Determines if Label is bound and near enough so that a single
- // branch instruction can be used to reach it.
- bool is_near(Label* L, Condition cond);
-
// Returns the branch offset to the given label from the current code position
// Links the label to the current position if it is still unbound
int branch_offset(Label* L) { return link(L) - pc_offset(); }
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
void load_label_offset(Register r1, Label* L);
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Tagged_t target_compressed_address_at(Address pc,
+ Address constant_pool);
V8_INLINE static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ inline static void set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
inline Handle<Object> code_target_object_handle_at(Address pc);
+ inline Handle<HeapObject> compressed_embedded_object_handle_at(
+ Address pc, Address constant_pool);
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
diff --git a/deps/v8/src/codegen/s390/constants-s390.h b/deps/v8/src/codegen/s390/constants-s390.h
index e2a906204e..6cd5e4d9fa 100644
--- a/deps/v8/src/codegen/s390/constants-s390.h
+++ b/deps/v8/src/codegen/s390/constants-s390.h
@@ -613,7 +613,9 @@ using SixByteInstr = uint64_t;
V(vperm, VPERM, 0xE78C) /* type = VRR_E VECTOR PERMUTE */ \
V(vsel, VSEL, 0xE78D) /* type = VRR_E VECTOR SELECT */ \
V(vfms, VFMS, 0xE78E) /* type = VRR_E VECTOR FP MULTIPLY AND SUBTRACT */ \
- V(vfma, VFMA, 0xE78F) /* type = VRR_E VECTOR FP MULTIPLY AND ADD */
+ V(vfnms, VFNMS, \
+ 0xE79E) /* type = VRR_E VECTOR FP NEGATIVE MULTIPLY AND SUBTRACT */ \
+ V(vfma, VFMA, 0xE78F) /* type = VRR_E VECTOR FP MULTIPLY AND ADD */
#define S390_VRI_C_OPCODE_LIST(V) \
V(vrep, VREP, 0xE74D) /* type = VRI_C VECTOR REPLICATE */
@@ -2331,6 +2333,13 @@ class VRR_E_Instruction : SixByteInstruction {
DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M5Value, uint32_t, 28, 32)
};
+class VRR_F_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R2Value, int, 12, 16)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 16, 20)
+};
+
class VRX_Instruction : SixByteInstruction {
public:
DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
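
DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12) reads as: an accessor returning bits 8 up to (but not including) 12 of the 48-bit instruction, counted from the most significant bit, so the new VRR_F_Instruction simply names the three register fields of that format. A sketch of the extraction the macro presumably performs (internals assumed, not copied from V8):

#include <cstdint>

using SixByteInstr = uint64_t;  // low 48 bits hold the instruction

// Extract the [start, end) bit positions, numbered from the MSB of the
// 48-bit instruction word, as s390 documentation does.
int FieldValue(SixByteInstr instr, int start, int end) {
  const int kInstrBits = 48;
  const int width = end - start;
  return static_cast<int>((instr >> (kInstrBits - end)) &
                          ((uint64_t{1} << width) - 1));
}
// Example: R1Value of a VRR_F instruction is FieldValue(instr, 8, 12).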
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index c540a5773b..77bf0ee916 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -122,7 +122,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadP(destination, MemOperand(destination, offset), r1);
+ LoadTaggedPointerField(
+ destination,
+ FieldMemOperand(destination,
+ FixedArray::OffsetOfElementAt(constant_index)),
+ r1);
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
@@ -287,15 +291,22 @@ void TurboAssembler::Push(Smi smi) {
push(r0);
}
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
+void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
+ RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(dst, value);
return;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ EmbeddedObjectIndex index = AddEmbeddedObject(value);
+ DCHECK(is_uint32(index));
+ mov(dst, Operand(static_cast<int>(index), rmode));
+ } else {
+ DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
+ mov(dst, Operand(value.address(), rmode));
}
- mov(dst, Operand(value));
}
void TurboAssembler::Move(Register dst, ExternalReference reference) {
@@ -426,6 +437,96 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
+void TurboAssembler::LoadTaggedPointerField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(destination, field_operand);
+ } else {
+ LoadP(destination, field_operand, scratch);
+ }
+}
+
+void TurboAssembler::LoadAnyTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressAnyTagged(destination, field_operand);
+ } else {
+ LoadP(destination, field_operand, scratch);
+ }
+}
+
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+ if (SmiValuesAre31Bits()) {
+ LoadW(dst, src);
+ } else {
+ LoadP(dst, src);
+ }
+ SmiUntag(dst);
+}
+
+void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+ SmiUntag(dst, src);
+}
+
+void TurboAssembler::StoreTaggedField(const Register& value,
+ const MemOperand& dst_field_operand,
+ const Register& scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ RecordComment("[ StoreTagged");
+ StoreW(value, dst_field_operand);
+ RecordComment("]");
+ } else {
+ StoreP(value, dst_field_operand, scratch);
+ }
+}
+
+void TurboAssembler::DecompressTaggedSigned(Register destination,
+ Register src) {
+ RecordComment("[ DecompressTaggedSigned");
+ llgfr(destination, src);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedSigned(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressTaggedSigned");
+ llgf(destination, field_operand);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedPointer(Register destination,
+ Register source) {
+ RecordComment("[ DecompressTaggedPointer");
+ llgfr(destination, source);
+ agr(destination, kRootRegister);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedPointer(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressTaggedPointer");
+ llgf(destination, field_operand);
+ agr(destination, kRootRegister);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressAnyTagged(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressAnyTagged");
+ llgf(destination, field_operand);
+ agr(destination, kRootRegister);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressAnyTagged(Register destination,
+ Register source) {
+ RecordComment("[ DecompressAnyTagged");
+ llgfr(destination, source);
+ agr(destination, kRootRegister);
+ RecordComment("]");
+}
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
LinkRegisterStatus lr_status,
@@ -443,12 +544,12 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kTaggedSize.
- DCHECK(IsAligned(offset, kSystemPointerSize));
+ DCHECK(IsAligned(offset, kTaggedSize));
lay(dst, MemOperand(object, offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- AndP(r0, dst, Operand(kSystemPointerSize - 1));
+ AndP(r0, dst, Operand(kTaggedSize - 1));
beq(&ok, Label::kNear);
stop();
bind(&ok);
@@ -582,7 +683,8 @@ void MacroAssembler::RecordWrite(Register object, Register address,
SmiCheck smi_check) {
DCHECK(object != value);
if (emit_debug_code()) {
- CmpP(value, MemOperand(address));
+ LoadTaggedPointerField(r0, MemOperand(address));
+ CmpP(value, r0);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -670,23 +772,6 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that lowest encodings are closest to the stack pointer.
- RegList regs = kSafepointSavedRegisters;
- int index = 0;
-
- DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
-
- for (int16_t i = 0; i < reg_code; i++) {
- if ((regs & (1 << i)) != 0) {
- index++;
- }
- }
-
- return index;
-}
-
void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN
@@ -1348,17 +1433,17 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedPointerField(code,
+ FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
JumpCodeObject(code);
}
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
}
void MacroAssembler::InvokeFunctionWithNewTarget(
@@ -1372,8 +1457,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register expected_reg = r4;
Register temp_reg = r6;
- LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+ LoadTaggedPointerField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
+ LoadTaggedPointerField(
+ temp_reg, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
LoadLogicalHalfWordP(
expected_reg,
FieldMemOperand(temp_reg,
@@ -1394,7 +1480,8 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r3);
// Get the function and setup the context.
- LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+ LoadTaggedPointerField(cp,
+ FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(r3, no_reg, expected_parameter_count,
actual_parameter_count, flag);
@@ -1464,7 +1551,11 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
- CmpP(obj, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
+ int32_t offset = RootRegisterOffsetForRootIndex(index);
+#ifdef V8_TARGET_BIG_ENDIAN
+ offset += (COMPRESS_POINTERS_BOOL ? kTaggedSize : 0);
+#endif
+ CompareTagged(obj, MemOperand(kRootRegister, offset));
}
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
@@ -1651,14 +1742,16 @@ void TurboAssembler::Abort(AbortReason reason) {
}
void MacroAssembler::LoadMap(Register destination, Register object) {
- LoadP(destination, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(destination,
+ FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadMap(dst, cp);
- LoadP(dst, FieldMemOperand(
- dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- LoadP(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedPointerField(
+ dst, FieldMemOperand(
+ dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
+ LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
void MacroAssembler::AssertNotSmi(Register object) {
@@ -1845,16 +1938,24 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
- if (isolate() != nullptr) {
- Register scratch = r6;
- push(scratch);
+ Register addr_scratch = r1;
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ LoadPC(r0);
+ StoreP(r0, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ StoreP(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
- Move(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
LoadPC(r0);
- StoreP(r0, MemOperand(scratch));
- Move(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreP(fp, MemOperand(scratch));
- pop(scratch);
+ StoreP(r0, MemOperand(addr_scratch));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreP(fp, MemOperand(addr_scratch));
}
// Just call directly. The function called cannot cause a GC, or
@@ -1868,15 +1969,19 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- Register scratch1 = r6;
- Register scratch2 = r7;
- Push(scratch1, scratch2);
- Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- lghi(scratch2, Operand::Zero());
- StoreP(scratch2, MemOperand(scratch1));
- Pop(scratch1, scratch2);
+ // We don't unset the PC; the FP is the source of truth.
+ Register zero_scratch = r0;
+ lghi(zero_scratch, Operand::Zero());
+
+ if (root_array_available()) {
+ StoreP(
+ zero_scratch,
+ MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreP(zero_scratch, MemOperand(addr_scratch));
}
int stack_passed_arguments =
@@ -3757,9 +3862,15 @@ void TurboAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
ldebr(dst, dst);
}
-void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem) {
- DCHECK(is_uint12(mem.offset()));
- vl(dst, mem, Condition(0));
+void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
+ Register scratch) {
+ if (is_uint12(mem.offset())) {
+ vl(dst, mem, Condition(0));
+ } else {
+ DCHECK(is_int20(mem.offset()));
+ lay(scratch, mem);
+ vl(dst, MemOperand(scratch), Condition(0));
+ }
}
// Store Double Precision (64-bit) Floating Point number to memory
@@ -3789,9 +3900,15 @@ void TurboAssembler::StoreDoubleAsFloat32(DoubleRegister src,
StoreFloat32(scratch, mem);
}
-void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
- DCHECK(is_uint12(mem.offset()));
- vst(src, mem, Condition(0));
+void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem,
+ Register scratch) {
+ if (is_uint12(mem.offset())) {
+ vst(src, mem, Condition(0));
+ } else {
+ DCHECK(is_int20(mem.offset()));
+ lay(scratch, mem);
+ vst(src, MemOperand(scratch), Condition(0));
+ }
}
void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
@@ -4197,13 +4314,17 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
}
void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
- DoubleRegister scratch_0,
- DoubleRegister scratch_1) {
- DCHECK(!AreAliased(scratch_0, scratch_1));
- LoadFloat32(scratch_0, src);
- LoadFloat32(scratch_1, dst);
- StoreFloat32(scratch_0, dst);
- StoreFloat32(scratch_1, src);
+ DoubleRegister scratch) {
+ // push d0, to be used as scratch
+ lay(sp, MemOperand(sp, -kDoubleSize));
+ StoreDouble(d0, MemOperand(sp));
+ LoadFloat32(scratch, src);
+ LoadFloat32(d0, dst);
+ StoreFloat32(scratch, dst);
+ StoreFloat32(d0, src);
+ // restore d0
+ LoadDouble(d0, MemOperand(sp));
+ lay(sp, MemOperand(sp, kDoubleSize));
}
void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
@@ -4224,13 +4345,17 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
}
void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
- DoubleRegister scratch_0,
- DoubleRegister scratch_1) {
- DCHECK(!AreAliased(scratch_0, scratch_1));
- LoadDouble(scratch_0, src);
- LoadDouble(scratch_1, dst);
- StoreDouble(scratch_0, dst);
- StoreDouble(scratch_1, src);
+ DoubleRegister scratch) {
+ // push d0, to be used as scratch
+ lay(sp, MemOperand(sp, -kDoubleSize));
+ StoreDouble(d0, MemOperand(sp));
+ LoadDouble(scratch, src);
+ LoadDouble(d0, dst);
+ StoreDouble(scratch, dst);
+ StoreDouble(d0, src);
+ // restore d0
+ LoadDouble(d0, MemOperand(sp));
+ lay(sp, MemOperand(sp, kDoubleSize));
}
void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
@@ -4245,17 +4370,22 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
Simd128Register scratch) {
DCHECK(!AreAliased(src, scratch));
vlr(scratch, src, Condition(0), Condition(0), Condition(0));
- LoadSimd128(src, dst);
- StoreSimd128(scratch, dst);
+ LoadSimd128(src, dst, ip);
+ StoreSimd128(scratch, dst, ip);
}
void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
- Simd128Register scratch_0,
- Simd128Register scratch_1) {
- LoadSimd128(scratch_0, src);
- LoadSimd128(scratch_1, dst);
- StoreSimd128(scratch_0, dst);
- StoreSimd128(scratch_1, src);
+ Simd128Register scratch) {
+ // push d0, to be used as scratch
+ lay(sp, MemOperand(sp, -kSimd128Size));
+ StoreSimd128(d0, MemOperand(sp), ip);
+ LoadSimd128(scratch, src, ip);
+ LoadSimd128(d0, dst, ip);
+ StoreSimd128(scratch, dst, ip);
+ StoreSimd128(d0, src, ip);
+ // restore d0
+ LoadSimd128(d0, MemOperand(sp), ip);
+ lay(sp, MemOperand(sp, kSimd128Size));
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
@@ -4286,20 +4416,17 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
-
// The builtin_index register contains the builtin index as a Smi.
- // Untagging is folded into the indexing operand below.
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiShiftSize == 0);
- ShiftLeftP(builtin_index, builtin_index,
- Operand(kSystemPointerSizeLog2 - kSmiShift));
-#else
- ShiftRightArithP(builtin_index, builtin_index,
- Operand(kSmiShift - kSystemPointerSizeLog2));
-#endif
- AddP(builtin_index, builtin_index,
- Operand(IsolateData::builtin_entry_table_offset()));
- LoadP(builtin_index, MemOperand(kRootRegister, builtin_index));
+ if (SmiValuesAre32Bits()) {
+ ShiftRightArithP(builtin_index, builtin_index,
+ Operand(kSmiShift - kSystemPointerSizeLog2));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ ShiftLeftP(builtin_index, builtin_index,
+ Operand(kSystemPointerSizeLog2 - kSmiShift));
+ }
+ LoadP(builtin_index, MemOperand(kRootRegister, builtin_index,
+ IsolateData::builtin_entry_table_offset()));
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -4382,7 +4509,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
bind(&return_label);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
+ Label* exit, DeoptimizeKind kind) {
+ USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in r10 (we don't need the roots array from now on).
@@ -4392,6 +4521,7 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
}
void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
} // namespace internal
} // namespace v8
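
The llgf/agr pairs added above implement pointer decompression on s390x: a compressed tagged field holds a 32-bit offset from the isolate root (the heap cage base), so decompression is a zero-extending 32-to-64-bit load followed by an add of kRootRegister. A minimal sketch of the computation, not part of the patch:

    // Sketch (assumption): semantic effect of the DecompressTaggedPointer
    // sequence above -- llgf zero-extends the 32-bit compressed value,
    // agr adds the 64-bit root register.
    uint64_t DecompressTaggedPointer(uint64_t root_register,
                                     uint32_t compressed) {
      return root_register + static_cast<uint64_t>(compressed);
    }
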
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index d0d6ca6c06..8b267eb2c6 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -153,7 +153,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
- void CallForDeoptimization(Address target, int deopt_id);
+ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
+ DeoptimizeKind kind);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -178,7 +179,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
- void Move(Register dst, Handle<HeapObject> value);
+ void Move(Register dst, Handle<HeapObject> source,
+ RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
@@ -417,7 +419,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
- void LoadSimd128(Simd128Register dst, const MemOperand& mem);
+ void LoadSimd128(Simd128Register dst, const MemOperand& mem,
+ Register scratch);
void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch);
@@ -449,7 +452,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
DoubleRegister scratch);
- void StoreSimd128(Simd128Register src, const MemOperand& mem);
+ void StoreSimd128(Simd128Register src, const MemOperand& mem,
+ Register scratch);
void Branch(Condition c, const Operand& opnd);
void BranchOnCount(Register r1, Label* l);
@@ -782,19 +786,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SwapFloat32(DoubleRegister src, DoubleRegister dst,
DoubleRegister scratch);
void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
- void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
- DoubleRegister scratch_1);
+ void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch);
void SwapDouble(DoubleRegister src, DoubleRegister dst,
DoubleRegister scratch);
void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
- void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
- DoubleRegister scratch_1);
+ void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch);
void SwapSimd128(Simd128Register src, Simd128Register dst,
Simd128Register scratch);
void SwapSimd128(Simd128Register src, MemOperand dst,
Simd128Register scratch);
- void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch_0,
- Simd128Register scratch_1);
+ void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
// Cleanse pointer address on 31-bit by zeroing out the top bit.
// This is a NOP on 64-bit.
@@ -849,6 +850,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFromFloatResult(DoubleRegister dst);
void Trap() override;
+ void DebugBreak() override;
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
@@ -971,16 +973,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// High bits must be identical to fit into a 32-bit integer
cgfr(value, value);
}
- void SmiUntag(Register reg, int scale = 0) { SmiUntag(reg, reg, scale); }
+ void SmiUntag(Register reg) { SmiUntag(reg, reg); }
- void SmiUntag(Register dst, Register src, int scale = 0) {
- if (scale > kSmiShift) {
- ShiftLeftP(dst, src, Operand(scale - kSmiShift));
- } else if (scale < kSmiShift) {
- ShiftRightArithP(dst, src, Operand(kSmiShift - scale));
+ void SmiUntag(Register dst, const MemOperand& src);
+ void SmiUntag(Register dst, Register src) {
+ if (SmiValuesAre31Bits()) {
+ ShiftRightArith(dst, src, Operand(kSmiShift));
} else {
- // do nothing
+ ShiftRightArithP(dst, src, Operand(kSmiShift));
}
+ lgfr(dst, dst);
}
// Activation support.
@@ -996,11 +998,50 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ComputeCodeStartAddress(Register dst);
void LoadPC(Register dst);
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
+ // ---------------------------------------------------------------------------
+  // Pointer compression support
+
+ // Loads a field containing a HeapObject and decompresses it if pointer
+ // compression is enabled.
+ void LoadTaggedPointerField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch = no_reg);
+
+ // Loads a field containing any tagged value and decompresses it if necessary.
+ void LoadAnyTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch = no_reg);
+
+  // Loads a field containing a Smi value and untags it.
+ void SmiUntagField(Register dst, const MemOperand& src);
+
+  // Compresses and stores a tagged value to the given on-heap location.
+ void StoreTaggedField(const Register& value,
+ const MemOperand& dst_field_operand,
+ const Register& scratch = no_reg);
+
+ void DecompressTaggedSigned(Register destination, MemOperand field_operand);
+ void DecompressTaggedSigned(Register destination, Register src);
+ void DecompressTaggedPointer(Register destination, MemOperand field_operand);
+ void DecompressTaggedPointer(Register destination, Register source);
+ void DecompressAnyTagged(Register destination, MemOperand field_operand);
+ void DecompressAnyTagged(Register destination, Register source);
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1072,6 +1113,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Push(r0);
}
+ template <class T>
+ void CompareTagged(Register src1, T src2) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Cmp32(src1, src2);
+ } else {
+ CmpP(src1, src2);
+ }
+ }
+
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
@@ -1278,13 +1328,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count, Label* done,
InvokeFlag flag);
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index dddd16d89c..21094ef3bc 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -38,7 +38,6 @@ namespace internal {
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 16;
// Caller-saved/arguments registers
const RegList kJSCallerSaved = 1 << 1 | 1 << 2 | // r2 a1
@@ -87,17 +86,6 @@ const RegList kCalleeSavedDoubles = 1 << 8 | // d8
const int kNumCalleeSavedDoubles = 8;
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-const int kNumSafepointRegisters = 16;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
// The following constants describe the stack frame linkage area as
// defined by the ABI.
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index b0c9d95456..0f03867331 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -4,6 +4,8 @@
#include "src/codegen/source-position-table.h"
+#include "src/base/export-template.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
@@ -153,8 +155,9 @@ void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
#endif
}
+template <typename LocalIsolate>
Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
- Isolate* isolate) {
+ LocalIsolate* isolate) {
if (bytes_.empty()) return isolate->factory()->empty_byte_array();
DCHECK(!Omit());
@@ -165,7 +168,9 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
#ifdef ENABLE_SLOW_DCHECKS
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
- SourcePositionTableIterator it(*table, SourcePositionTableIterator::kAll);
+ SourcePositionTableIterator it(
+ *table, SourcePositionTableIterator::kAll,
+ SourcePositionTableIterator::kDontSkipFunctionEntry);
CheckTableEquals(raw_entries_, &it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
@@ -173,6 +178,13 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
return table;
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
+ Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
+ OffThreadIsolate* isolate);
+
OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
if (bytes_.empty()) return OwnedVector<byte>();
DCHECK(!Omit());
@@ -182,8 +194,9 @@ OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
#ifdef ENABLE_SLOW_DCHECKS
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
- SourcePositionTableIterator it(table.as_vector(),
- SourcePositionTableIterator::kAll);
+ SourcePositionTableIterator it(
+ table.as_vector(), SourcePositionTableIterator::kAll,
+ SourcePositionTableIterator::kDontSkipFunctionEntry);
CheckTableEquals(raw_entries_, &it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
@@ -191,16 +204,30 @@ OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
return table;
}
-SourcePositionTableIterator::SourcePositionTableIterator(ByteArray byte_array,
- IterationFilter filter)
- : raw_table_(VectorFromByteArray(byte_array)), filter_(filter) {
+void SourcePositionTableIterator::Initialize() {
Advance();
+ if (function_entry_filter_ == kSkipFunctionEntry &&
+ current_.code_offset == kFunctionEntryBytecodeOffset && !done()) {
+ Advance();
+ }
}
SourcePositionTableIterator::SourcePositionTableIterator(
- Handle<ByteArray> byte_array, IterationFilter filter)
- : table_(byte_array), filter_(filter) {
- Advance();
+ ByteArray byte_array, IterationFilter iteration_filter,
+ FunctionEntryFilter function_entry_filter)
+ : raw_table_(VectorFromByteArray(byte_array)),
+ iteration_filter_(iteration_filter),
+ function_entry_filter_(function_entry_filter) {
+ Initialize();
+}
+
+SourcePositionTableIterator::SourcePositionTableIterator(
+ Handle<ByteArray> byte_array, IterationFilter iteration_filter,
+ FunctionEntryFilter function_entry_filter)
+ : table_(byte_array),
+ iteration_filter_(iteration_filter),
+ function_entry_filter_(function_entry_filter) {
+ Initialize();
#ifdef DEBUG
// We can enable allocation because we keep the table in a handle.
no_gc.Release();
@@ -208,9 +235,12 @@ SourcePositionTableIterator::SourcePositionTableIterator(
}
SourcePositionTableIterator::SourcePositionTableIterator(
- Vector<const byte> bytes, IterationFilter filter)
- : raw_table_(bytes), filter_(filter) {
- Advance();
+ Vector<const byte> bytes, IterationFilter iteration_filter,
+ FunctionEntryFilter function_entry_filter)
+ : raw_table_(bytes),
+ iteration_filter_(iteration_filter),
+ function_entry_filter_(function_entry_filter) {
+ Initialize();
#ifdef DEBUG
// We can enable allocation because the underlying vector does not move.
no_gc.Release();
@@ -231,9 +261,10 @@ void SourcePositionTableIterator::Advance() {
DecodeEntry(bytes, &index_, &tmp);
AddAndSetEntry(&current_, tmp);
SourcePosition p = source_position();
- filter_satisfied = (filter_ == kAll) ||
- (filter_ == kJavaScriptOnly && p.IsJavaScript()) ||
- (filter_ == kExternalOnly && p.IsExternal());
+ filter_satisfied =
+ (iteration_filter_ == kAll) ||
+ (iteration_filter_ == kJavaScriptOnly && p.IsJavaScript()) ||
+ (iteration_filter_ == kExternalOnly && p.IsExternal());
}
}
}
diff --git a/deps/v8/src/codegen/source-position-table.h b/deps/v8/src/codegen/source-position-table.h
index b3bba3bc8a..024eca54fa 100644
--- a/deps/v8/src/codegen/source-position-table.h
+++ b/deps/v8/src/codegen/source-position-table.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_SOURCE_POSITION_TABLE_H_
#define V8_CODEGEN_SOURCE_POSITION_TABLE_H_
+#include "src/base/export-template.h"
#include "src/codegen/source-position.h"
#include "src/common/assert-scope.h"
#include "src/common/checks.h"
@@ -23,7 +24,9 @@ class Zone;
struct PositionTableEntry {
PositionTableEntry()
- : code_offset(0), source_position(0), is_statement(false) {}
+ : code_offset(kFunctionEntryBytecodeOffset),
+ source_position(0),
+ is_statement(false) {}
PositionTableEntry(int offset, int64_t source, bool statement)
: code_offset(offset), source_position(source), is_statement(statement) {}
@@ -51,7 +54,9 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
void AddPosition(size_t code_offset, SourcePosition source_position,
bool is_statement);
- Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
OwnedVector<byte> ToSourcePositionTableVector();
inline bool Omit() const { return mode_ != RECORD_SOURCE_POSITIONS; }
@@ -70,13 +75,23 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
class V8_EXPORT_PRIVATE SourcePositionTableIterator {
public:
+ // Filter that applies when advancing the iterator. If the filter isn't
+ // satisfied, we advance the iterator again.
enum IterationFilter { kJavaScriptOnly = 0, kExternalOnly = 1, kAll = 2 };
+ // Filter that applies only to the first entry of the source position table.
+ // If it is kSkipFunctionEntry, it will skip the FunctionEntry entry if it
+ // exists.
+ enum FunctionEntryFilter {
+ kSkipFunctionEntry = 0,
+ kDontSkipFunctionEntry = 1
+ };
// Used for saving/restoring the iterator.
struct IndexAndPositionState {
int index_;
PositionTableEntry position_;
- IterationFilter filter_;
+ IterationFilter iteration_filter_;
+ FunctionEntryFilter function_entry_filter_;
};
// We expose three flavours of the iterator, depending on the argument passed
@@ -85,18 +100,23 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
// Handlified iterator allows allocation, but it needs a handle (and thus
// a handle scope). This is the preferred version.
explicit SourcePositionTableIterator(
- Handle<ByteArray> byte_array, IterationFilter filter = kJavaScriptOnly);
+ Handle<ByteArray> byte_array,
+ IterationFilter iteration_filter = kJavaScriptOnly,
+ FunctionEntryFilter function_entry_filter = kSkipFunctionEntry);
// Non-handlified iterator does not need a handle scope, but it disallows
// allocation during its lifetime. This is useful if there is no handle
// scope around.
explicit SourcePositionTableIterator(
- ByteArray byte_array, IterationFilter filter = kJavaScriptOnly);
+ ByteArray byte_array, IterationFilter iteration_filter = kJavaScriptOnly,
+ FunctionEntryFilter function_entry_filter = kSkipFunctionEntry);
// Handle-safe iterator based on a vector located outside the garbage
// collected heap, allows allocation during its lifetime.
explicit SourcePositionTableIterator(
- Vector<const byte> bytes, IterationFilter filter = kJavaScriptOnly);
+ Vector<const byte> bytes,
+ IterationFilter iteration_filter = kJavaScriptOnly,
+ FunctionEntryFilter function_entry_filter = kSkipFunctionEntry);
void Advance();
@@ -114,22 +134,30 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
}
bool done() const { return index_ == kDone; }
- IndexAndPositionState GetState() const { return {index_, current_, filter_}; }
+ IndexAndPositionState GetState() const {
+ return {index_, current_, iteration_filter_, function_entry_filter_};
+ }
void RestoreState(const IndexAndPositionState& saved_state) {
index_ = saved_state.index_;
current_ = saved_state.position_;
- filter_ = saved_state.filter_;
+ iteration_filter_ = saved_state.iteration_filter_;
+ function_entry_filter_ = saved_state.function_entry_filter_;
}
private:
+  // Initializes the source position iterator with the first valid bytecode.
+ // Also sets the FunctionEntry SourcePosition if it exists.
+ void Initialize();
+
static const int kDone = -1;
Vector<const byte> raw_table_;
Handle<ByteArray> table_;
int index_ = 0;
PositionTableEntry current_;
- IterationFilter filter_;
+ IterationFilter iteration_filter_;
+ FunctionEntryFilter function_entry_filter_;
DISALLOW_HEAP_ALLOCATION(no_gc)
};
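
A usage sketch for the new FunctionEntryFilter parameter, mirroring the slow-DCHECK verification in source-position-table.cc above; the call site and the `table` variable are hypothetical:

    // Iterate every entry, including the synthetic function-entry marker
    // that the default kSkipFunctionEntry would drop.
    SourcePositionTableIterator it(
        table, SourcePositionTableIterator::kAll,
        SourcePositionTableIterator::kDontSkipFunctionEntry);
    for (; !it.done(); it.Advance()) {
      // it.code_offset() may now be kFunctionEntryBytecodeOffset (-1) for
      // the function-entry position.
    }
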
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index 64a4272d17..ecf9afab48 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -13,6 +13,8 @@ namespace internal {
class HeapNumber;
class BigInt;
class Object;
+class Smi;
+class TaggedIndex;
namespace compiler {
@@ -130,6 +132,10 @@ template <>
struct MachineTypeOf<Smi> {
static constexpr MachineType value = MachineType::TaggedSigned();
};
+template <>
+struct MachineTypeOf<TaggedIndex> {
+ static constexpr MachineType value = MachineType::Pointer();
+};
template <class HeapObjectSubtype>
struct MachineTypeOf<HeapObjectSubtype,
typename std::enable_if<std::is_base_of<
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index bb38eb5554..26cf12d4eb 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -86,6 +86,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
virtual void LoadRoot(Register destination, RootIndex index) = 0;
virtual void Trap() = 0;
+ virtual void DebugBreak() = 0;
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index);
static int32_t RootRegisterOffsetForBuiltinIndex(int builtin_index);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index a058cbb24e..1106626a13 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -3340,26 +3340,6 @@ void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtsd2ss(XMMRegister dst, Operand src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3380,62 +3360,6 @@ void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x54);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::andpd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x54);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x56);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::orpd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x56);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::xorpd(XMMRegister dst, Operand src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
void Assembler::haddps(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 2392146065..3f58e3b428 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -901,6 +901,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE_INSTRUCTION_LIST_SS(DECLARE_SSE2_INSTRUCTION)
SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
SSE2_INSTRUCTION_LIST_SD(DECLARE_SSE2_INSTRUCTION)
+ SSE2_UNOP_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
#undef DECLARE_SSE2_INSTRUCTION
void sse2_instr(XMMRegister reg, byte imm8, byte prefix, byte escape,
@@ -928,6 +929,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
#undef DECLARE_SSE2_AVX_INSTRUCTION
+#define DECLARE_SSE2_UNOP_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
+ void v##instruction(XMMRegister dst, XMMRegister src) { \
+ vpd(0x##opcode, dst, xmm0, src); \
+ } \
+ void v##instruction(XMMRegister dst, Operand src) { \
+ vpd(0x##opcode, dst, xmm0, src); \
+ }
+
+ SSE2_UNOP_INSTRUCTION_LIST(DECLARE_SSE2_UNOP_AVX_INSTRUCTION)
+#undef DECLARE_SSE2_UNOP_AVX_INSTRUCTION
+
// SSE3
void lddqu(XMMRegister dst, Operand src);
void movddup(XMMRegister dst, Operand src);
@@ -949,6 +961,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
SSSE3_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
+ SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
#undef DECLARE_SSSE3_INSTRUCTION
// SSE4
@@ -972,7 +985,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
- SSE4_PMOV_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
+ SSE4_UNOP_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
DECLARE_SSE4_INSTRUCTION(blendvpd, 66, 0F, 38, 15)
#undef DECLARE_SSE4_INSTRUCTION
@@ -1021,6 +1034,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_2_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
#undef DECLARE_SSE34_AVX_INSTRUCTION
+#define DECLARE_SSSE3_UNOP_AVX_INSTRUCTION(instruction, prefix, escape1, \
+ escape2, opcode) \
+ void v##instruction(XMMRegister dst, XMMRegister src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
+ } \
+ void v##instruction(XMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
+ }
+
+ SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSSE3_UNOP_AVX_INSTRUCTION)
+#undef DECLARE_SSSE3_UNOP_AVX_INSTRUCTION
+
void vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask) {
vinstr(0x4B, dst, src1, src2, k66, k0F3A, kW0);
@@ -1036,7 +1061,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, Operand src) { \
vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
}
- SSE4_PMOV_INSTRUCTION_LIST(DECLARE_SSE4_PMOV_AVX_INSTRUCTION)
+ SSE4_UNOP_INSTRUCTION_LIST(DECLARE_SSE4_PMOV_AVX_INSTRUCTION)
#undef DECLARE_SSE4_PMOV_AVX_INSTRUCTION
void movd(XMMRegister dst, Register src);
@@ -1086,19 +1111,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, Operand src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, Operand src);
void cvtsd2si(Register dst, XMMRegister src);
void cvtsd2siq(Register dst, XMMRegister src);
- void andpd(XMMRegister dst, XMMRegister src);
- void andpd(XMMRegister dst, Operand src);
- void orpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, Operand src);
- void xorpd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, Operand src);
-
void haddps(XMMRegister dst, XMMRegister src);
void haddps(XMMRegister dst, Operand src);
@@ -1224,10 +1240,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
impl(opcode, dst, src1, src2); \
}
- AVX_3(vandpd, 0x54, vpd)
- AVX_3(vorpd, 0x56, vpd)
- AVX_3(vxorpd, 0x57, vpd)
- AVX_3(vcvtsd2ss, 0x5a, vsd)
AVX_3(vhaddps, 0x7c, vsd)
#define AVX_SCALAR(instr, prefix, escape, opcode) \
@@ -1261,6 +1273,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
+ void vcvttps2dq(XMMRegister dst, XMMRegister src) {
+ vinstr(0x5b, dst, xmm0, src, kF3, k0F, kWIG);
+ }
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
@@ -1508,6 +1523,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
emit(imm8);
}
+ void vpshufd(XMMRegister dst, Operand src, uint8_t imm8) {
+ vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
emit(imm8);
@@ -1516,6 +1535,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
emit(imm8);
}
+ void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
+ vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpshufhw(XMMRegister dst, Operand src, uint8_t imm8) {
+    vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
+ emit(imm8);
+ }
void vpblendw(XMMRegister dst, XMMRegister src1, XMMRegister src2,
uint8_t mask) {
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 60b2c2b10c..a3389f1bb0 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -34,28 +34,18 @@
namespace v8 {
namespace internal {
-Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
+Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
DCHECK_GE(index, 0);
- int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
- int displacement_to_last_argument =
- base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
- displacement_to_last_argument += extra_displacement_to_last_argument_;
- if (argument_count_reg_ == no_reg) {
- // argument[0] is at base_reg_ + displacement_to_last_argument +
- // (argument_count_immediate_ + receiver - 1) * kSystemPointerSize.
- DCHECK_GT(argument_count_immediate_ + receiver, 0);
- return Operand(base_reg_,
- displacement_to_last_argument +
- (argument_count_immediate_ + receiver - 1 - index) *
- kSystemPointerSize);
- } else {
- // argument[0] is at base_reg_ + displacement_to_last_argument +
- // argument_count_reg_ * times_system_pointer_size + (receiver - 1) *
- // kSystemPointerSize.
- return Operand(base_reg_, argument_count_reg_, times_system_pointer_size,
- displacement_to_last_argument +
- (receiver - 1 - index) * kSystemPointerSize);
- }
+#ifdef V8_REVERSE_JSARGS
+ // arg[0] = rsp + kPCOnStackSize;
+ // arg[i] = arg[0] + i * kSystemPointerSize;
+ return Operand(rsp, kPCOnStackSize + index * kSystemPointerSize);
+#else
+ // arg[0] = (rsp + kPCOnStackSize) + argc * kSystemPointerSize;
+ // arg[i] = arg[0] - i * kSystemPointerSize;
+ return Operand(rsp, argc_, times_system_pointer_size,
+ kPCOnStackSize - index * kSystemPointerSize);
+#endif
}
void MacroAssembler::Load(Register destination, ExternalReference source) {
@@ -1411,6 +1401,31 @@ void TurboAssembler::Push(Handle<HeapObject> source) {
Push(kScratchRegister);
}
+void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+ PushArrayOrder order) {
+ DCHECK(!AreAliased(array, size, scratch));
+ Register counter = scratch;
+ Label loop, entry;
+ if (order == PushArrayOrder::kReverse) {
+ Set(counter, 0);
+ jmp(&entry);
+ bind(&loop);
+ Push(Operand(array, counter, times_system_pointer_size, 0));
+ incq(counter);
+ bind(&entry);
+ cmpq(counter, size);
+ j(less, &loop, Label::kNear);
+ } else {
+ movq(counter, size);
+ jmp(&entry);
+ bind(&loop);
+ Push(Operand(array, counter, times_system_pointer_size, 0));
+ bind(&entry);
+ decq(counter);
+ j(greater_equal, &loop, Label::kNear);
+ }
+}
+
void TurboAssembler::Move(Register result, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
@@ -2078,7 +2093,7 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
DCHECK_NE(int32_register, kScratchRegister);
movq(kScratchRegister, int64_t{0x0000000100000000});
cmpq(kScratchRegister, int32_register);
- Check(above_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
+ Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
}
}
@@ -2371,8 +2386,15 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- Push(
- StackArgumentsAccessor(rbp, actual_parameter_count).GetReceiverOperand());
+ // Arguments are located 2 words below the base pointer.
+#ifdef V8_REVERSE_JSARGS
+ Operand receiver_op = Operand(rbp, kSystemPointerSize * 2);
+#else
+ Operand receiver_op =
+ Operand(rbp, actual_parameter_count, times_system_pointer_size,
+ kSystemPointerSize * 2);
+#endif
+ Push(receiver_op);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
@@ -2651,23 +2673,57 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
- if (isolate() != nullptr) {
- Label get_pc;
- DCHECK(!AreAliased(kScratchRegister, function));
- leaq(kScratchRegister, Operand(&get_pc, 0));
- bind(&get_pc);
+ Label get_pc;
+ DCHECK(!AreAliased(kScratchRegister, function));
+ leaq(kScratchRegister, Operand(&get_pc, 0));
+ bind(&get_pc);
+
+ // Addressing the following external references is tricky because we need
+ // this to work in three situations:
+ // 1. In wasm compilation, the isolate is nullptr and thus no
+ // ExternalReference can be created, but we can construct the address
+ // directly using the root register and a static offset.
+ // 2. In normal JIT (and builtin) compilation, the external reference is
+  //    usually addressed through the root register, so we can use the
+  //    root-register offset directly in most cases.
+ // 3. In regexp compilation, the external reference is embedded into the reloc
+ // info.
+  // The solution here is to use root register offsets wherever possible, in
+  // which case the address can be constructed directly. When falling back to
+  // external references, we need to ensure that the scratch register does not
+  // get accidentally overwritten. If we run into more such cases in the
+  // future, we should implement a more general solution.
+ if (root_array_available()) {
+ movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()),
+ kScratchRegister);
+ movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
+ rbp);
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ // Use alternative scratch register in order not to overwrite
+ // kScratchRegister.
+ Register scratch = r12;
+ pushq(scratch);
+
movq(ExternalReferenceAsOperand(
- ExternalReference::fast_c_call_caller_pc_address(isolate())),
+ ExternalReference::fast_c_call_caller_pc_address(isolate()),
+ scratch),
kScratchRegister);
movq(ExternalReferenceAsOperand(
ExternalReference::fast_c_call_caller_fp_address(isolate())),
rbp);
+
+ popq(scratch);
}
call(function);
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
+ Immediate(0));
+ } else {
+ DCHECK_NOT_NULL(isolate());
movq(ExternalReferenceAsOperand(
ExternalReference::fast_c_call_caller_fp_address(isolate())),
Immediate(0));
@@ -2712,7 +2768,9 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Set(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
+ Label* exit, DeoptimizeKind kind) {
+ USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in r13 (we don't need the roots array from now on).
movq(r13, Immediate(deopt_id));
@@ -2720,6 +2778,7 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
}
void TurboAssembler::Trap() { int3(); }
+void TurboAssembler::DebugBreak() { int3(); }
} // namespace internal
} // namespace v8
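
The new PushArray helper pushes `size` pointer-sized elements starting at `array`; with kReverse, element 0 is pushed first and therefore ends up at the highest stack address. A hedged usage sketch, with register assignments chosen purely for illustration:

    // Hypothetical call site: rbx points at element 0, rax holds the count,
    // r8 is a free scratch register.
    __ PushArray(rbx, rax, r8, TurboAssembler::PushArrayOrder::kReverse);
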
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 84244ea455..da9ab5da32 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -33,36 +33,22 @@ struct SmiIndex {
ScaleFactor scale;
};
-enum StackArgumentsAccessorReceiverMode {
- ARGUMENTS_CONTAIN_RECEIVER,
- ARGUMENTS_DONT_CONTAIN_RECEIVER
-};
-
+// Convenient class to access arguments below the stack pointer.
class StackArgumentsAccessor {
public:
- StackArgumentsAccessor(Register base_reg, Register argument_count_reg,
- StackArgumentsAccessorReceiverMode receiver_mode =
- ARGUMENTS_CONTAIN_RECEIVER,
- int extra_displacement_to_last_argument = 0)
- : base_reg_(base_reg),
- argument_count_reg_(argument_count_reg),
- argument_count_immediate_(0),
- receiver_mode_(receiver_mode),
- extra_displacement_to_last_argument_(
- extra_displacement_to_last_argument) {}
-
- Operand GetArgumentOperand(int index);
- Operand GetReceiverOperand() {
- DCHECK(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
- return GetArgumentOperand(0);
+ // argc = the number of arguments not including the receiver.
+ explicit StackArgumentsAccessor(Register argc) : argc_(argc) {
+ DCHECK_NE(argc_, no_reg);
}
+ // Argument 0 is the receiver (despite argc not including the receiver).
+ Operand operator[](int index) const { return GetArgumentOperand(index); }
+
+ Operand GetArgumentOperand(int index) const;
+ Operand GetReceiverOperand() const { return GetArgumentOperand(0); }
+
private:
- const Register base_reg_;
- const Register argument_count_reg_;
- const int argument_count_immediate_;
- const StackArgumentsAccessorReceiverMode receiver_mode_;
- const int extra_displacement_to_last_argument_;
+ const Register argc_;
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
@@ -158,7 +144,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
AVX_OP(Movdqu, movdqu)
+ AVX_OP(Pcmpeqb, pcmpeqb)
+ AVX_OP(Pcmpeqw, pcmpeqw)
AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Pcmpgtb, pcmpgtb)
+ AVX_OP(Pcmpgtw, pcmpgtw)
+ AVX_OP(Pmaxsw, pmaxsw)
+ AVX_OP(Pmaxub, pmaxub)
+ AVX_OP(Pminsw, pminsw)
+ AVX_OP(Pminub, pminub)
AVX_OP(Addss, addss)
AVX_OP(Addsd, addsd)
AVX_OP(Mulsd, mulsd)
@@ -185,22 +179,40 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Sqrtsd, sqrtsd)
AVX_OP(Sqrtps, sqrtps)
AVX_OP(Sqrtpd, sqrtpd)
+ AVX_OP(Cvttps2dq, cvttps2dq)
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
- AVX_OP(Paddusb, paddusb)
AVX_OP(Pand, pand)
AVX_OP(Por, por)
AVX_OP(Pxor, pxor)
+ AVX_OP(Psubb, psubb)
+ AVX_OP(Psubw, psubw)
AVX_OP(Psubd, psubd)
AVX_OP(Psubq, psubq)
+ AVX_OP(Psubsb, psubsb)
+ AVX_OP(Psubsw, psubsw)
+ AVX_OP(Psubusb, psubusb)
+ AVX_OP(Psubusw, psubusw)
AVX_OP(Pslld, pslld)
AVX_OP(Pavgb, pavgb)
AVX_OP(Pavgw, pavgw)
+ AVX_OP(Psraw, psraw)
AVX_OP(Psrad, psrad)
+ AVX_OP(Psllw, psllw)
+ AVX_OP(Psllq, psllq)
+ AVX_OP(Psrlw, psrlw)
AVX_OP(Psrld, psrld)
+ AVX_OP(Psrlq, psrlq)
+ AVX_OP(Paddb, paddb)
+ AVX_OP(Paddw, paddw)
AVX_OP(Paddd, paddd)
AVX_OP(Paddq, paddq)
+ AVX_OP(Paddsb, paddsb)
+ AVX_OP(Paddsw, paddsw)
+ AVX_OP(Paddusb, paddusb)
+ AVX_OP(Paddusw, paddusw)
AVX_OP(Pcmpgtd, pcmpgtd)
+ AVX_OP(Pmullw, pmullw)
AVX_OP(Pmuludq, pmuludq)
AVX_OP(Addpd, addpd)
AVX_OP(Subpd, subpd)
@@ -220,25 +232,49 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Mulps, mulps)
AVX_OP(Divps, divps)
AVX_OP(Pshuflw, pshuflw)
+ AVX_OP(Pshufhw, pshufhw)
+ AVX_OP(Packsswb, packsswb)
+ AVX_OP(Packuswb, packuswb)
+ AVX_OP(Packssdw, packssdw)
+ AVX_OP(Punpcklbw, punpcklbw)
+ AVX_OP(Punpcklwd, punpcklwd)
+ AVX_OP(Punpckldq, punpckldq)
+ AVX_OP(Punpckhbw, punpckhbw)
+ AVX_OP(Punpckhwd, punpckhwd)
+ AVX_OP(Punpckhdq, punpckhdq)
AVX_OP(Punpcklqdq, punpcklqdq)
+ AVX_OP(Punpckhqdq, punpckhqdq)
AVX_OP(Pshufd, pshufd)
AVX_OP(Cmpps, cmpps)
AVX_OP(Cmppd, cmppd)
AVX_OP(Movlhps, movlhps)
AVX_OP_SSE3(Movddup, movddup)
+ AVX_OP_SSSE3(Phaddd, phaddd)
+ AVX_OP_SSSE3(Phaddw, phaddw)
AVX_OP_SSSE3(Pshufb, pshufb)
+ AVX_OP_SSSE3(Psignb, psignb)
+ AVX_OP_SSSE3(Psignw, psignw)
AVX_OP_SSSE3(Psignd, psignd)
AVX_OP_SSSE3(Palignr, palignr)
+ AVX_OP_SSSE3(Pabsb, pabsb)
+ AVX_OP_SSSE3(Pabsw, pabsw)
+ AVX_OP_SSSE3(Pabsd, pabsd)
AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
- AVX_OP_SSE4_1(Pmulld, pmulld)
+ AVX_OP_SSE4_1(Packusdw, packusdw)
+ AVX_OP_SSE4_1(Pminsb, pminsb)
AVX_OP_SSE4_1(Pminsd, pminsd)
+ AVX_OP_SSE4_1(Pminuw, pminuw)
AVX_OP_SSE4_1(Pminud, pminud)
+ AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
+ AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
AVX_OP_SSE4_1(Pmaxud, pmaxud)
+ AVX_OP_SSE4_1(Pmulld, pmulld)
AVX_OP_SSE4_1(Extractps, extractps)
AVX_OP_SSE4_1(Insertps, insertps)
AVX_OP_SSE4_1(Pinsrq, pinsrq)
AVX_OP_SSE4_1(Pblendw, pblendw)
+ AVX_OP_SSE4_1(Ptest, ptest)
AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
@@ -276,6 +312,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Smi smi);
void Push(Handle<HeapObject> source);
+ enum class PushArrayOrder { kNormal, kReverse };
+ // `array` points to the first element (the lowest address).
+ // `array` and `size` are not modified.
+ void PushArray(Register array, Register size, Register scratch,
+ PushArrayOrder order = PushArrayOrder::kNormal);
+
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
// etc., not pushed. The argument count assumes all arguments are word sized.
@@ -460,9 +502,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RetpolineJump(Register reg);
- void CallForDeoptimization(Address target, int deopt_id);
+ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
+ DeoptimizeKind kind);
void Trap() override;
+ void DebugBreak() override;
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
@@ -475,7 +519,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pinsrb(XMMRegister dst, Register src, int8_t imm8);
void Pinsrb(XMMRegister dst, Operand src, int8_t imm8);
+ void Psllq(XMMRegister dst, int imm8) { Psllq(dst, static_cast<byte>(imm8)); }
void Psllq(XMMRegister dst, byte imm8);
+ void Psrlq(XMMRegister dst, int imm8) { Psrlq(dst, static_cast<byte>(imm8)); }
void Psrlq(XMMRegister dst, byte imm8);
void Pslld(XMMRegister dst, byte imm8);
void Psrld(XMMRegister dst, byte imm8);
@@ -586,6 +632,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
// ---------------------------------------------------------------------------
// Pointer compression support
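
With the simplified StackArgumentsAccessor above, a call site needs only the argument-count register, and argument 0 is always the receiver. A hypothetical example:

    StackArgumentsAccessor args(rax);         // rax = argc, excluding receiver
    __ movq(rbx, args.GetReceiverOperand());  // equivalent to args[0]
    __ movq(rcx, args[1]);                    // first non-receiver argument
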
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index 6fba5c070c..74ec16d6a2 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -36,11 +36,12 @@
V(maxss, F3, 0F, 5F)
#define SSE2_INSTRUCTION_LIST(V) \
- V(sqrtpd, 66, 0F, 51) \
+ V(andpd, 66, 0F, 54) \
V(andnpd, 66, 0F, 55) \
+ V(orpd, 66, 0F, 56) \
+ V(xorpd, 66, 0F, 57) \
V(addpd, 66, 0F, 58) \
V(mulpd, 66, 0F, 59) \
- V(cvtps2dq, 66, 0F, 5B) \
V(subpd, 66, 0F, 5C) \
V(minpd, 66, 0F, 5D) \
V(maxpd, 66, 0F, 5F) \
@@ -98,6 +99,11 @@
V(por, 66, 0F, EB) \
V(pxor, 66, 0F, EF)
+// SSE2 instructions whose AVX version has two operands.
+#define SSE2_UNOP_INSTRUCTION_LIST(V) \
+ V(sqrtpd, 66, 0F, 51) \
+ V(cvtps2dq, 66, 0F, 5B)
+
// SSE2 shift instructions with an immediate operand. The last element is the
// extension to the opcode.
#define SSE2_INSTRUCTION_LIST_SHIFT_IMM(V) \
@@ -115,15 +121,13 @@
V(sqrtsd, F2, 0F, 51) \
V(addsd, F2, 0F, 58) \
V(mulsd, F2, 0F, 59) \
+ V(cvtsd2ss, F2, 0F, 5A) \
V(subsd, F2, 0F, 5C) \
V(minsd, F2, 0F, 5D) \
V(divsd, F2, 0F, 5E) \
V(maxsd, F2, 0F, 5F)
#define SSSE3_INSTRUCTION_LIST(V) \
- V(pabsb, 66, 0F, 38, 1C) \
- V(pabsw, 66, 0F, 38, 1D) \
- V(pabsd, 66, 0F, 38, 1E) \
V(phaddd, 66, 0F, 38, 02) \
V(phaddw, 66, 0F, 38, 01) \
V(pshufb, 66, 0F, 38, 00) \
@@ -131,9 +135,14 @@
V(psignw, 66, 0F, 38, 09) \
V(psignd, 66, 0F, 38, 0A)
+// SSSE3 instructions whose AVX version has two operands.
+#define SSSE3_UNOP_INSTRUCTION_LIST(V) \
+ V(pabsb, 66, 0F, 38, 1C) \
+ V(pabsw, 66, 0F, 38, 1D) \
+ V(pabsd, 66, 0F, 38, 1E)
+
#define SSE4_INSTRUCTION_LIST(V) \
V(pcmpeqq, 66, 0F, 38, 29) \
- V(ptest, 66, 0F, 38, 17) \
V(packusdw, 66, 0F, 38, 2B) \
V(pminsb, 66, 0F, 38, 38) \
V(pminsd, 66, 0F, 38, 39) \
@@ -146,7 +155,8 @@
V(pmulld, 66, 0F, 38, 40)
// SSE instructions whose AVX version has two operands.
-#define SSE4_PMOV_INSTRUCTION_LIST(V) \
+#define SSE4_UNOP_INSTRUCTION_LIST(V) \
+ V(ptest, 66, 0F, 38, 17) \
V(pmovsxbw, 66, 0F, 38, 20) \
V(pmovsxwd, 66, 0F, 38, 23) \
V(pmovsxdq, 66, 0F, 38, 25) \
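
The *_UNOP_INSTRUCTION_LIST macros above feed the two-operand DECLARE_* generators shown earlier in assembler-x64.h. For instance, the SSE2 unop list expands to (sketch, using sqrtpd as the example):

    // From DECLARE_SSE2_UNOP_AVX_INSTRUCTION(sqrtpd, 66, 0F, 51):
    void vsqrtpd(XMMRegister dst, XMMRegister src) { vpd(0x51, dst, xmm0, src); }
    void vsqrtpd(XMMRegister dst, Operand src) { vpd(0x51, dst, xmm0, src); }
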
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 44441e0fc9..c79b3b633c 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -46,6 +46,9 @@ constexpr int GB = MB * 1024;
#if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_PPC64 && !V8_HOST_ARCH_PPC64)
+#define USE_SIMULATOR 1
+#endif
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
@@ -59,7 +62,7 @@ constexpr int GB = MB * 1024;
// Determine whether the architecture uses an embedded constant pool
// (contiguous constant pool embedded in code object).
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#define V8_EMBEDDED_CONSTANT_POOL true
#else
#define V8_EMBEDDED_CONSTANT_POOL false
@@ -191,8 +194,9 @@ constexpr size_t kMaxWasmCodeSpaceSize = kMaxWasmCodeMemory;
constexpr int kSystemPointerSizeLog2 = 3;
constexpr intptr_t kIntptrSignBit =
static_cast<intptr_t>(uintptr_t{0x8000000000000000});
-constexpr bool kRequiresCodeRange = true;
-#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+constexpr bool kPlatformRequiresCodeRange = true;
+#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \
+ (V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64) && V8_OS_LINUX
constexpr size_t kMaximalCodeRangeSize = 512 * MB;
constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
#elif V8_TARGET_ARCH_ARM64
@@ -212,18 +216,19 @@ constexpr size_t kReservedCodeRangePages = 0;
#else
constexpr int kSystemPointerSizeLog2 = 2;
constexpr intptr_t kIntptrSignBit = 0x80000000;
-#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-constexpr bool kRequiresCodeRange = false;
+#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \
+ (V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64) && V8_OS_LINUX
+constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
#elif V8_TARGET_ARCH_MIPS
-constexpr bool kRequiresCodeRange = false;
+constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 2048LL * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#else
-constexpr bool kRequiresCodeRange = false;
+constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
@@ -342,7 +347,7 @@ F FUNCTION_CAST(Address addr) {
// Determine whether the architecture uses function descriptors
// which provide a level of indirection between the function pointer
// and the function entrypoint.
-#if V8_HOST_ARCH_PPC && \
+#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \
(V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \
(!defined(_CALL_ELF) || _CALL_ELF == 1)))
#define USES_FUNCTION_DESCRIPTORS 1
@@ -418,6 +423,10 @@ enum ArgvMode { kArgvOnStack, kArgvInRegister };
// This constant is used as an undefined value when passing source positions.
constexpr int kNoSourcePosition = -1;
+// This constant is used as the bytecode offset that signals the implicit
+// stack check at function entry.
+constexpr int kFunctionEntryBytecodeOffset = -1;
+
// This constant is used to indicate missing deoptimization information.
constexpr int kNoDeoptimizationId = -1;
@@ -577,11 +586,19 @@ enum class HeapObjectReferenceType {
STRONG,
};
+enum class ArgumentsType {
+ kRuntime,
+ kJS,
+};
+
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
class AccessorInfo;
+template <ArgumentsType>
class Arguments;
+using RuntimeArguments = Arguments<ArgumentsType::kRuntime>;
+using JavaScriptArguments = Arguments<ArgumentsType::kJS>;
class Assembler;
class ClassScope;
class Code;
@@ -811,6 +828,10 @@ enum class REPLMode {
kNo,
};
+inline REPLMode construct_repl_mode(bool is_repl_mode) {
+ return is_repl_mode ? REPLMode::kYes : REPLMode::kNo;
+}
+
// Flag indicating whether code is built into the VM (one of the natives files).
enum NativesFlag { NOT_NATIVES_CODE, EXTENSION_CODE, INSPECTOR_CODE };
@@ -1575,7 +1596,9 @@ enum class LoadSensitivity {
V(TrapFuncSigMismatch) \
V(TrapDataSegmentDropped) \
V(TrapElemSegmentDropped) \
- V(TrapTableOutOfBounds)
+ V(TrapTableOutOfBounds) \
+ V(TrapBrOnExnNullRef) \
+ V(TrapRethrowNullRef)
enum KeyedAccessLoadMode {
STANDARD_LOAD,
@@ -1622,6 +1645,8 @@ constexpr int kFunctionLiteralIdTopLevel = 0;
constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;
+static const uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
+
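
A worked example for the sentinel above: conversion to uint16_t is modulo 2^16, so static_cast<uint16_t>(-1) is 0xFFFF, presumably chosen because it cannot collide with a real argument count.

#include <cstdint>
static_assert(static_cast<uint16_t>(-1) == 0xFFFF,
              "the sentinel is 65535, outside any real argument count");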
// Opaque data type for identifying stack frames. Used extensively
// by the debugger.
// ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
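
The templated Arguments forward declaration above replaces separate argument classes with one type tagged by calling convention. A minimal sketch of the pattern; the accessor logic below is hypothetical, not V8's actual frame layout:

#include <cstdint>

enum class ArgumentsType { kRuntime, kJS };

template <ArgumentsType kType>
class Arguments {
 public:
  Arguments(intptr_t* first, int count) : first_(first), count_(count) {}
  // Hypothetical: the two conventions could index in opposite directions
  // without needing two separate classes.
  intptr_t at(int i) const {
    return kType == ArgumentsType::kJS ? first_[i] : first_[count_ - 1 - i];
  }

 private:
  intptr_t* first_;
  int count_;
};

using RuntimeArguments = Arguments<ArgumentsType::kRuntime>;
using JavaScriptArguments = Arguments<ArgumentsType::kJS>;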
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index 8c10c42b7b..13d5310d92 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -415,6 +415,7 @@ namespace internal {
"Invalid left-hand side expression in prefix operation") \
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
+ T(InvalidPrivateBrand, "Object must be an instance of class %") \
T(InvalidPrivateFieldResolution, \
"Private field '%' must be declared in an enclosing class") \
T(InvalidPrivateMemberRead, \
@@ -426,6 +427,8 @@ namespace internal {
T(InvalidPrivateMethodWrite, "Private method '%' is not writable") \
T(InvalidPrivateGetterAccess, "'%' was defined without a getter") \
T(InvalidPrivateSetterAccess, "'%' was defined without a setter") \
+ T(InvalidUnusedPrivateStaticMethodAccessedByDebugger, \
+ "Unused static private method '%' cannot be accessed at debug time") \
T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
@@ -549,6 +552,8 @@ namespace internal {
T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
T(WasmTrapElemSegmentDropped, "element segment has been dropped") \
T(WasmTrapTableOutOfBounds, "table access out of bounds") \
+ T(WasmTrapBrOnExnNullRef, "br_on_exn on nullref value") \
+ T(WasmTrapRethrowNullRef, "rethrowing nullref value") \
T(WasmExceptionError, "wasm exception") \
/* Asm.js validation related */ \
T(AsmJsInvalid, "Invalid asm.js: %") \
@@ -578,12 +583,12 @@ namespace internal {
T(WeakRefsUnregisterTokenMustBeObject, \
"unregisterToken ('%') must be an object") \
T(WeakRefsCleanupMustBeCallable, \
- "FinalizationGroup: cleanup must be callable") \
+ "FinalizationRegistry: cleanup must be callable") \
T(WeakRefsRegisterTargetMustBeObject, \
- "FinalizationGroup.prototype.register: target must be an object") \
+ "FinalizationRegistry.prototype.register: target must be an object") \
T(WeakRefsRegisterTargetAndHoldingsMustNotBeSame, \
- "FinalizationGroup.prototype.register: target and holdings must not be " \
- "same") \
+ "FinalizationRegistry.prototype.register: target and holdings must not " \
+ "be same") \
T(WeakRefsWeakRefConstructorTargetMustBeObject, \
"WeakRef: target must be an object") \
T(OptionalChainingNoNew, "Invalid optional chain from new expression") \
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index a82eb4b35c..5b0c74799b 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -507,6 +507,45 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
i.InputSimd128Register(1).high()); \
} while (0)
+// If the shift value is an immediate, we can call asm_imm, taking the shift
+// value modulo 2^width. Otherwise, emit code to perform the modulus operation
+// and call vshl.
+#define ASSEMBLE_SIMD_SHIFT_LEFT(asm_imm, width, sz, dt) \
+ do { \
+ QwNeonRegister dst = i.OutputSimd128Register(); \
+ QwNeonRegister src = i.InputSimd128Register(0); \
+ if (instr->InputAt(1)->IsImmediate()) { \
+ __ asm_imm(dt, dst, src, i.InputInt##width(1)); \
+ } else { \
+ QwNeonRegister tmp = i.TempSimd128Register(0); \
+ Register shift = i.TempRegister(1); \
+ constexpr int mask = (1 << width) - 1; \
+ __ and_(shift, i.InputRegister(1), Operand(mask)); \
+ __ vdup(sz, tmp, shift); \
+ __ vshl(dt, dst, src, tmp); \
+ } \
+ } while (0)
+
+// If the shift value is an immediate, we can call asm_imm, taking the shift
+// value modulo 2^width. Otherwise, emit code to perform the modulus operation
+// and call vshl, passing in the negated shift value (treated as a right
+// shift).
+#define ASSEMBLE_SIMD_SHIFT_RIGHT(asm_imm, width, sz, dt) \
+ do { \
+ QwNeonRegister dst = i.OutputSimd128Register(); \
+ QwNeonRegister src = i.InputSimd128Register(0); \
+ if (instr->InputAt(1)->IsImmediate()) { \
+ __ asm_imm(dt, dst, src, i.InputInt##width(1)); \
+ } else { \
+ QwNeonRegister tmp = i.TempSimd128Register(0); \
+ Register shift = i.TempRegister(1); \
+ constexpr int mask = (1 << width) - 1; \
+ __ and_(shift, i.InputRegister(1), Operand(mask)); \
+ __ vdup(sz, tmp, shift); \
+ __ vneg(sz, tmp, tmp); \
+ __ vshl(dt, dst, src, tmp); \
+ } \
+ } while (0)
+
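
A scalar model of the lane-wise shift semantics these macros implement (a sketch, independent of NEON): the shift amount is masked to the lane width, and a right shift is a left shift by the negated amount.

#include <cstdint>

// For a 32-bit lane, width = 5 and the mask is 31, matching the macros above.
uint32_t LaneShiftLeft(uint32_t lane, uint32_t shift, int width) {
  int mask = (1 << width) - 1;
  return lane << (shift & mask);
}

int32_t LaneShiftRightS(int32_t lane, uint32_t shift, int width) {
  int mask = (1 << width) - 1;
  return lane >> (shift & mask);  // arithmetic shift for signed lanes
}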
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -890,10 +929,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -912,7 +947,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
- __ stop();
+ __ DebugBreak();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -2039,38 +2074,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Shl: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 64.
- __ and_(shift, i.InputRegister(1), Operand(63));
- // Only the least significant byte of each lane is used.
- __ vdup(Neon32, tmp, shift);
- __ vshl(NeonS64, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_LEFT(vshl, 6, Neon32, NeonS64);
break;
}
case kArmI64x2ShrS: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 64.
- __ and_(shift, i.InputRegister(1), Operand(63));
- // Only the least significant byte of each lane is used.
- __ vdup(Neon32, tmp, shift);
- __ vneg(Neon32, tmp, tmp);
- __ vshl(NeonS64, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ // Only the least significant byte of each lane is used, so we can use
+ // Neon32 as the size.
+ ASSEMBLE_SIMD_SHIFT_RIGHT(vshr, 6, Neon32, NeonS64);
break;
}
case kArmI64x2ShrU: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 64.
- __ and_(shift, i.InputRegister(1), Operand(63));
- // Only the least significant byte of each lane is used.
- __ vdup(Neon32, tmp, shift);
- __ vneg(Neon32, tmp, tmp);
- __ vshl(NeonU64, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ // Only the least significant byte of each lane is used, so we can use
+ // Neon32 as the size.
+ ASSEMBLE_SIMD_SHIFT_RIGHT(vshr, 6, Neon32, NeonU64);
break;
}
case kArmF32x4Splat: {
@@ -2238,24 +2254,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI32x4Shl: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, i.InputRegister(1), Operand(31));
- __ vdup(Neon32, tmp, shift);
- __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_LEFT(vshl, 5, Neon32, NeonS32);
break;
}
case kArmI32x4ShrS: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, i.InputRegister(1), Operand(31));
- __ vdup(Neon32, tmp, shift);
- __ vneg(Neon32, tmp, tmp);
- __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_RIGHT(vshr, 5, Neon32, NeonS32);
break;
}
case kArmI32x4Add: {
@@ -2323,14 +2326,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI32x4ShrU: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, i.InputRegister(1), Operand(31));
- __ vdup(Neon32, tmp, shift);
- __ vneg(Neon32, tmp, tmp);
- __ vshl(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_RIGHT(vshr, 5, Neon32, NeonU32);
break;
}
case kArmI32x4MinU: {
@@ -2353,6 +2349,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmI32x4Abs: {
+ __ vabs(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmI32x4BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register tmp2 = i.TempSimd128Register(0);
+ Simd128Register mask = i.TempSimd128Register(1);
+
+ __ vshr(NeonS32, tmp2, src, 31);
+ // Set bit i in lane i. When ANDed with tmp2, lanes whose sign bit is
+ // set keep bit i; the rest become 0.
+ __ vmov(mask.low(), Double((uint64_t)0x0000'0002'0000'0001));
+ __ vmov(mask.high(), Double((uint64_t)0x0000'0008'0000'0004));
+ __ vand(tmp2, mask, tmp2);
+ __ vpadd(Neon32, tmp2.low(), tmp2.low(), tmp2.high());
+ __ vpadd(Neon32, tmp2.low(), tmp2.low(), kDoubleRegZero);
+ __ VmovLow(dst, tmp2.low());
+ break;
+ }
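
A scalar sketch of the bitmask computation above, as plain C++ rather than NEON: the arithmetic shift right by 31 turns each lane into all-ones or all-zeros, the mask keeps bit i of lane i, and the pairwise adds collapse the lanes into a 4-bit result.

#include <cstdint>

uint32_t I32x4BitMaskModel(const int32_t lanes[4]) {
  uint32_t result = 0;
  for (int i = 0; i < 4; ++i) {
    // 0xFFFFFFFF for negative lanes, 0 otherwise (sign-fill).
    uint32_t sign_fill = static_cast<uint32_t>(lanes[i] >> 31);
    result |= sign_fill & (1u << i);  // keep bit i iff lane i is negative
  }
  return result;  // e.g. {-1, 2, -3, 4} -> 0b0101
}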
case kArmI16x8Splat: {
__ vdup(Neon16, i.OutputSimd128Register(), i.InputRegister(0));
break;
@@ -2387,24 +2404,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI16x8Shl: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, i.InputRegister(1), Operand(15));
- __ vdup(Neon16, tmp, shift);
- __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_LEFT(vshl, 4, Neon16, NeonS16);
break;
}
case kArmI16x8ShrS: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, i.InputRegister(1), Operand(15));
- __ vdup(Neon16, tmp, shift);
- __ vneg(Neon16, tmp, tmp);
- __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_RIGHT(vshr, 4, Neon16, NeonS16);
break;
}
case kArmI16x8SConvertI32x4:
@@ -2481,14 +2485,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI16x8ShrU: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, i.InputRegister(1), Operand(15));
- __ vdup(Neon16, tmp, shift);
- __ vneg(Neon16, tmp, tmp);
- __ vshl(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_RIGHT(vshr, 4, Neon16, NeonU16);
break;
}
case kArmI16x8UConvertI32x4:
@@ -2529,6 +2526,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmI16x8Abs: {
+ __ vabs(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmI16x8BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register tmp2 = i.TempSimd128Register(0);
+ Simd128Register mask = i.TempSimd128Register(1);
+
+ __ vshr(NeonS16, tmp2, src, 15);
+ // Set bit i in lane i. When ANDed with tmp2, lanes whose sign bit is
+ // set keep bit i; the rest become 0.
+ __ vmov(mask.low(), Double((uint64_t)0x0008'0004'0002'0001));
+ __ vmov(mask.high(), Double((uint64_t)0x0080'0040'0020'0010));
+ __ vand(tmp2, mask, tmp2);
+ __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.high());
+ __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
+ __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
+ __ vmov(NeonU16, dst, tmp2.low(), 0);
+ break;
+ }
case kArmI8x16Splat: {
__ vdup(Neon8, i.OutputSimd128Register(), i.InputRegister(0));
break;
@@ -2553,24 +2572,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI8x16Shl: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 8.
- __ and_(shift, i.InputRegister(1), Operand(7));
- __ vdup(Neon8, tmp, i.InputRegister(1));
- __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_LEFT(vshl, 3, Neon8, NeonS8);
break;
}
case kArmI8x16ShrS: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 8.
- __ and_(shift, i.InputRegister(1), Operand(7));
- __ vdup(Neon8, tmp, shift);
- __ vneg(Neon8, tmp, tmp);
- __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_RIGHT(vshr, 3, Neon8, NeonS8);
break;
}
case kArmI8x16SConvertI16x8:
@@ -2633,14 +2639,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI8x16ShrU: {
- QwNeonRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 8.
- __ and_(shift, i.InputRegister(1), Operand(7));
- __ vdup(Neon8, tmp, shift);
- __ vneg(Neon8, tmp, tmp);
- __ vshl(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
- tmp);
+ ASSEMBLE_SIMD_SHIFT_RIGHT(vshr, 3, Neon8, NeonU8);
break;
}
case kArmI8x16UConvertI16x8:
@@ -2681,6 +2680,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmI8x16Abs: {
+ __ vabs(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmI8x16BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register tmp2 = i.TempSimd128Register(0);
+ Simd128Register mask = i.TempSimd128Register(1);
+
+ __ vshr(NeonS8, tmp2, src, 7);
+ // Set bit i in lane i. When ANDed with tmp2, lanes whose sign bit is
+ // set keep bit i; the rest become 0.
+ __ vmov(mask.low(), Double((uint64_t)0x8040'2010'0804'0201));
+ __ vmov(mask.high(), Double((uint64_t)0x8040'2010'0804'0201));
+ __ vand(tmp2, mask, tmp2);
+ __ vext(mask, tmp2, tmp2, 8);
+ __ vzip(Neon8, mask, tmp2);
+ __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.high());
+ __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
+ __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
+ __ vmov(NeonU16, dst, tmp2.low(), 0);
+ break;
+ }
case kArmS128Zero: {
__ veor(i.OutputSimd128Register(), i.OutputSimd128Register(),
i.OutputSimd128Register());
@@ -3335,6 +3358,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_NEON_NARROWING_OP
#undef ASSEMBLE_NEON_PAIRWISE_OP
+#undef ASSEMBLE_SIMD_SHIFT_LEFT
+#undef ASSEMBLE_SIMD_SHIFT_RIGHT
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -3451,16 +3476,6 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
cases.data() + cases.size());
}
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- ArmOperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ cmp(input, Operand(i.InputInt32(index + 0)));
- __ b(eq, GetLabel(i.InputRpo(index + 1)));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
ArmOperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -3524,7 +3539,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
- // WASM import wrappers are passed a tuple in the place of the instance.
+ // Wasm import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 8173d32833..c6365bf7a5 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -201,6 +201,8 @@ namespace compiler {
V(ArmI32x4MaxU) \
V(ArmI32x4GtU) \
V(ArmI32x4GeU) \
+ V(ArmI32x4Abs) \
+ V(ArmI32x4BitMask) \
V(ArmI16x8Splat) \
V(ArmI16x8ExtractLaneS) \
V(ArmI16x8ReplaceLane) \
@@ -234,6 +236,8 @@ namespace compiler {
V(ArmI16x8GtU) \
V(ArmI16x8GeU) \
V(ArmI16x8RoundingAverageU) \
+ V(ArmI16x8Abs) \
+ V(ArmI16x8BitMask) \
V(ArmI8x16Splat) \
V(ArmI8x16ExtractLaneS) \
V(ArmI8x16ReplaceLane) \
@@ -262,6 +266,8 @@ namespace compiler {
V(ArmI8x16GtU) \
V(ArmI8x16GeU) \
V(ArmI8x16RoundingAverageU) \
+ V(ArmI8x16Abs) \
+ V(ArmI8x16BitMask) \
V(ArmS128Zero) \
V(ArmS128Dup) \
V(ArmS128And) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 4c0be32d8a..8c09acd6df 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -181,6 +181,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4MaxU:
case kArmI32x4GtU:
case kArmI32x4GeU:
+ case kArmI32x4Abs:
+ case kArmI32x4BitMask:
case kArmI16x8Splat:
case kArmI16x8ExtractLaneS:
case kArmI16x8ReplaceLane:
@@ -214,6 +216,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8GtU:
case kArmI16x8GeU:
case kArmI16x8RoundingAverageU:
+ case kArmI16x8Abs:
+ case kArmI16x8BitMask:
case kArmI8x16Splat:
case kArmI8x16ExtractLaneS:
case kArmI8x16ReplaceLane:
@@ -242,6 +246,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16GtU:
case kArmI8x16GeU:
case kArmI8x16RoundingAverageU:
+ case kArmI8x16Abs:
+ case kArmI8x16BitMask:
case kArmS128Zero:
case kArmS128Dup:
case kArmS128And:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 0027c7a570..38a4dd8db4 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -92,12 +92,23 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
}
void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+ Node* node, int width) {
ArmOperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ Int32Matcher m(node->InputAt(1));
+ if (m.HasValue()) {
+ if (m.IsMultipleOf(width)) {
+ selector->EmitIdentity(node);
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ }
+ } else {
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ }
}
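
The immediate path above relies on the modulo semantics of SIMD shifts: a constant shift that is a multiple of the lane width reduces to zero, so the node can be replaced with its input (EmitIdentity). A small model of the decision:

// width is the lane width in bits (8, 16, 32 or 64), as passed by
// SIMD_SHIFT_OP_LIST below.
bool ShiftIsIdentity(int shift, int width) {
  return shift % width == 0;  // e.g. an I32x4 shift by 0, 32, 64, ...
}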
void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
@@ -2004,7 +2015,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
ArmOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- // Emit either ArchTableSwitch or ArchLookupSwitch.
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range();
@@ -2458,24 +2469,24 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
AddressingModeField::encode(addressing_mode);
Node* projection0 = NodeProperties::FindProjection(node, 0);
Node* projection1 = NodeProperties::FindProjection(node, 1);
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ InstructionOperand temps[4];
+ size_t temp_count = 0;
+ temps[temp_count++] = g.TempRegister();
+ temps[temp_count++] = g.TempRegister();
+ if (projection0) {
+ outputs[output_count++] = g.DefineAsFixed(projection0, r2);
+ } else {
+ temps[temp_count++] = g.TempRegister(r2);
+ }
if (projection1) {
- InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r2),
- g.DefineAsFixed(projection1, r3)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
- } else if (projection0) {
- InstructionOperand outputs[] = {
- g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
- g.TempRegister(r3)};
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
+ outputs[output_count++] = g.DefineAsFixed(projection1, r3);
} else {
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
- g.TempRegister(r2), g.TempRegister(r3)};
- Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ temps[temp_count++] = g.TempRegister(r3);
}
+ Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
+ temps);
}
#define SIMD_TYPE_LIST(V) \
@@ -2501,12 +2512,15 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4) \
V(I32x4UConvertI16x8Low, kArmI32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kArmI32x4UConvertI16x8High) \
+ V(I32x4Abs, kArmI32x4Abs) \
V(I16x8SConvertI8x16Low, kArmI16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High, kArmI16x8SConvertI8x16High) \
V(I16x8Neg, kArmI16x8Neg) \
V(I16x8UConvertI8x16Low, kArmI16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High, kArmI16x8UConvertI8x16High) \
+ V(I16x8Abs, kArmI16x8Abs) \
V(I8x16Neg, kArmI8x16Neg) \
+ V(I8x16Abs, kArmI8x16Abs) \
V(S128Not, kArmS128Not) \
V(S1x4AnyTrue, kArmS1x4AnyTrue) \
V(S1x4AllTrue, kArmS1x4AllTrue) \
@@ -2516,18 +2530,18 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(S1x16AllTrue, kArmS1x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
- V(I64x2Shl) \
- V(I64x2ShrS) \
- V(I64x2ShrU) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4ShrU) \
- V(I16x8Shl) \
- V(I16x8ShrS) \
- V(I16x8ShrU) \
- V(I8x16Shl) \
- V(I8x16ShrS) \
- V(I8x16ShrU)
+ V(I64x2Shl, 64) \
+ V(I64x2ShrS, 64) \
+ V(I64x2ShrU, 64) \
+ V(I32x4Shl, 32) \
+ V(I32x4ShrS, 32) \
+ V(I32x4ShrU, 32) \
+ V(I16x8Shl, 16) \
+ V(I16x8ShrS, 16) \
+ V(I16x8ShrU, 16) \
+ V(I8x16Shl, 8) \
+ V(I8x16ShrS, 8) \
+ V(I8x16ShrU, 8)
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, kArmF64x2Add) \
@@ -2655,9 +2669,9 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST
-#define SIMD_VISIT_SHIFT_OP(Name) \
+#define SIMD_VISIT_SHIFT_OP(Name, width) \
void InstructionSelector::Visit##Name(Node* node) { \
- VisitSimdShiftRRR(this, kArm##Name, node); \
+ VisitSimdShiftRRR(this, kArm##Name, node, width); \
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
@@ -2901,6 +2915,29 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+namespace {
+template <ArchOpcode opcode>
+void VisitBitMask(InstructionSelector* selector, Node* node) {
+ ArmOperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempSimd128Register(),
+ g.TempSimd128Register()};
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
+}
+} // namespace
+
+void InstructionSelector::VisitI8x16BitMask(Node* node) {
+ VisitBitMask<kArmI8x16BitMask>(this, node);
+}
+
+void InstructionSelector::VisitI16x8BitMask(Node* node) {
+ VisitBitMask<kArmI16x8BitMask>(this, node);
+}
+
+void InstructionSelector::VisitI32x4BitMask(Node* node) {
+ VisitBitMask<kArmI32x4BitMask>(this, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 7ab1ce07d5..f01181e955 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -291,7 +291,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
- __ Push(lr, padreg);
+ __ Push<TurboAssembler::kSignLR>(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
@@ -307,7 +307,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
save_fp_mode);
}
if (must_save_lr_) {
- __ Pop(padreg, lr);
+ __ Pop<TurboAssembler::kAuthLR>(padreg, lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
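
The kSignLR/kAuthLR template arguments introduced above pair pointer authentication with the pushes and pops of lr: the push signs the return address and the matching pop authenticates it, so a corrupted value faults instead of being branched to. A rough model with illustrative names (on real hardware this is the PACIASP/AUTIASP instruction pair, not C++):

#include <cassert>
#include <cstdint>

struct ShadowFrame {
  uint64_t lr = 0;
  bool lr_signed = false;

  void PushSignedLR(uint64_t return_address) {
    lr_signed = true;  // models PACIASP before the store
    lr = return_address;
  }
  uint64_t PopAuthenticatedLR() {
    // Models AUTIASP after the load; on hardware a bad signature traps when
    // the authenticated address is used.
    assert(lr_signed && "every kSignLR push needs a matching kAuthLR pop");
    return lr;
  }
};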
@@ -494,20 +494,55 @@ void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
} while (0)
+// If the shift value is an immediate, we can call asm_imm, taking the shift
+// value modulo 2^width. Otherwise, emit code to perform the modulus operation
+// and call asm_shl.
+#define ASSEMBLE_SIMD_SHIFT_LEFT(asm_imm, width, format, asm_shl, gp) \
+ do { \
+ if (instr->InputAt(1)->IsImmediate()) { \
+ __ asm_imm(i.OutputSimd128Register().format(), \
+ i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
+ } else { \
+ VRegister tmp = i.TempSimd128Register(0); \
+ Register shift = i.TempRegister(1).gp(); \
+ constexpr int mask = (1 << width) - 1; \
+ __ And(shift, i.InputRegister32(1), mask); \
+ __ Dup(tmp.format(), shift); \
+ __ asm_shl(i.OutputSimd128Register().format(), \
+ i.InputSimd128Register(0).format(), tmp.format()); \
+ } \
+ } while (0)
+
+// If the shift value is an immediate, we can call asm_imm, taking the shift
+// value modulo 2^width. Otherwise, emit code to perform the modulus operation
+// and call asm_shl, passing in the negated shift value (treated as a right
+// shift).
+#define ASSEMBLE_SIMD_SHIFT_RIGHT(asm_imm, width, format, asm_shl, gp) \
+ do { \
+ if (instr->InputAt(1)->IsImmediate()) { \
+ __ asm_imm(i.OutputSimd128Register().format(), \
+ i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
+ } else { \
+ VRegister tmp = i.TempSimd128Register(0); \
+ Register shift = i.TempRegister(1).gp(); \
+ constexpr int mask = (1 << width) - 1; \
+ __ And(shift, i.InputRegister32(1), mask); \
+ __ Dup(tmp.format(), shift); \
+ __ Neg(tmp.format(), tmp.format()); \
+ __ asm_shl(i.OutputSimd128Register().format(), \
+ i.InputSimd128Register(0).format(), tmp.format()); \
+ } \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ Mov(sp, fp);
- __ Pop(fp, lr);
+ __ Pop<TurboAssembler::kAuthLR>(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}
void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
- static_assert(
- StandardFrameConstants::kCallerFPOffset + kSystemPointerSize ==
- StandardFrameConstants::kCallerPCOffset,
- "Offsets must be consecutive for ldp!");
- __ Ldp(fp, lr, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ RestoreFPAndLR();
}
frame_access_state()->SetFrameAccessToSP();
}
@@ -779,10 +814,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label return_location;
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
// Put the return address in a stack slot.
- Register scratch = x8;
- __ Adr(scratch, &return_location);
- __ Str(scratch,
- MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ StoreReturnAddressInWasmExitFrame(&return_location);
}
if (instr->InputAt(0)->IsImmediate()) {
@@ -824,9 +856,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- break;
case kArchAbortCSAAssert:
DCHECK_EQ(i.InputRegister(0), x1);
{
@@ -841,7 +870,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
- __ Debug("kArchDebugBreak", 0, BREAK);
+ __ DebugBreak();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
@@ -894,7 +923,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
- i.InputDoubleRegister(0), DetermineStubCallMode());
+ i.InputDoubleRegister(0), DetermineStubCallMode(),
+ frame_access_state()->has_frame()
+ ? kLRHasBeenSaved
+ : kLRHasNotBeenSaved);
+
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -1953,24 +1986,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 64.
- __ And(shift, i.InputRegister64(1), 63);
- __ Dup(tmp.V2D(), shift);
- __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
- tmp.V2D());
+ ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 6, V2D, Sshl, X);
break;
}
case kArm64I64x2ShrS: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 64.
- __ And(shift, i.InputRegister64(1), 63);
- __ Dup(tmp.V2D(), shift);
- __ Neg(tmp.V2D(), tmp.V2D());
- __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
- tmp.V2D());
+ ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 6, V2D, Sshl, X);
break;
}
SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
@@ -2045,14 +2065,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
case kArm64I64x2ShrU: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister(1);
- // Take shift value modulo 64.
- __ And(shift, i.InputRegister64(1), 63);
- __ Dup(tmp.V2D(), shift);
- __ Neg(tmp.V2D(), tmp.V2D());
- __ Ushl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
- tmp.V2D());
+ ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
}
SIMD_BINOP_CASE(kArm64I64x2GtU, Cmhi, 2D);
@@ -2080,24 +2093,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H);
SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 32.
- __ And(shift, i.InputRegister32(1), 31);
- __ Dup(tmp.V4S(), shift);
- __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
- tmp.V4S());
+ ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 5, V4S, Sshl, W);
break;
}
case kArm64I32x4ShrS: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 32.
- __ And(shift, i.InputRegister32(1), 31);
- __ Dup(tmp.V4S(), shift);
- __ Neg(tmp.V4S(), tmp.V4S());
- __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
- tmp.V4S());
+ ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 5, V4S, Sshl, W);
break;
}
SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
@@ -2120,20 +2120,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H);
SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
case kArm64I32x4ShrU: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 32.
- __ And(shift, i.InputRegister32(1), 31);
- __ Dup(tmp.V4S(), shift);
- __ Neg(tmp.V4S(), tmp.V4S());
- __ Ushl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
- tmp.V4S());
+ ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 5, V4S, Ushl, W);
break;
}
SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S);
SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S);
SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S);
+ SIMD_UNOP_CASE(kArm64I32x4Abs, Abs, 4S);
+ case kArm64I32x4BitMask: {
+ Register dst = i.OutputRegister32();
+ VRegister src = i.InputSimd128Register(0);
+ VRegister tmp = i.TempSimd128Register(0);
+ VRegister mask = i.TempSimd128Register(1);
+
+ __ Sshr(tmp.V4S(), src.V4S(), 31);
+ // Set bit i in lane i. When ANDed with tmp, lanes whose sign bit is
+ // set keep bit i; the rest become 0.
+ __ Movi(mask.V2D(), 0x0000'0008'0000'0004, 0x0000'0002'0000'0001);
+ __ And(tmp.V16B(), mask.V16B(), tmp.V16B());
+ __ Addv(tmp.S(), tmp.V4S());
+ __ Mov(dst.W(), tmp.V4S(), 0);
+ break;
+ }
case kArm64I16x8Splat: {
__ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
break;
@@ -2161,24 +2170,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B);
SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 16.
- __ And(shift, i.InputRegister32(1), 15);
- __ Dup(tmp.V8H(), shift);
- __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
- tmp.V8H());
+ ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 4, V8H, Sshl, W);
break;
}
case kArm64I16x8ShrS: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 16.
- __ And(shift, i.InputRegister32(1), 15);
- __ Dup(tmp.V8H(), shift);
- __ Neg(tmp.V8H(), tmp.V8H());
- __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
- tmp.V8H());
+ ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 4, V8H, Sshl, W);
break;
}
case kArm64I16x8SConvertI32x4: {
@@ -2223,14 +2219,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I16x8ShrU: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 16.
- __ And(shift, i.InputRegister32(1), 15);
- __ Dup(tmp.V8H(), shift);
- __ Neg(tmp.V8H(), tmp.V8H());
- __ Ushl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
- tmp.V8H());
+ ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 4, V8H, Ushl, W);
break;
}
case kArm64I16x8UConvertI32x4: {
@@ -2254,6 +2243,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
SIMD_BINOP_CASE(kArm64I16x8RoundingAverageU, Urhadd, 8H);
+ SIMD_UNOP_CASE(kArm64I16x8Abs, Abs, 8H);
+ case kArm64I16x8BitMask: {
+ Register dst = i.OutputRegister32();
+ VRegister src = i.InputSimd128Register(0);
+ VRegister tmp = i.TempSimd128Register(0);
+ VRegister mask = i.TempSimd128Register(1);
+
+ __ Sshr(tmp.V8H(), src.V8H(), 15);
+ // Set bit i in lane i. When ANDed with tmp, lanes whose sign bit is
+ // set keep bit i; the rest become 0.
+ __ Movi(mask.V2D(), 0x0080'0040'0020'0010, 0x0008'0004'0002'0001);
+ __ And(tmp.V16B(), mask.V16B(), tmp.V16B());
+ __ Addv(tmp.H(), tmp.V8H());
+ __ Mov(dst.W(), tmp.V8H(), 0);
+ break;
+ }
case kArm64I8x16Splat: {
__ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));
break;
@@ -2279,24 +2284,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
case kArm64I8x16Shl: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 8.
- __ And(shift, i.InputRegister32(1), 7);
- __ Dup(tmp.V16B(), shift);
- __ Sshl(i.OutputSimd128Register().V16B(),
- i.InputSimd128Register(0).V16B(), tmp.V16B());
+ ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 3, V16B, Sshl, W);
break;
}
case kArm64I8x16ShrS: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 8.
- __ And(shift, i.InputRegister32(1), 7);
- __ Dup(tmp.V16B(), shift);
- __ Neg(tmp.V16B(), tmp.V16B());
- __ Sshl(i.OutputSimd128Register().V16B(),
- i.InputSimd128Register(0).V16B(), tmp.V16B());
+ ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 3, V16B, Sshl, W);
break;
}
case kArm64I8x16SConvertI16x8: {
@@ -2331,14 +2323,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
case kArm64I8x16ShrU: {
- VRegister tmp = i.TempSimd128Register(0);
- Register shift = i.TempRegister32(1);
- // Take shift value modulo 8.
- __ And(shift, i.InputRegister32(1), 7);
- __ Dup(tmp.V16B(), shift);
- __ Neg(tmp.V16B(), tmp.V16B());
- __ Ushl(i.OutputSimd128Register().V16B(),
- i.InputSimd128Register(0).V16B(), tmp.V16B());
+ ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 3, V16B, Ushl, W);
break;
}
case kArm64I8x16UConvertI16x8: {
@@ -2362,6 +2347,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B);
SIMD_BINOP_CASE(kArm64I8x16RoundingAverageU, Urhadd, 16B);
+ SIMD_UNOP_CASE(kArm64I8x16Abs, Abs, 16B);
+ case kArm64I8x16BitMask: {
+ Register dst = i.OutputRegister32();
+ VRegister src = i.InputSimd128Register(0);
+ VRegister tmp = i.TempSimd128Register(0);
+ VRegister mask = i.TempSimd128Register(1);
+
+ // Set bit i in lane i. When ANDed with tmp, lanes whose sign bit is
+ // set keep bit i; the rest become 0.
+ __ Sshr(tmp.V16B(), src.V16B(), 7);
+ __ Movi(mask.V2D(), 0x8040'2010'0804'0201);
+ __ And(tmp.V16B(), mask.V16B(), tmp.V16B());
+ __ Ext(mask.V16B(), tmp.V16B(), tmp.V16B(), 8);
+ __ Zip1(tmp.V16B(), tmp.V16B(), mask.V16B());
+ __ Addv(tmp.H(), tmp.V8H());
+ __ Mov(dst.W(), tmp.V8H(), 0);
+ break;
+ }
case kArm64S128Zero: {
__ Movi(i.OutputSimd128Register().V16B(), 0);
break;
@@ -2573,6 +2576,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef SIMD_WIDENING_UNOP_CASE
#undef SIMD_BINOP_CASE
#undef SIMD_REDUCE_OP_CASE
+#undef ASSEMBLE_SIMD_SHIFT_LEFT
+#undef ASSEMBLE_SIMD_SHIFT_RIGHT
// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
@@ -2734,16 +2739,6 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
cases.data() + cases.size());
}
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- Arm64OperandConverter i(this, instr);
- Register input = i.InputRegister32(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ Cmp(input, i.InputInt32(index + 0));
- __ B(eq, GetLabel(i.InputRpo(index + 1)));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
UseScratchRegisterScope scope(tasm());
@@ -2754,15 +2749,21 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Cmp(input, case_count);
__ B(hs, GetLabel(i.InputRpo(1)));
__ Adr(temp, &table);
- __ Add(temp, temp, Operand(input, UXTW, 2));
+ int entry_size_log2 = 2;
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ ++entry_size_log2; // Account for BTI.
+#endif
+ __ Add(temp, temp, Operand(input, UXTW, entry_size_log2));
__ Br(temp);
{
TurboAssembler::BlockPoolsScope block_pools(tasm(),
case_count * kInstrSize);
__ Bind(&table);
for (size_t index = 0; index < case_count; ++index) {
+ __ JumpTarget();
__ B(GetLabel(i.InputRpo(index + 2)));
}
+ __ JumpTarget();
}
}
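
A worked example of the entry-size computation above: each jump-table entry is a 4-byte branch (log2 = 2); with control-flow integrity enabled, a 4-byte BTI landing pad (JumpTarget) precedes it, doubling the entry to 8 bytes (log2 = 3), which the scaled index addressing must match.

constexpr int JumpTableEntrySizeLog2(bool cfi_enabled) {
  return cfi_enabled ? 3 : 2;  // (BTI + B) = 8 bytes vs. B alone = 4 bytes
}
static_assert((1 << JumpTableEntrySizeLog2(false)) == 4, "plain branch");
static_assert((1 << JumpTableEntrySizeLog2(true)) == 8, "BTI + branch");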
@@ -2801,8 +2802,10 @@ void CodeGenerator::AssembleConstructFrame() {
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
call_descriptor->CalleeSavedRegisters());
+ DCHECK_EQ(saves.Count() % 2, 0);
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
call_descriptor->CalleeSavedFPRegisters());
+ DCHECK_EQ(saves_fp.Count() % 2, 0);
// The number of slots for returns has to be even to ensure the correct stack
// alignment.
const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
@@ -2812,7 +2815,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
} else {
- __ Push(lr, fp);
+ __ Push<TurboAssembler::kSignLR>(lr, fp);
__ Mov(fp, sp);
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -2856,13 +2859,10 @@ void CodeGenerator::AssembleConstructFrame() {
{
// Finish the frame that hasn't been fully built yet.
UseScratchRegisterScope temps(tasm());
- __ Claim(2); // Claim extra slots for marker + instance.
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
- __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
- __ Str(kWasmInstanceRegister,
- MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
+ __ Push(scratch, kWasmInstanceRegister);
}
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
@@ -2896,23 +2896,25 @@ void CodeGenerator::AssembleConstructFrame() {
break;
case CallDescriptor::kCallCodeObject: {
UseScratchRegisterScope temps(tasm());
- __ Claim(required_slots +
- 1); // Claim extra slot for frame type marker.
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
- __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ __ Push(scratch, padreg);
+ // One of the extra slots has just been claimed when pushing the frame
+ // type marker above. We also know that we have at least one slot to
+ // claim here, as the typed frame has an odd number of fixed slots, and
+ // all other parts of the total frame slots are even, leaving
+ // {required_slots} to be odd.
+ DCHECK_GE(required_slots, 1);
+ __ Claim(required_slots - 1);
} break;
case CallDescriptor::kCallWasmFunction: {
UseScratchRegisterScope temps(tasm());
- __ Claim(required_slots +
- 2); // Claim extra slots for marker + instance.
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
- __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
- __ Str(kWasmInstanceRegister,
- MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
+ __ Push(scratch, kWasmInstanceRegister);
+ __ Claim(required_slots);
} break;
case CallDescriptor::kCallWasmImportWrapper:
case CallDescriptor::kCallWasmCapiFunction: {
@@ -2923,30 +2925,25 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadTaggedPointerField(
kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
- int extra_slots =
- call_descriptor->kind() == CallDescriptor::kCallWasmImportWrapper
- ? 2 // Import wrapper: marker + instance.
- : 3; // C-API function: marker + instance + PC.
- __ Claim(required_slots + extra_slots);
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
- __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
- __ Str(kWasmInstanceRegister,
- MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
+ __ Push(scratch, kWasmInstanceRegister);
+ int extra_slots =
+ call_descriptor->kind() == CallDescriptor::kCallWasmImportWrapper
+ ? 0 // Import wrapper: none.
+ : 1; // C-API function: PC.
+ __ Claim(required_slots + extra_slots);
} break;
case CallDescriptor::kCallAddress:
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
- required_slots += 2; // marker + saved c_entry_fp.
- }
- __ Claim(required_slots);
- if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
- __ Str(scratch,
- MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ __ Push(scratch, padreg);
+ // The additional slot will be used for the saved c_entry_fp.
}
+ __ Claim(required_slots);
break;
default:
UNREACHABLE();
@@ -2959,10 +2956,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ PushCPURegList(saves_fp);
// Save registers.
- // TODO(palfia): TF save list is not in sync with
- // CPURegList::GetCalleeSaved(): x30 is missing.
- // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
- __ PushCPURegList(saves);
+ DCHECK_IMPLIES(!saves.IsEmpty(),
+ saves.list() == CPURegList::GetCalleeSaved().list());
+ __ PushCPURegList<TurboAssembler::kSignLR>(saves);
if (returns != 0) {
__ Claim(returns);
@@ -2981,7 +2977,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore registers.
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
call_descriptor->CalleeSavedRegisters());
- __ PopCPURegList(saves);
+ __ PopCPURegList<TurboAssembler::kAuthLR>(saves);
// Restore fp registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
@@ -3027,7 +3023,11 @@ void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {
__ ForceConstantPoolEmissionWithoutJump();
- __ CheckVeneerPool(false, false, deopt_count * Deoptimizer::kDeoptExitSize);
+ // We are conservative here, assuming all deopts are lazy deopts.
+ DCHECK_GE(Deoptimizer::kLazyDeoptExitSize,
+ Deoptimizer::kNonLazyDeoptExitSize);
+ __ CheckVeneerPool(false, false,
+ deopt_count * Deoptimizer::kLazyDeoptExitSize);
}
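
The reservation above is intentionally conservative: lazy exits are at least as large as non-lazy ones, so sizing the veneer-pool check with the lazy size for every exit can only over-reserve, never under-reserve. A sketch with placeholder sizes (the real constants are per-architecture):

constexpr int kLazyDeoptExitSize = 8;     // placeholder, not V8's value
constexpr int kNonLazyDeoptExitSize = 4;  // placeholder, not V8's value
static_assert(kLazyDeoptExitSize >= kNonLazyDeoptExitSize,
              "the lazy size must bound both kinds for this to be safe");

constexpr int MaxDeoptExitSpace(int deopt_count) {
  return deopt_count * kLazyDeoptExitSize;
}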
void CodeGenerator::AssembleMove(InstructionOperand* source,
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 0e78a346a6..e24812f884 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -252,6 +252,8 @@ namespace compiler {
V(Arm64I32x4MaxU) \
V(Arm64I32x4GtU) \
V(Arm64I32x4GeU) \
+ V(Arm64I32x4Abs) \
+ V(Arm64I32x4BitMask) \
V(Arm64I16x8Splat) \
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
@@ -285,6 +287,8 @@ namespace compiler {
V(Arm64I16x8GtU) \
V(Arm64I16x8GeU) \
V(Arm64I16x8RoundingAverageU) \
+ V(Arm64I16x8Abs) \
+ V(Arm64I16x8BitMask) \
V(Arm64I8x16Splat) \
V(Arm64I8x16ExtractLaneU) \
V(Arm64I8x16ExtractLaneS) \
@@ -313,6 +317,8 @@ namespace compiler {
V(Arm64I8x16GtU) \
V(Arm64I8x16GeU) \
V(Arm64I8x16RoundingAverageU) \
+ V(Arm64I8x16Abs) \
+ V(Arm64I8x16BitMask) \
V(Arm64S128Zero) \
V(Arm64S128Dup) \
V(Arm64S128And) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index f032e333c1..72a0b1b012 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -222,6 +222,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4MaxU:
case kArm64I32x4GtU:
case kArm64I32x4GeU:
+ case kArm64I32x4Abs:
+ case kArm64I32x4BitMask:
case kArm64I16x8Splat:
case kArm64I16x8ExtractLaneU:
case kArm64I16x8ExtractLaneS:
@@ -255,6 +257,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I16x8GtU:
case kArm64I16x8GeU:
case kArm64I16x8RoundingAverageU:
+ case kArm64I16x8Abs:
+ case kArm64I16x8BitMask:
case kArm64I8x16Splat:
case kArm64I8x16ExtractLaneU:
case kArm64I8x16ExtractLaneS:
@@ -283,6 +287,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I8x16GtU:
case kArm64I8x16GeU:
case kArm64I8x16RoundingAverageU:
+ case kArm64I8x16Abs:
+ case kArm64I8x16BitMask:
case kArm64S128Zero:
case kArm64S128Dup:
case kArm64S128And:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 3c7c337537..bb204f62a6 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -152,12 +152,25 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
}
void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+ Node* node, int width) {
Arm64OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ if (g.IsIntegerConstant(node->InputAt(1))) {
+ if (g.GetIntegerConstantValue(node->InputAt(1)) % width == 0) {
+ selector->EmitIdentity(node);
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ }
+ } else {
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
+ // We only need a unique register for the first input (src): in the
+ // codegen we use tmp to store the shift and then combine it with src.
+ // The second input may share a register with the second temp (shift).
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ }
}
void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
@@ -2578,7 +2591,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Arm64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- // Emit either ArchTableSwitch or ArchLookupSwitch.
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range();
@@ -3157,12 +3170,15 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
V(I32x4UConvertI16x8Low, kArm64I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kArm64I32x4UConvertI16x8High) \
+ V(I32x4Abs, kArm64I32x4Abs) \
V(I16x8SConvertI8x16Low, kArm64I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High, kArm64I16x8SConvertI8x16High) \
V(I16x8Neg, kArm64I16x8Neg) \
V(I16x8UConvertI8x16Low, kArm64I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High, kArm64I16x8UConvertI8x16High) \
+ V(I16x8Abs, kArm64I16x8Abs) \
V(I8x16Neg, kArm64I8x16Neg) \
+ V(I8x16Abs, kArm64I8x16Abs) \
V(S128Not, kArm64S128Not) \
V(S1x2AnyTrue, kArm64S1x2AnyTrue) \
V(S1x2AllTrue, kArm64S1x2AllTrue) \
@@ -3174,18 +3190,18 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(S1x16AllTrue, kArm64S1x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
- V(I64x2Shl) \
- V(I64x2ShrS) \
- V(I64x2ShrU) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4ShrU) \
- V(I16x8Shl) \
- V(I16x8ShrS) \
- V(I16x8ShrU) \
- V(I8x16Shl) \
- V(I8x16ShrS) \
- V(I8x16ShrU)
+ V(I64x2Shl, 64) \
+ V(I64x2ShrS, 64) \
+ V(I64x2ShrU, 64) \
+ V(I32x4Shl, 32) \
+ V(I32x4ShrS, 32) \
+ V(I32x4ShrU, 32) \
+ V(I16x8Shl, 16) \
+ V(I16x8ShrS, 16) \
+ V(I16x8ShrU, 16) \
+ V(I8x16Shl, 8) \
+ V(I8x16ShrS, 8) \
+ V(I8x16ShrU, 8)
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, kArm64F64x2Add) \
@@ -3319,9 +3335,9 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST
-#define SIMD_VISIT_SHIFT_OP(Name) \
- void InstructionSelector::Visit##Name(Node* node) { \
- VisitSimdShiftRRR(this, kArm64##Name, node); \
+#define SIMD_VISIT_SHIFT_OP(Name, width) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitSimdShiftRRR(this, kArm64##Name, node, width); \
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
@@ -3364,6 +3380,29 @@ VISIT_SIMD_QFMOP(F32x4Qfms)
#undef VISIT_SIMD_QFMOP
namespace {
+template <ArchOpcode opcode>
+void VisitBitMask(InstructionSelector* selector, Node* node) {
+ Arm64OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempSimd128Register(),
+ g.TempSimd128Register()};
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
+}
+} // namespace
+
+void InstructionSelector::VisitI8x16BitMask(Node* node) {
+ VisitBitMask<kArm64I8x16BitMask>(this, node);
+}
+
+void InstructionSelector::VisitI16x8BitMask(Node* node) {
+ VisitBitMask<kArm64I16x8BitMask>(this, node);
+}
+
+void InstructionSelector::VisitI32x4BitMask(Node* node) {
+ VisitBitMask<kArm64I32x4BitMask>(this, node);
+}
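
Not part of the patch: a scalar reference (hypothetical helper) for what the new bitmask instructions compute, shown for the 16-lane byte case. Each lane contributes its sign bit to one bit of the result, which fits in a general-purpose register rather than a SIMD one.

#include <cstdint>
uint32_t I8x16BitMaskRef(const int8_t lanes[16]) {
  uint32_t mask = 0;
  for (int i = 0; i < 16; ++i) {
    if (lanes[i] < 0) mask |= 1u << i;  // sign bit of lane i -> result bit i
  }
  return mask;
}
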
+
+namespace {
struct ShuffleEntry {
uint8_t shuffle[kSimd128Size];
diff --git a/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.cc b/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.cc
index 3747019c7d..c8a570af53 100644
--- a/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.cc
@@ -9,6 +9,9 @@ namespace v8 {
namespace internal {
namespace compiler {
+// TODO(v8:10026): When using CFI, we need to generate unwinding info to tell
+// the unwinder that return addresses are signed.
+
void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
const InstructionBlock* block) {
if (!enabled()) return;
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 530dc0a813..e335135240 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -62,6 +62,10 @@ class InstructionOperandConverter {
return static_cast<int8_t>(InputInt32(index));
}
+ uint8_t InputUint8(size_t index) {
+ return bit_cast<uint8_t>(InputInt8(index));
+ }
+
int16_t InputInt16(size_t index) {
return static_cast<int16_t>(InputInt32(index));
}
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index a441a36496..9dbd5fac33 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -158,6 +158,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
}
DeoptimizeKind deopt_kind = exit->kind();
+
DeoptimizeReason deoptimization_reason = exit->reason();
Address deopt_entry =
Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
@@ -165,7 +166,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
deoptimization_id);
}
- tasm()->CallForDeoptimization(deopt_entry, deoptimization_id);
+
+ if (deopt_kind == DeoptimizeKind::kLazy) {
+ tasm()->BindExceptionHandler(exit->label());
+ } else {
+ ++non_lazy_deopt_count_;
+ tasm()->bind(exit->label());
+ }
+
+ tasm()->CallForDeoptimization(deopt_entry, deoptimization_id, exit->label(),
+ deopt_kind);
exit->set_emitted();
return kSuccess;
}
@@ -186,6 +196,9 @@ void CodeGenerator::AssembleCode() {
AssembleSourcePosition(start_source_position());
}
offsets_info_.code_start_register_check = tasm()->pc_offset();
+
+ tasm()->CodeEntry();
+
// Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
if (FLAG_debug_code && (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
info->code_kind() == Code::BYTECODE_HANDLER)) {
@@ -312,31 +325,49 @@ void CodeGenerator::AssembleCode() {
// emitted before emitting the deoptimization exits.
PrepareForDeoptimizationExits(static_cast<int>(deoptimization_exits_.size()));
- if (Deoptimizer::kSupportsFixedDeoptExitSize) {
+ if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
deopt_exit_start_offset_ = tasm()->pc_offset();
}
// Assemble deoptimization exits.
offsets_info_.deoptimization_exits = tasm()->pc_offset();
int last_updated = 0;
+  // We sort the deoptimization exits here so that the lazy ones are
+  // visited last. We need this because, on architectures where
+  // Deoptimizer::kSupportsFixedDeoptExitSizes is true, lazy deopts
+  // might need additional instructions.
+ auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
+ static_assert(DeoptimizeKind::kLazy > DeoptimizeKind::kEager,
+ "lazy deopts are expected to be emitted last");
+ static_assert(DeoptimizeKind::kLazy > DeoptimizeKind::kSoft,
+ "lazy deopts are expected to be emitted last");
+ if (a->kind() != b->kind()) {
+ return a->kind() < b->kind();
+ }
+ return a->pc_offset() < b->pc_offset();
+ };
+ if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
+ std::sort(deoptimization_exits_.begin(), deoptimization_exits_.end(), cmp);
+ }
+
for (DeoptimizationExit* exit : deoptimization_exits_) {
if (exit->emitted()) continue;
- if (Deoptimizer::kSupportsFixedDeoptExitSize) {
+ if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
exit->set_deoptimization_id(next_deoptimization_id_++);
}
- tasm()->bind(exit->label());
+ result_ = AssembleDeoptimizerCall(exit);
+ if (result_ != kSuccess) return;
// UpdateDeoptimizationInfo expects lazy deopts to be visited in pc_offset
// order, which is always the case since they are added to
- // deoptimization_exits_ in that order.
+ // deoptimization_exits_ in that order, and the optional sort operation
+ // above preserves that order.
if (exit->kind() == DeoptimizeKind::kLazy) {
- int trampoline_pc = tasm()->pc_offset();
+ int trampoline_pc = exit->label()->pos();
last_updated = safepoints()->UpdateDeoptimizationInfo(
exit->pc_offset(), trampoline_pc, last_updated,
exit->deoptimization_id());
}
- result_ = AssembleDeoptimizerCall(exit);
- if (result_ != kSuccess) return;
}
offsets_info_.pools = tasm()->pc_offset();
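
Not part of the patch: a self-contained sketch of the ordering established by the comparator above. Sorting by kind first pushes all lazy exits to the end; the pc_offset tiebreaker keeps them in emission order, which UpdateDeoptimizationInfo relies on.

#include <algorithm>
#include <vector>
enum class Kind { kEager, kSoft, kLazy };  // kLazy compares greatest
struct Exit { Kind kind; int pc_offset; };
void SortExits(std::vector<Exit*>& exits) {
  std::sort(exits.begin(), exits.end(), [](const Exit* a, const Exit* b) {
    if (a->kind != b->kind) return a->kind < b->kind;  // lazy exits last
    return a->pc_offset < b->pc_offset;                // pc order within a kind
  });
}
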
@@ -432,10 +463,9 @@ OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
return source_position_table_builder_.ToSourcePositionTableVector();
}
-OwnedVector<trap_handler::ProtectedInstructionData>
-CodeGenerator::GetProtectedInstructions() {
- return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
- protected_instructions_);
+OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
+ return OwnedVector<byte>::Of(
+ Vector<byte>::cast(VectorOf(protected_instructions_)));
}
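
Not part of the patch: a sketch of the representation change in GetProtectedInstructionsData. A vector of trivially copyable records can be handed out as raw bytes with a single copy (struct layout here is hypothetical).

#include <cstdint>
#include <cstring>
#include <vector>
struct ProtectedInstruction { uint32_t instr_offset; };  // hypothetical layout
std::vector<uint8_t> AsBytes(const std::vector<ProtectedInstruction>& records) {
  std::vector<uint8_t> bytes(records.size() * sizeof(ProtectedInstruction));
  if (!records.empty()) std::memcpy(bytes.data(), records.data(), bytes.size());
  return bytes;
}
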
MaybeHandle<Code> CodeGenerator::FinalizeCode() {
@@ -531,6 +561,9 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
const InstructionBlock* block) {
+ if (block->IsHandler()) {
+ tasm()->ExceptionHandler();
+ }
for (int i = block->code_start(); i < block->code_end(); ++i) {
CodeGenResult result = AssembleInstruction(i, block);
if (result != kSuccess) return result;
@@ -883,6 +916,7 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
+ data->SetNonLazyDeoptCount(Smi::FromInt(non_lazy_deopt_count_));
if (info->has_shared_info()) {
data->SetSharedFunctionInfo(*info->shared_info());
@@ -944,6 +978,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (flags & CallDescriptor::kHasExceptionHandler) {
InstructionOperandConverter i(this, instr);
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
+ DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
handlers_.push_back({GetLabel(handler_rpo), tasm()->pc_offset()});
}
@@ -1120,7 +1155,7 @@ DeoptimizationExit* CodeGenerator::BuildTranslation(
current_source_position_, descriptor->bailout_id(), translation.index(),
pc_offset, entry.kind(), entry.reason());
- if (!Deoptimizer::kSupportsFixedDeoptExitSize) {
+ if (!Deoptimizer::kSupportsFixedDeoptExitSizes) {
exit->set_deoptimization_id(next_deoptimization_id_++);
}
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index e54d988b52..0caefddd97 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -125,8 +125,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
MaybeHandle<Code> FinalizeCode();
OwnedVector<byte> GetSourcePositionTable();
- OwnedVector<trap_handler::ProtectedInstructionData>
- GetProtectedInstructions();
+ OwnedVector<byte> GetProtectedInstructionsData();
InstructionSequence* instructions() const { return instructions_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
@@ -253,7 +252,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end);
void AssembleArchBinarySearchSwitch(Instruction* instr);
- void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
// Generates code that checks whether the {kJavaScriptCallCodeStartRegister}
@@ -425,6 +423,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
ZoneVector<HandlerInfo> handlers_;
int next_deoptimization_id_ = 0;
int deopt_exit_start_offset_ = 0;
+ int non_lazy_deopt_count_ = 0;
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
size_t inlined_function_count_ = 0;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 7484d91dbf..d397ba8241 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -493,6 +493,22 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ cmov(zero, dst, tmp); \
} while (false)
+#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ if (HasImmediateInput(instr, 1)) { \
+ __ opcode(dst, dst, static_cast<byte>(i.InputInt##width(1))); \
+ } else { \
+ XMMRegister tmp = i.TempSimd128Register(0); \
+ Register shift = i.InputRegister(1); \
+ constexpr int mask = (1 << width) - 1; \
+ __ and_(shift, Immediate(mask)); \
+ __ Movd(tmp, shift); \
+ __ opcode(dst, dst, tmp); \
+ } \
+ } while (false)
+
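
Not part of the patch: in ASSEMBLE_SIMD_SHIFT, `width` is log2 of the lane width in bits; it selects the InputInt##width immediate reader, and `(1 << width) - 1` is exactly the wasm shift-count mask. A compile-time check of that relationship:

#include <cstdint>
constexpr uint32_t ShiftMask(int log2_lane_bits) {
  return (1u << log2_lane_bits) - 1;
}
static_assert(ShiftMask(6) == 63, "I64x2 shift counts are taken mod 64");
static_assert(ShiftMask(5) == 31, "I32x4 shift counts are taken mod 32");
static_assert(ShiftMask(4) == 15, "I16x8 shift counts are taken mod 16");
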
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -883,9 +899,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
@@ -905,7 +918,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ int3();
break;
case kArchDebugBreak:
- __ int3();
+ __ DebugBreak();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -2021,12 +2034,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2Shl: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 64.
- __ and_(shift, Immediate(63));
- __ Movd(tmp, shift);
- __ Psllq(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
+ ASSEMBLE_SIMD_SHIFT(Psllq, 6);
break;
}
case kIA32I64x2ShrS: {
@@ -2086,12 +2094,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2ShrU: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 64.
- __ and_(shift, Immediate(63));
- __ Movd(tmp, shift);
- __ Psrlq(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
+ ASSEMBLE_SIMD_SHIFT(Psrlq, 6);
break;
}
case kSSEF32x4Splat: {
@@ -2487,44 +2490,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kSSEI32x4Shl: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, 31);
- __ movd(tmp, shift);
- __ pslld(i.OutputSimd128Register(), tmp);
+ case kIA32I32x4Shl: {
+ ASSEMBLE_SIMD_SHIFT(Pslld, 5);
break;
}
- case kAVXI32x4Shl: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, 31);
- __ movd(tmp, shift);
- __ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
- break;
- }
- case kSSEI32x4ShrS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, 31);
- __ movd(tmp, shift);
- __ psrad(i.OutputSimd128Register(), tmp);
- break;
- }
- case kAVXI32x4ShrS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, 31);
- __ movd(tmp, shift);
- __ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
+ case kIA32I32x4ShrS: {
+ ASSEMBLE_SIMD_SHIFT(Psrad, 5);
break;
}
case kSSEI32x4Add: {
@@ -2717,24 +2688,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pmovzxwd(dst, dst);
break;
}
- case kSSEI32x4ShrU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, 31);
- __ movd(tmp, shift);
- __ psrld(i.OutputSimd128Register(), tmp);
- break;
- }
- case kAVXI32x4ShrU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ and_(shift, 31);
- __ movd(tmp, shift);
- __ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
+ case kIA32I32x4ShrU: {
+ ASSEMBLE_SIMD_SHIFT(Psrld, 5);
break;
}
case kSSEI32x4MinU: {
@@ -2800,6 +2755,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqd(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
+ case kIA32I32x4Abs: {
+ __ Pabsd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
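
Not part of the patch: a scalar reference (hypothetical helper) for the lane-wise abs that Pabsd/Pabsw/Pabsb implement. Note the wrapping edge case: the most negative value maps to itself.

#include <cstdint>
int32_t I32LaneAbs(int32_t x) {
  uint32_t ux = static_cast<uint32_t>(x);
  // Wrapping negate, as pabsd does: I32LaneAbs(INT32_MIN) == INT32_MIN.
  return x < 0 ? static_cast<int32_t>(0u - ux) : x;
}
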
case kIA32I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -2851,44 +2810,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kSSEI16x8Shl: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, 15);
- __ movd(tmp, shift);
- __ psllw(i.OutputSimd128Register(), tmp);
- break;
- }
- case kAVXI16x8Shl: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, 15);
- __ movd(tmp, shift);
- __ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
+ case kIA32I16x8Shl: {
+ ASSEMBLE_SIMD_SHIFT(Psllw, 4);
break;
}
- case kSSEI16x8ShrS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, 15);
- __ movd(tmp, shift);
- __ psraw(i.OutputSimd128Register(), tmp);
- break;
- }
- case kAVXI16x8ShrS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, 15);
- __ movd(tmp, shift);
- __ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
+ case kIA32I16x8ShrS: {
+ ASSEMBLE_SIMD_SHIFT(Psraw, 4);
break;
}
case kSSEI16x8SConvertI32x4: {
@@ -3055,24 +2982,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pmovzxbw(dst, dst);
break;
}
- case kSSEI16x8ShrU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, 15);
- __ movd(tmp, shift);
- __ psrlw(i.OutputSimd128Register(), tmp);
- break;
- }
- case kAVXI16x8ShrU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ and_(shift, 15);
- __ movd(tmp, shift);
- __ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp);
+ case kIA32I16x8ShrU: {
+ ASSEMBLE_SIMD_SHIFT(Psrlw, 4);
break;
}
case kSSEI16x8UConvertI32x4: {
@@ -3178,6 +3089,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kIA32I16x8Abs: {
+ __ Pabsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kIA32I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -3231,63 +3146,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kSSEI8x16Shl: {
+ case kIA32I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register shift = i.InputRegister(1);
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Take shift value modulo 8.
- __ and_(shift, 7);
- // Mask off the unwanted bits before word-shifting.
- __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ mov(tmp, shift);
- __ add(tmp, Immediate(8));
- __ movd(tmp_simd, tmp);
- __ psrlw(kScratchDoubleReg, tmp_simd);
- __ packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ pand(dst, kScratchDoubleReg);
- __ movd(tmp_simd, shift);
- __ psllw(dst, tmp_simd);
- break;
- }
- case kAVXI8x16Shl: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- Register shift = i.InputRegister(1);
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Take shift value modulo 8.
- __ and_(shift, 7);
- // Mask off the unwanted bits before word-shifting.
- __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ mov(tmp, shift);
- __ add(tmp, Immediate(8));
- __ movd(tmp_simd, tmp);
- __ vpsrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ vpackuswb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpand(dst, src, kScratchDoubleReg);
- __ movd(tmp_simd, shift);
- __ vpsllw(dst, dst, tmp_simd);
+
+ if (HasImmediateInput(instr, 1)) {
+ // Perform 16-bit shift, then mask away low bits.
+ uint8_t shift = i.InputInt3(1);
+ __ Psllw(dst, dst, static_cast<byte>(shift));
+
+ uint8_t bmask = static_cast<uint8_t>(0xff << shift);
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ __ mov(tmp, mask);
+ __ Movd(tmp_simd, tmp);
+ __ Pshufd(tmp_simd, tmp_simd, 0);
+ __ Pand(dst, tmp_simd);
+ } else {
+ Register shift = i.InputRegister(1);
+ // Take shift value modulo 8.
+ __ and_(shift, 7);
+ // Mask off the unwanted bits before word-shifting.
+ __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ mov(tmp, shift);
+ __ add(tmp, Immediate(8));
+ __ Movd(tmp_simd, tmp);
+ __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
+ __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pand(dst, kScratchDoubleReg);
+ __ Movd(tmp_simd, shift);
+ __ Psllw(dst, dst, tmp_simd);
+ }
break;
}
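
Not part of the patch: a scalar sketch (hypothetical helper) of the immediate path above. psllw shifts 16-bit lanes, so bits from the low byte of each word spill into its high byte; AND-ing every byte with 0xff << shift clears the spilled bits. Two packed byte lanes in a uint16_t:

#include <cstdint>
uint16_t TwoLaneShl(uint16_t two_lanes, int shift) {  // shift in [0, 7]
  uint16_t shifted = static_cast<uint16_t>(two_lanes << shift);
  uint8_t bmask = static_cast<uint8_t>(0xff << shift);
  uint16_t mask = static_cast<uint16_t>((bmask << 8) | bmask);
  return shifted & mask;  // each byte lane is now shifted independently
}
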
case kIA32I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ punpckhbw(kScratchDoubleReg, dst);
- __ punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ movd(tmp_simd, tmp);
- __ psraw(kScratchDoubleReg, tmp_simd);
- __ psraw(dst, tmp_simd);
- __ packsswb(dst, kScratchDoubleReg);
+ if (HasImmediateInput(instr, 1)) {
+ __ Punpckhbw(kScratchDoubleReg, dst);
+ __ Punpcklbw(dst, dst);
+ uint8_t shift = i.InputInt3(1) + 8;
+ __ Psraw(kScratchDoubleReg, shift);
+ __ Psraw(dst, shift);
+ __ Packsswb(dst, kScratchDoubleReg);
+ } else {
+ Register tmp = i.ToRegister(instr->TempAt(0));
+ XMMRegister tmp_simd = i.TempSimd128Register(1);
+ // Unpack the bytes into words, do arithmetic shifts, and repack.
+ __ Punpckhbw(kScratchDoubleReg, dst);
+ __ Punpcklbw(dst, dst);
+ __ mov(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ and_(tmp, 7);
+ __ add(tmp, Immediate(8));
+ __ Movd(tmp_simd, tmp);
+ __ Psraw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
+ __ Psraw(dst, dst, tmp_simd);
+ __ Packsswb(dst, kScratchDoubleReg);
+ }
break;
}
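
Not part of the patch: a scalar sketch (hypothetical helper) of the immediate arithmetic-shift path above. Interleaving dst with itself places each byte in both halves of a 16-bit lane, so a word shift by shift + 8 yields the sign-extended 8-bit shift, and packsswb repacks the results.

#include <cstdint>
int8_t I8LaneShrS(int8_t lane, int shift) {  // shift in [0, 7]
  uint8_t b = static_cast<uint8_t>(lane);
  int16_t wide = static_cast<int16_t>(static_cast<uint16_t>((b << 8) | b));
  return static_cast<int8_t>(wide >> (shift + 8));  // == lane >> shift (arithmetic)
}
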
case kSSEI8x16Add: {
@@ -3529,21 +3446,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I8x16ShrU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do logical shifts, and repack.
- __ punpckhbw(kScratchDoubleReg, dst);
- __ punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ movd(tmp_simd, tmp);
- __ psrlw(kScratchDoubleReg, tmp_simd);
- __ psrlw(dst, tmp_simd);
- __ packuswb(dst, kScratchDoubleReg);
+
+ if (HasImmediateInput(instr, 1)) {
+ // Perform 16-bit shift, then mask away high bits.
+ uint8_t shift = i.InputInt3(1);
+ __ Psrlw(dst, dst, static_cast<byte>(shift));
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ __ mov(tmp, mask);
+ __ Movd(tmp_simd, tmp);
+ __ Pshufd(tmp_simd, tmp_simd, 0);
+ __ Pand(dst, tmp_simd);
+ } else {
+ // Unpack the bytes into words, do logical shifts, and repack.
+ __ Punpckhbw(kScratchDoubleReg, dst);
+ __ Punpcklbw(dst, dst);
+ __ mov(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ and_(tmp, 7);
+ __ add(tmp, Immediate(8));
+ __ Movd(tmp_simd, tmp);
+ __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
+ __ Psrlw(dst, dst, tmp_simd);
+ __ Packuswb(dst, kScratchDoubleReg);
+ }
break;
}
case kSSEI8x16MinU: {
@@ -3611,6 +3542,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kIA32I8x16Abs: {
+ __ Pabsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kIA32S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ Pxor(dst, dst);
@@ -4503,16 +4438,6 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
cases.data() + cases.size());
}
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- IA32OperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ cmp(input, Immediate(i.InputInt32(index + 0)));
- __ j(equal, GetLabel(i.InputRpo(index + 1)));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
IA32OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -4689,7 +4614,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
- // WASM import wrappers are passed a tuple in the place of the instance.
+ // Wasm import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
@@ -5086,6 +5011,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
+#undef ASSEMBLE_SIMD_SHIFT
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index d53c1ecc01..b4c90e2711 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -190,10 +190,8 @@ namespace compiler {
V(IA32I32x4SConvertI16x8Low) \
V(IA32I32x4SConvertI16x8High) \
V(IA32I32x4Neg) \
- V(SSEI32x4Shl) \
- V(AVXI32x4Shl) \
- V(SSEI32x4ShrS) \
- V(AVXI32x4ShrS) \
+ V(IA32I32x4Shl) \
+ V(IA32I32x4ShrS) \
V(SSEI32x4Add) \
V(AVXI32x4Add) \
V(SSEI32x4AddHoriz) \
@@ -218,8 +216,7 @@ namespace compiler {
V(AVXI32x4UConvertF32x4) \
V(IA32I32x4UConvertI16x8Low) \
V(IA32I32x4UConvertI16x8High) \
- V(SSEI32x4ShrU) \
- V(AVXI32x4ShrU) \
+ V(IA32I32x4ShrU) \
V(SSEI32x4MinU) \
V(AVXI32x4MinU) \
V(SSEI32x4MaxU) \
@@ -228,6 +225,7 @@ namespace compiler {
V(AVXI32x4GtU) \
V(SSEI32x4GeU) \
V(AVXI32x4GeU) \
+ V(IA32I32x4Abs) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneU) \
V(IA32I16x8ExtractLaneS) \
@@ -236,10 +234,8 @@ namespace compiler {
V(IA32I16x8SConvertI8x16Low) \
V(IA32I16x8SConvertI8x16High) \
V(IA32I16x8Neg) \
- V(SSEI16x8Shl) \
- V(AVXI16x8Shl) \
- V(SSEI16x8ShrS) \
- V(AVXI16x8ShrS) \
+ V(IA32I16x8Shl) \
+ V(IA32I16x8ShrS) \
V(SSEI16x8SConvertI32x4) \
V(AVXI16x8SConvertI32x4) \
V(SSEI16x8Add) \
@@ -268,8 +264,7 @@ namespace compiler {
V(AVXI16x8GeS) \
V(IA32I16x8UConvertI8x16Low) \
V(IA32I16x8UConvertI8x16High) \
- V(SSEI16x8ShrU) \
- V(AVXI16x8ShrU) \
+ V(IA32I16x8ShrU) \
V(SSEI16x8UConvertI32x4) \
V(AVXI16x8UConvertI32x4) \
V(SSEI16x8AddSaturateU) \
@@ -285,6 +280,7 @@ namespace compiler {
V(SSEI16x8GeU) \
V(AVXI16x8GeU) \
V(IA32I16x8RoundingAverageU) \
+ V(IA32I16x8Abs) \
V(IA32I8x16Splat) \
V(IA32I8x16ExtractLaneU) \
V(IA32I8x16ExtractLaneS) \
@@ -293,8 +289,7 @@ namespace compiler {
V(SSEI8x16SConvertI16x8) \
V(AVXI8x16SConvertI16x8) \
V(IA32I8x16Neg) \
- V(SSEI8x16Shl) \
- V(AVXI8x16Shl) \
+ V(IA32I8x16Shl) \
V(IA32I8x16ShrS) \
V(SSEI8x16Add) \
V(AVXI8x16Add) \
@@ -334,6 +329,7 @@ namespace compiler {
V(SSEI8x16GeU) \
V(AVXI8x16GeU) \
V(IA32I8x16RoundingAverageU) \
+ V(IA32I8x16Abs) \
V(IA32S128Zero) \
V(SSES128Not) \
V(AVXS128Not) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 61c39a17e3..020136403a 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -171,10 +171,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4SConvertI16x8Low:
case kIA32I32x4SConvertI16x8High:
case kIA32I32x4Neg:
- case kSSEI32x4Shl:
- case kAVXI32x4Shl:
- case kSSEI32x4ShrS:
- case kAVXI32x4ShrS:
+ case kIA32I32x4Shl:
+ case kIA32I32x4ShrS:
case kSSEI32x4Add:
case kAVXI32x4Add:
case kSSEI32x4AddHoriz:
@@ -199,8 +197,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI32x4UConvertF32x4:
case kIA32I32x4UConvertI16x8Low:
case kIA32I32x4UConvertI16x8High:
- case kSSEI32x4ShrU:
- case kAVXI32x4ShrU:
+ case kIA32I32x4ShrU:
case kSSEI32x4MinU:
case kAVXI32x4MinU:
case kSSEI32x4MaxU:
@@ -209,6 +206,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI32x4GtU:
case kSSEI32x4GeU:
case kAVXI32x4GeU:
+ case kIA32I32x4Abs:
case kIA32I16x8Splat:
case kIA32I16x8ExtractLaneU:
case kIA32I16x8ExtractLaneS:
@@ -217,10 +215,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8SConvertI8x16Low:
case kIA32I16x8SConvertI8x16High:
case kIA32I16x8Neg:
- case kSSEI16x8Shl:
- case kAVXI16x8Shl:
- case kSSEI16x8ShrS:
- case kAVXI16x8ShrS:
+ case kIA32I16x8Shl:
+ case kIA32I16x8ShrS:
case kSSEI16x8SConvertI32x4:
case kAVXI16x8SConvertI32x4:
case kSSEI16x8Add:
@@ -249,8 +245,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI16x8GeS:
case kIA32I16x8UConvertI8x16Low:
case kIA32I16x8UConvertI8x16High:
- case kSSEI16x8ShrU:
- case kAVXI16x8ShrU:
+ case kIA32I16x8ShrU:
case kSSEI16x8UConvertI32x4:
case kAVXI16x8UConvertI32x4:
case kSSEI16x8AddSaturateU:
@@ -266,6 +261,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEI16x8GeU:
case kAVXI16x8GeU:
case kIA32I16x8RoundingAverageU:
+ case kIA32I16x8Abs:
case kIA32I8x16Splat:
case kIA32I8x16ExtractLaneU:
case kIA32I8x16ExtractLaneS:
@@ -274,8 +270,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEI8x16SConvertI16x8:
case kAVXI8x16SConvertI16x8:
case kIA32I8x16Neg:
- case kSSEI8x16Shl:
- case kAVXI8x16Shl:
+ case kIA32I8x16Shl:
case kIA32I8x16ShrS:
case kSSEI8x16Add:
case kAVXI8x16Add:
@@ -315,6 +310,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEI8x16GeU:
case kAVXI8x16GeU:
case kIA32I8x16RoundingAverageU:
+ case kIA32I8x16Abs:
case kIA32S128Zero:
case kSSES128Not:
case kAVXS128Not:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index cb02f56e48..36a70c8fa8 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -305,22 +305,23 @@ void VisitRRISimd(InstructionSelector* selector, Node* node,
}
void VisitRROSimdShift(InstructionSelector* selector, Node* node,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempSimd128Register()};
- if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1,
- arraysize(temps), temps);
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1,
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1,
arraysize(temps), temps);
}
}
-void VisitRROI8x16SimdRightShift(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+void VisitRROI8x16SimdShift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
@@ -328,7 +329,6 @@ void VisitRROI8x16SimdRightShift(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1,
arraysize(temps), temps);
}
-
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
@@ -1631,7 +1631,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
IA32OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- // Emit either ArchTableSwitch or ArchLookupSwitch.
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range();
@@ -2004,21 +2004,22 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
InstructionCode code = kIA32Word32AtomicPairCompareExchange |
AddressingModeField::encode(addressing_mode);
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ InstructionOperand temps[2];
+ size_t temp_count = 0;
+ if (projection0) {
+ outputs[output_count++] = g.DefineAsFixed(projection0, eax);
+ } else {
+ temps[temp_count++] = g.TempRegister(eax);
+ }
if (projection1) {
- InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
- g.DefineAsFixed(projection1, edx)};
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, 0, {});
- } else if (projection0) {
- InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
- InstructionOperand temps[] = {g.TempRegister(edx)};
- const int num_temps = arraysize(temps);
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- num_temps, temps);
+ outputs[output_count++] = g.DefineAsFixed(projection1, edx);
} else {
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
- const int num_temps = arraysize(temps);
- Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ temps[temp_count++] = g.TempRegister(edx);
}
+ Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
+ temps);
}
#define SIMD_INT_TYPES(V) \
@@ -2107,12 +2108,15 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4Neg) \
V(I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High) \
+ V(I32x4Abs) \
V(I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High) \
V(I16x8Neg) \
V(I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High) \
- V(I8x16Neg)
+ V(I16x8Abs) \
+ V(I8x16Neg) \
+ V(I8x16Abs)
#define SIMD_UNOP_PREFIX_LIST(V) \
V(F32x4Abs) \
@@ -2130,21 +2134,15 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(S1x8AllTrue) \
V(S1x16AllTrue)
-#define SIMD_SHIFT_OPCODES(V) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4ShrU) \
- V(I16x8Shl) \
- V(I16x8ShrS) \
- V(I16x8ShrU)
-
#define SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX(V) \
V(I64x2Shl) \
- V(I64x2ShrU)
-
-#define SIMD_I8X16_RIGHT_SHIFT_OPCODES(V) \
- V(I8x16ShrS) \
- V(I8x16ShrU)
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU)
void InstructionSelector::VisitF64x2Min(Node* node) {
IA32OperandGenerator g(this);
@@ -2360,30 +2358,14 @@ VISIT_SIMD_REPLACE_LANE(F32x4)
VISIT_SIMD_REPLACE_LANE_USE_REG(F64x2)
#undef VISIT_SIMD_REPLACE_LANE_USE_REG
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- VisitRROSimdShift(this, node, kAVX##Opcode, kSSE##Opcode); \
- }
-SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
-#undef VISIT_SIMD_SHIFT
-#undef SIMD_SHIFT_OPCODES
-
-#define VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- VisitRROSimdShift(this, node, kIA32##Opcode, kIA32##Opcode); \
+#define VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ VisitRROSimdShift(this, node, kIA32##Opcode); \
}
SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX(VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX)
#undef VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX
#undef SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX
-#define VISIT_SIMD_I8x16_RIGHT_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- VisitRROI8x16SimdRightShift(this, node, kIA32##Opcode); \
- }
-SIMD_I8X16_RIGHT_SHIFT_OPCODES(VISIT_SIMD_I8x16_RIGHT_SHIFT)
-#undef SIMD_I8X16_RIGHT_SHIFT_OPCODES
-#undef VISIT_SIMD_I8x16_RIGHT_SHIFT
-
#define VISIT_SIMD_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
@@ -2463,15 +2445,36 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
void InstructionSelector::VisitI8x16Shl(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- if (IsSupported(AVX)) {
- Emit(kAVXI8x16Shl, g.DefineAsRegister(node), operand0, operand1,
- arraysize(temps), temps);
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
+ this->Emit(kIA32I8x16Shl, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
} else {
- Emit(kSSEI8x16Shl, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
+ VisitRROI8x16SimdShift(this, node, kIA32I8x16Shl);
+ }
+}
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) {
+ IA32OperandGenerator g(this);
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ this->Emit(kIA32I8x16ShrS, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrS);
+ }
+}
+
+void InstructionSelector::VisitI8x16ShrU(Node* node) {
+ IA32OperandGenerator g(this);
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
+ this->Emit(kIA32I8x16ShrU, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
+ } else {
+ VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrU);
}
}
@@ -2700,7 +2703,10 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
// pshufd takes a single imm8 shuffle mask.
opcode = kIA32S32x4Swizzle;
no_same_as_first = true;
- src0_needs_reg = false;
+      // TODO(v8:9198): This doesn't strictly require a register; force the
+      // swizzles to always use registers until the generation of incorrect
+      // memory operands can be fixed.
+ src0_needs_reg = true;
imms[imm_count++] = shuffle_mask;
}
} else {
@@ -2713,7 +2719,11 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
} else {
opcode = kIA32S32x4Shuffle;
no_same_as_first = true;
- src0_needs_reg = false;
+      // TODO(v8:9198): src0 and src1 are used by pshufd in codegen, which
+      // requires memory operands to be 16-byte aligned. Since we cannot
+      // guarantee that yet, force the use of registers here.
+ src0_needs_reg = true;
+ src1_needs_reg = true;
imms[imm_count++] = shuffle_mask;
int8_t blend_mask = PackBlend4(shuffle32x4);
imms[imm_count++] = blend_mask;
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 23e4e44b76..84d5d249b8 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -19,7 +19,7 @@
#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/backend/x64/instruction-codes-x64.h"
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/compiler/backend/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/backend/s390/instruction-codes-s390.h"
@@ -82,7 +82,6 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchCallBuiltinPointer) \
V(ArchJmp) \
V(ArchBinarySearchSwitch) \
- V(ArchLookupSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
V(ArchAbortCSAAssert) \
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index dd4bca4d81..4f26130d66 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -263,7 +263,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchDeoptimize:
case kArchJmp:
case kArchBinarySearchSwitch:
- case kArchLookupSwitch:
case kArchRet:
case kArchTableSwitch:
case kArchThrowTerminator:
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index 7569d1280a..aa7da85e42 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -11,6 +11,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/schedule.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
@@ -46,12 +47,6 @@ class SwitchInfo {
}
}
- // Ensure that comparison order of if-cascades is preserved.
- std::vector<CaseInfo> CasesSortedByOriginalOrder() const {
- std::vector<CaseInfo> result(cases_.begin(), cases_.end());
- std::stable_sort(result.begin(), result.end());
- return result;
- }
std::vector<CaseInfo> CasesSortedByValue() const {
std::vector<CaseInfo> result(cases_.begin(), cases_.end());
std::stable_sort(result.begin(), result.end(),
@@ -314,6 +309,19 @@ class OperandGenerator {
return Constant(OpParameter<int32_t>(node->op()));
case IrOpcode::kInt64Constant:
return Constant(OpParameter<int64_t>(node->op()));
+ case IrOpcode::kTaggedIndexConstant: {
+ // Unencoded index value.
+ intptr_t value =
+ static_cast<intptr_t>(OpParameter<int32_t>(node->op()));
+ DCHECK(TaggedIndex::IsValid(value));
+ // Generate it as 32/64-bit constant in a tagged form.
+ Address tagged_index = TaggedIndex::FromIntptr(value).ptr();
+ if (kSystemPointerSize == kInt32Size) {
+ return Constant(static_cast<int32_t>(tagged_index));
+ } else {
+ return Constant(static_cast<int64_t>(tagged_index));
+ }
+ }
case IrOpcode::kFloat32Constant:
return Constant(OpParameter<float>(node->op()));
case IrOpcode::kRelocatableInt32Constant:
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 6c6db9f6d7..7d72dbbf2d 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -574,11 +574,7 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
StateValueList* values, InstructionOperandVector* inputs,
OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
MachineType type, FrameStateInputKind kind, Zone* zone) {
- if (input == nullptr) {
- values->PushOptimizedOut();
- return 0;
- }
-
+ DCHECK_NOT_NULL(input);
switch (input->opcode()) {
case IrOpcode::kArgumentsElementsState: {
values->PushArgumentsElements(ArgumentsStateTypeOf(input->op()));
@@ -636,6 +632,26 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
}
+size_t InstructionSelector::AddInputsToFrameStateDescriptor(
+ StateValueList* values, InstructionOperandVector* inputs,
+ OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* node,
+ FrameStateInputKind kind, Zone* zone) {
+ size_t entries = 0;
+ StateValuesAccess::iterator it = StateValuesAccess(node).begin();
+  // Take advantage of the sparse nature of StateValuesAccess to skip over
+  // multiple empty nodes at once, pushing the repeated OptimizedOuts in one go.
+ while (!it.done()) {
+ values->PushOptimizedOut(it.AdvanceTillNotEmpty());
+ if (it.done()) break;
+ StateValuesAccess::TypedNode input_node = *it;
+ entries += AddOperandToStateValueDescriptor(values, inputs, g, deduplicator,
+ input_node.node,
+ input_node.type, kind, zone);
+ ++it;
+ }
+ return entries;
+}
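
Not part of the patch: a sketch of the batching pattern used by the new overload (container and names hypothetical). A run of empty slots becomes a single bulk PushOptimizedOut instead of one push per slot.

#include <cstddef>
#include <vector>
// Count and skip a run of empty slots starting at i.
size_t AdvanceTillNotEmpty(const std::vector<bool>& is_empty, size_t& i) {
  size_t run = 0;
  while (i < is_empty.size() && is_empty[i]) { ++i; ++run; }
  return run;
}
void PushFields(const std::vector<bool>& is_empty, std::vector<int>& fields) {
  const int kOptimizedOut = -1;  // placeholder marker
  size_t i = 0;
  while (i < is_empty.size()) {
    fields.insert(fields.end(), AdvanceTillNotEmpty(is_empty, i), kOptimizedOut);
    if (i >= is_empty.size()) break;
    fields.push_back(static_cast<int>(i));  // process the non-empty slot
    ++i;
  }
}
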
+
// Returns the number of instruction operands added to inputs.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
@@ -669,30 +685,25 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
DCHECK_EQ(values_descriptor->size(), 0u);
values_descriptor->ReserveSize(descriptor->GetSize());
+ DCHECK_NOT_NULL(function);
entries += AddOperandToStateValueDescriptor(
values_descriptor, inputs, g, deduplicator, function,
MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
- for (StateValuesAccess::TypedNode input_node :
- StateValuesAccess(parameters)) {
- entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
- deduplicator, input_node.node,
- input_node.type, kind, zone);
- }
+
+ entries += AddInputsToFrameStateDescriptor(
+ values_descriptor, inputs, g, deduplicator, parameters, kind, zone);
+
if (descriptor->HasContext()) {
+ DCHECK_NOT_NULL(context);
entries += AddOperandToStateValueDescriptor(
values_descriptor, inputs, g, deduplicator, context,
MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
}
- for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
- entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
- deduplicator, input_node.node,
- input_node.type, kind, zone);
- }
- for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
- entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
- deduplicator, input_node.node,
- input_node.type, kind, zone);
- }
+
+ entries += AddInputsToFrameStateDescriptor(values_descriptor, inputs, g,
+ deduplicator, locals, kind, zone);
+ entries += AddInputsToFrameStateDescriptor(values_descriptor, inputs, g,
+ deduplicator, stack, kind, zone);
DCHECK_EQ(initial_size + entries, inputs->size());
return entries;
}
@@ -798,21 +809,6 @@ void InstructionSelector::AppendDeoptimizeArguments(
instruction_zone());
}
-Instruction* InstructionSelector::EmitDeoptimize(
- InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
- size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason, FeedbackSource const& feedback,
- Node* frame_state) {
- InstructionOperandVector args(instruction_zone());
- for (size_t i = 0; i < input_count; ++i) {
- args.push_back(inputs[i]);
- }
- opcode |= MiscField::encode(static_cast<int>(input_count));
- AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
- return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
- nullptr);
-}
-
// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
@@ -1335,6 +1331,7 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitProjection(node);
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
+ case IrOpcode::kTaggedIndexConstant:
case IrOpcode::kExternalConstant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
@@ -2026,6 +2023,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4GtU(node);
case IrOpcode::kI32x4GeU:
return MarkAsSimd128(node), VisitI32x4GeU(node);
+ case IrOpcode::kI32x4Abs:
+ return MarkAsSimd128(node), VisitI32x4Abs(node);
+ case IrOpcode::kI32x4BitMask:
+ return MarkAsWord32(node), VisitI32x4BitMask(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
@@ -2092,6 +2093,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kI16x8RoundingAverageU:
return MarkAsSimd128(node), VisitI16x8RoundingAverageU(node);
+ case IrOpcode::kI16x8Abs:
+ return MarkAsSimd128(node), VisitI16x8Abs(node);
+ case IrOpcode::kI16x8BitMask:
+ return MarkAsWord32(node), VisitI16x8BitMask(node);
case IrOpcode::kI8x16Splat:
return MarkAsSimd128(node), VisitI8x16Splat(node);
case IrOpcode::kI8x16ExtractLaneU:
@@ -2148,6 +2153,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16GeU(node);
case IrOpcode::kI8x16RoundingAverageU:
return MarkAsSimd128(node), VisitI8x16RoundingAverageU(node);
+ case IrOpcode::kI8x16Abs:
+ return MarkAsSimd128(node), VisitI8x16Abs(node);
+ case IrOpcode::kI8x16BitMask:
+ return MarkAsWord32(node), VisitI8x16BitMask(node);
case IrOpcode::kS128Zero:
return MarkAsSimd128(node), VisitS128Zero(node);
case IrOpcode::kS128And:
@@ -2336,23 +2345,6 @@ void InstructionSelector::EmitTableSwitch(
Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
-void InstructionSelector::EmitLookupSwitch(
- const SwitchInfo& sw, InstructionOperand const& value_operand) {
- OperandGenerator g(this);
- std::vector<CaseInfo> cases = sw.CasesSortedByOriginalOrder();
- size_t input_count = 2 + sw.case_count() * 2;
- DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = value_operand;
- inputs[1] = g.Label(sw.default_branch());
- for (size_t index = 0; index < cases.size(); ++index) {
- const CaseInfo& c = cases[index];
- inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
- inputs[index * 2 + 2 + 1] = g.Label(c.branch);
- }
- Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
-}
-
void InstructionSelector::EmitBinarySearchSwitch(
const SwitchInfo& sw, InstructionOperand const& value_operand) {
OperandGenerator g(this);
@@ -2362,8 +2354,6 @@ void InstructionSelector::EmitBinarySearchSwitch(
inputs[0] = value_operand;
inputs[1] = g.Label(sw.default_branch());
std::vector<CaseInfo> cases = sw.CasesSortedByValue();
- std::stable_sort(cases.begin(), cases.end(),
- [](CaseInfo a, CaseInfo b) { return a.value < b.value; });
for (size_t index = 0; index < cases.size(); ++index) {
const CaseInfo& c = cases[index];
inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
@@ -2583,7 +2573,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC
+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2607,7 +2597,7 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
@@ -2620,7 +2610,7 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
}
#endif // !V8_TARGET_ARCH_IA32
-#if !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
#if !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
@@ -2642,7 +2632,13 @@ void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MaxU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
+
+#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitI8x16BitMask(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8BitMask(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4BitMask(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -2990,9 +2986,10 @@ void InstructionSelector::EmitIdentity(Node* node) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* value) {
- EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason,
- feedback, value);
+ Node* frame_state) {
+ InstructionOperandVector args(instruction_zone());
+ AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
+ Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr);
}
void InstructionSelector::VisitThrow(Node* node) {
@@ -3055,17 +3052,10 @@ namespace {
FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, Node* state) {
DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
DCHECK_EQ(kFrameStateInputCount, state->InputCount());
- FrameStateInfo state_info = FrameStateInfoOf(state->op());
-
- int parameters = static_cast<int>(
- StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
- int locals = static_cast<int>(
- StateValuesAccess(state->InputAt(kFrameStateLocalsInput)).size());
- int stack = static_cast<int>(
- StateValuesAccess(state->InputAt(kFrameStateStackInput)).size());
-
- DCHECK_EQ(parameters, state_info.parameter_count());
- DCHECK_EQ(locals, state_info.local_count());
+ const FrameStateInfo& state_info = FrameStateInfoOf(state->op());
+ int parameters = state_info.parameter_count();
+ int locals = state_info.local_count();
+ int stack = state_info.type() == FrameStateType::kInterpretedFunction ? 1 : 0;
FrameStateDescriptor* outer_state = nullptr;
Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
@@ -3242,7 +3232,6 @@ bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
}
UNREACHABLE();
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index a142993ef2..5950591286 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -351,15 +351,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
size_t input_count, InstructionOperand* inputs, size_t temp_count,
InstructionOperand* temps, FlagsContinuation* cont);
- // ===========================================================================
- // ===== Architecture-independent deoptimization exit emission methods. ======
- // ===========================================================================
- Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
- InstructionOperand* outputs, size_t input_count,
- InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason,
- FeedbackSource const& feedback,
- Node* frame_state);
+ void EmitIdentity(Node* node);
// ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
@@ -508,8 +500,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand const& index_operand);
- void EmitLookupSwitch(const SwitchInfo& sw,
- InstructionOperand const& value_operand);
void EmitBinarySearchSwitch(const SwitchInfo& sw,
InstructionOperand const& value_operand);
@@ -586,6 +576,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
StateObjectDeduplicator* deduplicator,
InstructionOperandVector* inputs,
FrameStateInputKind kind, Zone* zone);
+ size_t AddInputsToFrameStateDescriptor(StateValueList* values,
+ InstructionOperandVector* inputs,
+ OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ Node* node, FrameStateInputKind kind,
+ Zone* zone);
size_t AddOperandToStateValueDescriptor(StateValueList* values,
InstructionOperandVector* inputs,
OperandGenerator* g,
@@ -636,7 +632,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* value);
+ FeedbackSource const& feedback, Node* frame_state);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
@@ -655,7 +651,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
const CallDescriptor* call_descriptor, Node* node);
- void EmitIdentity(Node* node);
bool CanProduceSignalingNaN(Node* node);
// ===========================================================================
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 92fa95270c..e189100c34 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1212,8 +1212,8 @@ class StateValueList {
void PushPlain(MachineType type) {
fields_.push_back(StateValueDescriptor::Plain(type));
}
- void PushOptimizedOut() {
- fields_.push_back(StateValueDescriptor::OptimizedOut());
+ void PushOptimizedOut(size_t num = 1) {
+ fields_.insert(fields_.end(), num, StateValueDescriptor::OptimizedOut());
}
iterator begin() { return iterator(fields_.begin(), nested_.begin()); }
@@ -1362,6 +1362,8 @@ class V8_EXPORT_PRIVATE InstructionBlock final
bool IsDeferred() const { return deferred_; }
bool IsHandler() const { return handler_; }
+ void MarkHandler() { handler_ = true; }
+ void UnmarkHandler() { handler_ = false; }
RpoNumber ao_number() const { return ao_number_; }
RpoNumber rpo_number() const { return rpo_number_; }
@@ -1416,7 +1418,7 @@ class V8_EXPORT_PRIVATE InstructionBlock final
int32_t code_start_; // start index of arch-specific code.
int32_t code_end_ = -1; // end index of arch-specific code.
const bool deferred_; // Block contains deferred code.
- const bool handler_; // Block is a handler entry point.
+ bool handler_; // Block is a handler entry point.
bool switch_target_ = false;
bool alignment_ = false; // insert alignment before this block
bool needs_frame_ = false;
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index ee195bf51e..488a13c1a9 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -165,8 +165,19 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
// Skip empty blocks when the previous block doesn't fall through.
bool prev_fallthru = true;
for (auto const block : code->instruction_blocks()) {
- int block_num = block->rpo_number().ToInt();
- skip[block_num] = !prev_fallthru && result[block_num].ToInt() != block_num;
+ RpoNumber block_rpo = block->rpo_number();
+ int block_num = block_rpo.ToInt();
+ RpoNumber result_rpo = result[block_num];
+ skip[block_num] = !prev_fallthru && result_rpo != block_rpo;
+
+ if (result_rpo != block_rpo) {
+ // We need the handler information to be propagated, so that branch
+ // targets are annotated as necessary for control flow integrity
+ // checks (when enabled).
+ if (code->InstructionBlockAt(block_rpo)->IsHandler()) {
+ code->InstructionBlockAt(result_rpo)->MarkHandler();
+ }
+ }
bool fallthru = true;
for (int i = block->code_start(); i < block->code_end(); ++i) {
@@ -179,6 +190,8 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
// Overwrite a redundant jump with a nop.
TRACE("jt-fw nop @%d\n", i);
instr->OverwriteWithNop();
+ // If this block was marked as a handler, it can be unmarked now.
+ code->InstructionBlockAt(block_rpo)->UnmarkHandler();
}
fallthru = false; // jumps don't fall through to the next block.
}
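Note on the jump-threading hunk above: when a block is forwarded to another, its handler bit must follow the edge so the forwarding target stays annotated for control-flow-integrity checks, and a block whose redundant jump was nopped can drop the bit via UnmarkHandler(). A toy model of the forwarding rule (Block and the index-based forward_to are simplifications of InstructionBlock/RpoNumber):

    #include <vector>

    struct Block {
      int forward_to;  // forwarding target; equals own index if kept
      bool handler;    // block is a handler entry point
    };

    void PropagateHandlerBits(std::vector<Block>& blocks) {
      for (int b = 0; b < static_cast<int>(blocks.size()); ++b) {
        if (blocks[b].forward_to != b && blocks[b].handler) {
          // Branches that used to land on b now land on its target, so
          // the target must carry the handler annotation as well.
          blocks[blocks[b].forward_to].handler = true;
        }
      }
    }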
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index dd35d47dc4..8e1ce0f2a3 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -787,7 +787,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
// from start_call to return address.
- int offset = 40;
+ int offset = __ root_array_available() ? 68 : 80;
#if V8_HOST_ARCH_MIPS
if (__ emit_debug_code()) {
offset += 16;
@@ -840,9 +840,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
@@ -859,7 +856,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ stop();
break;
case kArchDebugBreak:
- __ stop();
+ __ DebugBreak();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -2439,6 +2436,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
+ case kMipsI32x4Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
case kMipsI16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2600,6 +2603,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
+ case kMipsI16x8Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
case kMipsI8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2761,6 +2770,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
+ case kMipsI8x16Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
case kMipsS128And: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3681,16 +3696,6 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
cases.data() + cases.size());
}
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- MipsOperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ li(kScratchReg, Operand(i.InputInt32(index + 0)));
- __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -3747,7 +3752,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
- // WASM import wrappers are passed a tuple in the place of the instance.
+ // Wasm import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
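Note on the new kMipsI32x4Abs/kMipsI16x8Abs/kMipsI8x16Abs cases above: they use MSA signed absolute-subtract against kSimd128RegZero (assumed to hold zero, as its name suggests), since |a - 0| == |a| per lane. Scalar model of one i32 lane:

    #include <cstdint>

    // asub_s_w-style |a - b|; with b == 0 this is abs(a).  Note that
    // abs(INT32_MIN) wraps back to INT32_MIN, matching wasm semantics.
    int32_t AsubSW(int32_t a, int32_t b) {
      int64_t diff = static_cast<int64_t>(a) - static_cast<int64_t>(b);
      return static_cast<int32_t>(diff < 0 ? -diff : diff);
    }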
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index c3458e7b91..0a37dd7068 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -203,6 +203,7 @@ namespace compiler {
V(MipsI32x4GeS) \
V(MipsI32x4GtU) \
V(MipsI32x4GeU) \
+ V(MipsI32x4Abs) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \
@@ -230,6 +231,7 @@ namespace compiler {
V(MipsI16x8GtU) \
V(MipsI16x8GeU) \
V(MipsI16x8RoundingAverageU) \
+ V(MipsI16x8Abs) \
V(MipsI8x16Splat) \
V(MipsI8x16ExtractLaneU) \
V(MipsI8x16ExtractLaneS) \
@@ -256,6 +258,7 @@ namespace compiler {
V(MipsI8x16GtU) \
V(MipsI8x16GeU) \
V(MipsI8x16RoundingAverageU) \
+ V(MipsI8x16Abs) \
V(MipsS128And) \
V(MipsS128Or) \
V(MipsS128Xor) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index c6a7f6b95a..81bbfbbfb9 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -137,6 +137,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI16x8UConvertI32x4:
case kMipsI16x8UConvertI8x16High:
case kMipsI16x8UConvertI8x16Low:
+ case kMipsI16x8Abs:
case kMipsI32x4Add:
case kMipsI32x4AddHoriz:
case kMipsI32x4Eq:
@@ -164,6 +165,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4UConvertF32x4:
case kMipsI32x4UConvertI16x8High:
case kMipsI32x4UConvertI16x8Low:
+ case kMipsI32x4Abs:
case kMipsI8x16Add:
case kMipsI8x16AddSaturateS:
case kMipsI8x16AddSaturateU:
@@ -192,6 +194,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI8x16SubSaturateS:
case kMipsI8x16SubSaturateU:
case kMipsI8x16UConvertI16x8:
+ case kMipsI8x16Abs:
case kMipsIns:
case kMipsLsa:
case kMipsMaddD:
@@ -1295,13 +1298,9 @@ int AssembleArchJumpLatency() {
return Latency::BRANCH;
}
-int AssembleArchLookupSwitchLatency(int cases) {
- return cases * (1 + Latency::BRANCH) + AssembleArchJumpLatency();
-}
-
int AssembleArchBinarySearchSwitchLatency(int cases) {
if (cases < CodeGenerator::kBinarySearchSwitchMinimalCases) {
- return AssembleArchLookupSwitchLatency(cases);
+ return cases * (1 + Latency::BRANCH) + AssembleArchJumpLatency();
}
return 1 + Latency::BRANCH + AssembleArchBinarySearchSwitchLatency(cases / 2);
}
@@ -1390,8 +1389,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArchBinarySearchSwitch:
return AssembleArchBinarySearchSwitchLatency((instr->InputCount() - 2) /
2);
- case kArchLookupSwitch:
- return AssembleArchLookupSwitchLatency((instr->InputCount() - 2) / 2);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
case kArchAbortCSAAssert:
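Note on the latency hunk above: with kArchLookupSwitch gone, the old lookup formula survives only as the base case of the binary-search recursion. The recurrence, standalone (constants abstracted into parameters; min_cases is assumed >= 1 so the recursion terminates):

    int BinarySearchSwitchLatency(int cases, int branch, int jump,
                                  int min_cases) {
      if (cases < min_cases) {
        // Former lookup cost: a compare+branch per case plus a final jump.
        return cases * (1 + branch) + jump;
      }
      // One compare+branch halves the remaining case range.
      return 1 + branch +
             BinarySearchSwitchLatency(cases / 2, branch, jump, min_cases);
    }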
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 22f8217e4d..cd87f36913 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -1708,7 +1708,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
MipsOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- // Emit either ArchTableSwitch or ArchLookupSwitch.
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 9 + sw.value_range();
@@ -2169,6 +2169,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeS, kMipsI32x4GeS) \
V(I32x4GtU, kMipsI32x4GtU) \
V(I32x4GeU, kMipsI32x4GeU) \
+ V(I32x4Abs, kMipsI32x4Abs) \
V(I16x8Add, kMipsI16x8Add) \
V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
@@ -2190,6 +2191,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \
V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \
V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \
+ V(I16x8Abs, kMipsI16x8Abs) \
V(I8x16Add, kMipsI8x16Add) \
V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
@@ -2210,6 +2212,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16RoundingAverageU, kMipsI8x16RoundingAverageU) \
V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \
V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \
+ V(I8x16Abs, kMipsI8x16Abs) \
V(S128And, kMipsS128And) \
V(S128Or, kMipsS128Or) \
V(S128Xor, kMipsS128Xor) \
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 1e36c6ed92..224b23fffc 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -765,7 +765,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
// from start_call to return address.
- int offset = 48;
+ int offset = __ root_array_available() ? 76 : 88;
#if V8_HOST_ARCH_MIPS64
if (__ emit_debug_code()) {
offset += 16;
@@ -818,9 +818,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
@@ -837,7 +835,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ stop();
break;
case kArchDebugBreak:
- __ stop();
+ __ DebugBreak();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
@@ -2492,6 +2490,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
+ case kMips64I32x4Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2653,6 +2657,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
+ case kMips64I16x8Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
case kMips64I8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2814,6 +2824,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
+ case kMips64I8x16Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
case kMips64S128And: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3780,16 +3796,6 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
cases.data() + cases.size());
}
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- MipsOperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ li(kScratchReg, Operand(i.InputInt32(index + 0)));
- __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -3844,7 +3850,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
- // WASM import wrappers are passed a tuple in the place of the instance.
+ // Wasm import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index bed1080cbe..c752381c8c 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -233,6 +233,7 @@ namespace compiler {
V(Mips64I32x4GeS) \
V(Mips64I32x4GtU) \
V(Mips64I32x4GeU) \
+ V(Mips64I32x4Abs) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
@@ -260,6 +261,7 @@ namespace compiler {
V(Mips64I16x8GtU) \
V(Mips64I16x8GeU) \
V(Mips64I16x8RoundingAverageU) \
+ V(Mips64I16x8Abs) \
V(Mips64I8x16Splat) \
V(Mips64I8x16ExtractLaneU) \
V(Mips64I8x16ExtractLaneS) \
@@ -286,6 +288,7 @@ namespace compiler {
V(Mips64I8x16GtU) \
V(Mips64I8x16GeU) \
V(Mips64I8x16RoundingAverageU) \
+ V(Mips64I8x16Abs) \
V(Mips64S128And) \
V(Mips64S128Or) \
V(Mips64S128Xor) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index ff6517be00..0261d915fb 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -167,6 +167,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I16x8UConvertI8x16High:
case kMips64I16x8UConvertI8x16Low:
case kMips64I16x8RoundingAverageU:
+ case kMips64I16x8Abs:
case kMips64I32x4Add:
case kMips64I32x4AddHoriz:
case kMips64I32x4Eq:
@@ -194,6 +195,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4UConvertF32x4:
case kMips64I32x4UConvertI16x8High:
case kMips64I32x4UConvertI16x8Low:
+ case kMips64I32x4Abs:
case kMips64I8x16Add:
case kMips64I8x16AddSaturateS:
case kMips64I8x16AddSaturateU:
@@ -220,6 +222,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16SubSaturateS:
case kMips64I8x16SubSaturateU:
case kMips64I8x16RoundingAverageU:
+ case kMips64I8x16Abs:
case kMips64Ins:
case kMips64Lsa:
case kMips64MaxD:
@@ -945,14 +948,6 @@ int AssembleArchJumpLatency() {
return Latency::BRANCH;
}
-int AssembleArchLookupSwitchLatency(const Instruction* instr) {
- int latency = 0;
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- latency += 1 + Latency::BRANCH;
- }
- return latency + AssembleArchJumpLatency();
-}
-
int GenerateSwitchTableLatency() {
int latency = 0;
if (kArchVariant >= kMips64r6) {
@@ -1301,8 +1296,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return CallCFunctionLatency();
case kArchJmp:
return AssembleArchJumpLatency();
- case kArchLookupSwitch:
- return AssembleArchLookupSwitchLatency(instr);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
case kArchAbortCSAAssert:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 12a12e456c..5a0e41ccbe 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -2290,7 +2290,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Mips64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- // Emit either ArchTableSwitch or ArchLookupSwitch.
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 10 + 2 * sw.value_range();
@@ -2757,12 +2757,15 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
+ V(I32x4Abs, kMips64I32x4Abs) \
V(I16x8Neg, kMips64I16x8Neg) \
V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
+ V(I16x8Abs, kMips64I16x8Abs) \
V(I8x16Neg, kMips64I8x16Neg) \
+ V(I8x16Abs, kMips64I8x16Abs) \
V(S128Not, kMips64S128Not) \
V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
V(S1x4AllTrue, kMips64S1x4AllTrue) \
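Note on the VisitSwitch comment fixes above (mips and mips64): the selector weighs a jump table against a search by comparing space and time costs. Only kMaxTableSwitchValueRange and table_space_cost are visible in these hunks; the remaining weights below are illustrative, not the exact production predicate:

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    bool PreferTableSwitch(size_t value_range, size_t case_count,
                           int64_t min_value) {
      static const size_t kMaxTableSwitchValueRange = 2 << 16;
      size_t table_space_cost = 10 + 2 * value_range;  // from the mips64 hunk
      size_t table_time_cost = 3;                      // illustrative weight
      size_t lookup_space_cost = 2 + 2 * case_count;   // illustrative weight
      size_t lookup_time_cost = case_count;            // illustrative weight
      return table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost &&
             min_value > std::numeric_limits<int32_t>::min() &&
             value_range <= kMaxTableSwitchValueRange;
    }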
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index c43295f03f..addbd76ffb 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -1031,7 +1031,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label start_call;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
- int offset = 9 * kInstrSize;
+ int offset = 20 * kInstrSize;
#if defined(_AIX)
// AIX/PPC64BE Linux uses a function descriptor
int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
@@ -1041,7 +1041,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// AIX may emit 2 extra Load instructions under CallCFunctionHelper
// due to having function descriptor.
if (has_function_descriptor) {
- offset = 11 * kInstrSize;
+ offset = 22 * kInstrSize;
}
#endif
if (isWasmCapiFunction) {
@@ -1096,10 +1096,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1117,7 +1113,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ stop();
break;
case kArchDebugBreak:
- __ stop();
+ __ DebugBreak();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -2315,16 +2311,6 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
cases.data() + cases.size());
}
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- PPCOperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ Cmpwi(input, Operand(i.InputInt32(index + 0)), r0);
- __ beq(GetLabel(i.InputRpo(index + 1)));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
PPCOperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -2401,7 +2387,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
- // WASM import wrappers are passed a tuple in the place of the instance.
+ // Wasm import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index a7b4e3e870..7e29b00c31 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -1658,7 +1658,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
PPCOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- // Emit either ArchTableSwitch or ArchLookupSwitch.
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range();
@@ -2457,6 +2457,12 @@ void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16Abs(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Abs(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index f37faf850d..9420269ca0 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -2808,9 +2808,9 @@ bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) {
auto iter2 = other->uses_.begin();
while (iter1 != uses_.end() && iter2 != other->uses_.end()) {
- if (iter1->start > iter2->end) {
+ if (iter1->start >= iter2->end) {
++iter2;
- } else if (iter2->start > iter1->end) {
+ } else if (iter2->start >= iter1->end) {
++iter1;
} else {
TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start,
@@ -3016,13 +3016,14 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
LiveRange* check_use = live_at_header;
for (; check_use != nullptr && check_use->Start() < pos;
check_use = check_use->next()) {
- UsePosition* next_use =
- check_use->NextUsePositionRegisterIsBeneficial(loop_start);
- if (next_use != nullptr && next_use->pos() < pos) {
+ UsePosition* next_use = check_use->NextRegisterPosition(loop_start);
+ // A UsePosition at the end of a UseInterval may
+ // have the same value as the start of the next range.
+ if (next_use != nullptr && next_use->pos() <= pos) {
return pos;
}
}
- // No register beneficial use inside the loop before the pos.
+ // No register use inside the loop before the pos.
*begin_spill_out = live_at_header;
pos = loop_start;
break;
@@ -4323,6 +4324,10 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
if (register_use == nullptr) {
// There is no use in the current live range that requires a register.
// We can just spill it.
+ LiveRange* begin_spill = nullptr;
+ LifetimePosition spill_pos = FindOptimalSpillingPos(
+ current, current->Start(), spill_mode, &begin_spill);
+ MaybeSpillPreviousRanges(begin_spill, spill_pos, current);
Spill(current, spill_mode);
return;
}
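Note on the AllocateBlockedReg addition above: instead of always spilling at current->Start(), the allocator now asks FindOptimalSpillingPos whether the spill can be hoisted (e.g. out of a loop) when nothing in the range needs a register first. Toy version of that decision (positions as plain ints; first_reg_use < 0 meaning no register use inside the loop):

    int ChooseSpillPos(int range_start, int loop_header, int first_reg_use) {
      bool starts_inside_loop = range_start > loop_header;
      bool loop_needs_no_register = first_reg_use < 0;
      // Hoist to the header only when nothing in the loop needs a register.
      return (starts_inside_loop && loop_needs_no_register) ? loop_header
                                                            : range_start;
    }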
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 6c943dbe68..9e1a7beff9 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -721,6 +721,7 @@ class LiveRangeBundle : public ZoneObject {
private:
friend class BundleBuilder;
+ // Representation of the non-empty interval [start,end[.
class Range {
public:
Range(int s, int e) : start(s), end(e) {}
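Note: the new [start,end[ comment is what justifies the >= flips in LiveRangeBundle::TryMerge above; with half-open ranges, touching endpoints do not overlap. Self-contained check:

    struct Range {
      int start, end;  // represents the half-open interval [start, end[
    };

    bool Disjoint(const Range& a, const Range& b) {
      return a.start >= b.end || b.start >= a.end;
    }

    // Example: [3,5[ and [5,8[ are disjoint, since 5 is excluded from the
    // first range; the old strict > test mis-reported them as overlapping.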
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 5b7d0adb09..24552cf632 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -204,6 +204,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
+ if (COMPRESS_POINTERS_BOOL) {
+ __ DecompressTaggedPointer(value_, value_);
+ }
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
@@ -1330,7 +1333,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
}
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadP(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ LoadTaggedPointerField(
+ ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
__ LoadW(ip,
FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(ip, Code::kMarkedForDeoptimizationBit);
@@ -1467,13 +1471,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- __ LoadP(kScratchReg,
- FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ CmpP(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r4,
+ FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(r4);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1559,9 +1564,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
@@ -1578,7 +1580,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ stop();
break;
case kArchDebugBreak:
- __ stop();
+ __ DebugBreak();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -1646,14 +1648,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ool = new (zone()) OutOfLineRecordWrite(
this, object, offset, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
- __ StoreP(value, MemOperand(object, offset));
+ __ StoreTaggedField(value, MemOperand(object, offset), r0);
} else {
DCHECK_EQ(kMode_MRR, addressing_mode);
Register offset(i.InputRegister(1));
ool = new (zone()) OutOfLineRecordWrite(
this, object, offset, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
- __ StoreP(value, MemOperand(object, offset));
+ __ StoreTaggedField(value, MemOperand(object, offset));
}
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
@@ -2246,16 +2248,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Push:
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- if (op->representation() == MachineRepresentation::kFloat64) {
- __ lay(sp, MemOperand(sp, -kDoubleSize));
- __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
- frame_access_state()->IncreaseSPDelta(kDoubleSize /
- kSystemPointerSize);
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
- __ lay(sp, MemOperand(sp, -kSystemPointerSize));
- __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
- frame_access_state()->IncreaseSPDelta(1);
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat32:
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
+ break;
+ case MachineRepresentation::kFloat64:
+ __ lay(sp, MemOperand(sp, -kDoubleSize));
+ __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize /
+ kSystemPointerSize);
+ break;
+ case MachineRepresentation::kSimd128: {
+ __ lay(sp, MemOperand(sp, -kSimd128Size));
+ __ StoreSimd128(i.InputDoubleRegister(0), MemOperand(sp),
+ kScratchReg);
+ frame_access_state()->IncreaseSPDelta(kSimd128Size /
+ kSystemPointerSize);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
} else {
__ Push(i.InputRegister(0));
@@ -2285,10 +2299,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp, slot * kSystemPointerSize));
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
__ StoreFloat32(i.InputDoubleRegister(0),
MemOperand(sp, slot * kSystemPointerSize));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ __ StoreSimd128(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kSystemPointerSize),
+ kScratchReg);
}
} else {
__ StoreP(i.InputRegister(0),
@@ -2887,6 +2905,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
// vector replicate element
+ case kS390_F64x2Splat: {
+ __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
+ Condition(3));
+ break;
+ }
case kS390_F32x4Splat: {
#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
@@ -2897,6 +2920,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
break;
}
+ case kS390_I64x2Splat: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(3));
+ __ vrep(dst, dst, Operand(0), Condition(3));
+ break;
+ }
case kS390_I32x4Splat: {
Simd128Register dst = i.OutputSimd128Register();
__ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(2));
@@ -2916,47 +2945,136 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
// vector extract element
+ case kS390_F64x2ExtractLane: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
+ Operand(1 - i.InputInt8(1)), Condition(3));
+#else
+ __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
+ Operand(i.InputInt8(1)), Condition(3));
+#endif
+ break;
+ }
case kS390_F32x4ExtractLane: {
+#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
Operand(3 - i.InputInt8(1)), Condition(2));
+#else
+ __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
+ Operand(i.InputInt8(1)), Condition(2));
+#endif
+ break;
+ }
+ case kS390_I64x2ExtractLane: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
+ MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
+#else
+ __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
+ MemOperand(r0, i.InputInt8(1)), Condition(3));
+#endif
break;
}
case kS390_I32x4ExtractLane: {
+#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
+#else
+ __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
+ MemOperand(r0, i.InputInt8(1)), Condition(2));
+#endif
break;
}
case kS390_I16x8ExtractLaneU: {
+#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
+#else
+ __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
+ MemOperand(r0, i.InputInt8(1)), Condition(1));
+#endif
break;
}
case kS390_I16x8ExtractLaneS: {
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vlgv(kScratchReg, i.InputSimd128Register(0),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
+#else
+ __ vlgv(kScratchReg, i.InputSimd128Register(0),
+ MemOperand(r0, i.InputInt8(1)), Condition(1));
+#endif
+ __ lghr(i.OutputRegister(), kScratchReg);
break;
}
case kS390_I8x16ExtractLaneU: {
+#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
+#else
+ __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
+ MemOperand(r0, i.InputInt8(1)), Condition(0));
+#endif
break;
}
case kS390_I8x16ExtractLaneS: {
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vlgv(kScratchReg, i.InputSimd128Register(0),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
+#else
+ __ vlgv(kScratchReg, i.InputSimd128Register(0),
+ MemOperand(r0, i.InputInt8(1)), Condition(0));
+#endif
+ __ lgbr(i.OutputRegister(), kScratchReg);
break;
}
// vector replace element
+ case kS390_F64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
+ __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
+ Condition(3));
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vlvg(kScratchDoubleReg, kScratchReg,
+ MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
+#else
+ __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, i.InputInt8(1)),
+ Condition(3));
+#endif
+ __ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
+ break;
+ }
case kS390_F32x4ReplaceLane: {
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
+ __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
+ Condition(2));
+ __ vlvg(kScratchDoubleReg, kScratchReg,
+ MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
+#else
+ __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 1),
+ Condition(2));
+ __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, i.InputInt8(1)),
+ Condition(2));
+#endif
+ __ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
+ break;
+ }
+ case kS390_I64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
- __ lgdr(kScratchReg, i.InputDoubleRegister(2));
- __ srlg(kScratchReg, kScratchReg, Operand(32));
- __ vlvg(i.OutputSimd128Register(), kScratchReg,
- MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
+ MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
+#else
+ __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
+ MemOperand(r0, i.InputInt8(1)), Condition(3));
+#endif
break;
}
case kS390_I32x4ReplaceLane: {
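Note on the V8_TARGET_BIG_ENDIAN branches above: they all encode one rule, namely that hardware element order on big-endian s390 mirrors wasm lane numbering, so wasm lane l of an n-lane vector lives in hardware element n - 1 - l. As a one-liner:

    // n is 2 / 4 / 8 / 16 for I64x2 / I32x4 and F32x4 / I16x8 / I8x16.
    constexpr int HardwareLane(int wasm_lane, int n, bool big_endian) {
      return big_endian ? n - 1 - wasm_lane : wasm_lane;
    }

    // e.g. HardwareLane(1, 2, true) == 0, matching the
    // Operand(1 - i.InputInt8(1)) in the F64x2 lane cases above.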
@@ -2965,8 +3083,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
+#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
+#else
+ __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
+ MemOperand(r0, i.InputInt8(1)), Condition(2));
+#endif
break;
}
case kS390_I16x8ReplaceLane: {
@@ -2975,8 +3098,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
+#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
+#else
+ __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
+ MemOperand(r0, i.InputInt8(1)), Condition(1));
+#endif
break;
}
case kS390_I8x16ReplaceLane: {
@@ -2985,17 +3113,81 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
+#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
+#else
+ __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
+ MemOperand(r0, i.InputInt8(1)), Condition(0));
+#endif
break;
}
// vector binops
+ case kS390_F64x2Add: {
+ __ vfa(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Sub: {
+ __ vfs(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Mul: {
+ __ vfm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Div: {
+ __ vfd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Min: {
+ __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(1), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Max: {
+ __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(1), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Qfma: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vfma(dst, src1, src2, src0, Condition(3), Condition(0));
+ break;
+ }
+ case kS390_F64x2Qfms: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vfnms(dst, src1, src2, src0, Condition(3), Condition(0));
+ break;
+ }
case kS390_F32x4Add: {
__ vfa(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
+#define FLOAT_ADD_HORIZ(src0, src1, scratch0, scratch1, add0, add1) \
+ __ vpk(dst, src0, src1, Condition(0), Condition(0), Condition(3)); \
+ __ vesrl(scratch0, src0, MemOperand(r0, shift_bits), Condition(3)); \
+ __ vesrl(scratch1, src1, MemOperand(r0, shift_bits), Condition(3)); \
+ __ vpk(kScratchDoubleReg, scratch0, scratch1, Condition(0), Condition(0), \
+ Condition(3)); \
+ __ vfa(dst, add0, add1, Condition(0), Condition(0), Condition(2));
case kS390_F32x4AddHoriz: {
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -3003,16 +3195,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DoubleRegister tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
DoubleRegister tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
constexpr int shift_bits = 32;
- // generate first operand
- __ vpk(dst, src1, src0, Condition(0), Condition(0), Condition(3));
- // generate second operand
- __ vesrl(tempFPReg1, src0, MemOperand(r0, shift_bits), Condition(3));
- __ vesrl(tempFPReg2, src1, MemOperand(r0, shift_bits), Condition(3));
- __ vpk(kScratchDoubleReg, tempFPReg2, tempFPReg1, Condition(0),
- Condition(0), Condition(3));
- // add the operands
- __ vfa(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
- Condition(2));
+#ifdef V8_TARGET_BIG_ENDIAN
+ FLOAT_ADD_HORIZ(src1, src0, tempFPReg2, tempFPReg1, kScratchDoubleReg,
+ dst)
+#else
+ FLOAT_ADD_HORIZ(src0, src1, tempFPReg1, tempFPReg2, dst,
+ kScratchDoubleReg)
+#endif
+#undef FLOAT_ADD_HORIZ
break;
}
case kS390_F32x4Sub: {
@@ -3027,6 +3217,67 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
+ case kS390_F32x4Div: {
+ __ vfd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(2));
+ break;
+ }
+ case kS390_F32x4Min: {
+ __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(1), Condition(0),
+ Condition(2));
+ break;
+ }
+ case kS390_F32x4Max: {
+ __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(1), Condition(0),
+ Condition(2));
+ break;
+ }
+ case kS390_F32x4Qfma: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vfma(dst, src1, src2, src0, Condition(2), Condition(0));
+ break;
+ }
+ case kS390_F32x4Qfms: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vfnms(dst, src1, src2, src0, Condition(2), Condition(0));
+ break;
+ }
+ case kS390_I64x2Add: {
+ __ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_I64x2Sub: {
+ __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_I64x2Mul: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Register scratch_0 = r0;
+ Register scratch_1 = r1;
+ for (int i = 0; i < 2; i++) {
+ __ vlgv(scratch_0, src0, MemOperand(r0, i), Condition(3));
+ __ vlgv(scratch_1, src1, MemOperand(r0, i), Condition(3));
+ __ Mul64(scratch_0, scratch_1);
+ scratch_0 = r1;
+ scratch_1 = ip;
+ }
+ __ vlvgp(i.OutputSimd128Register(), r0, r1);
+ break;
+ }
case kS390_I32x4Add: {
__ va(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -3043,8 +3294,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
__ vsumg(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
Condition(0), Condition(2));
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
Condition(3));
+#else
+ __ vpk(dst, dst, kScratchDoubleReg, Condition(0), Condition(0),
+ Condition(3));
+#endif
break;
}
case kS390_I32x4Sub: {
@@ -3075,8 +3331,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(1));
__ vsum(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
Condition(0), Condition(1));
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
Condition(2));
+#else
+ __ vpk(dst, dst, kScratchDoubleReg, Condition(0), Condition(0),
+ Condition(2));
+#endif
break;
}
case kS390_I16x8Sub: {
@@ -3109,13 +3370,63 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
+ case kS390_I16x8RoundingAverageU: {
+ __ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(1));
+ break;
+ }
+ case kS390_I8x16RoundingAverageU: {
+ __ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(0));
+ break;
+ }
// vector comparisons
+ case kS390_F64x2Eq: {
+ __ vfce(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Ne: {
+ __ vfce(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2Le: {
+ __ vfche(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Lt: {
+ __ vfch(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_I64x2MinS: {
+ __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
case kS390_I32x4MinS: {
__ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
+ case kS390_I64x2MinU: {
+ __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
case kS390_I32x4MinU: {
__ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -3146,12 +3457,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
+ case kS390_I64x2MaxS: {
+ __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
case kS390_I32x4MaxS: {
__ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
+ case kS390_I64x2MaxU: {
+ __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
case kS390_I32x4MaxU: {
__ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -3188,6 +3511,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
+ case kS390_I64x2Eq: {
+ __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ break;
+ }
case kS390_I32x4Eq: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3204,12 +3532,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_F32x4Ne: {
- __ vfce(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ __ vfce(kScratchDoubleReg, i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
+ __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(2));
+ break;
+ }
+ case kS390_I64x2Ne: {
+ __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
__ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
i.OutputSimd128Register(), Condition(0), Condition(0),
- Condition(2));
+ Condition(3));
break;
}
case kS390_I32x4Ne: {
@@ -3248,11 +3583,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
+ case kS390_I64x2GtS: {
+ __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ break;
+ }
case kS390_I32x4GtS: {
__ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
break;
}
+ case kS390_I64x2GeS: {
+ __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg, Condition(0), Condition(0), Condition(3));
+ break;
+ }
case kS390_I32x4GeS: {
__ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3262,11 +3611,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
break;
}
+ case kS390_I64x2GtU: {
+ __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ break;
+ }
case kS390_I32x4GtU: {
__ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
break;
}
+ case kS390_I64x2GeU: {
+ __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg, Condition(0), Condition(0), Condition(3));
+ break;
+ }
case kS390_I32x4GeU: {
__ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3342,6 +3705,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
kScratchDoubleReg, Condition(0), Condition(0), Condition(mode)); \
}
+ case kS390_I64x2Shl: {
+ VECTOR_SHIFT(veslv, 3);
+ break;
+ }
+ case kS390_I64x2ShrS: {
+ VECTOR_SHIFT(vesrav, 3);
+ break;
+ }
+ case kS390_I64x2ShrU: {
+ VECTOR_SHIFT(vesrlv, 3);
+ break;
+ }
case kS390_I32x4Shl: {
VECTOR_SHIFT(veslv, 2);
break;
@@ -3379,6 +3754,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
// vector unary ops
+ case kS390_F64x2Abs: {
+ __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(2), Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2Neg: {
+ __ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(0), Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2Sqrt: {
+ __ vfsq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(0), Condition(0), Condition(3));
+ break;
+ }
case kS390_F32x4Abs: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
Condition(2), Condition(0), Condition(2));
@@ -3389,6 +3779,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(0), Condition(2));
break;
}
+ case kS390_I64x2Neg: {
+ __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(3));
+ break;
+ }
case kS390_I32x4Neg: {
__ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
Condition(0), Condition(2));
@@ -3407,7 +3802,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_F32x4RecipApprox: {
__ lgfi(kScratchReg, Operand(1));
__ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
+#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
+#else
+ __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(1), Condition(2));
+#endif
__ vfd(i.OutputSimd128Register(), kScratchDoubleReg,
i.InputSimd128Register(0), Condition(0), Condition(0),
Condition(2));
@@ -3419,18 +3818,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
__ lgfi(kScratchReg, Operand(1));
__ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
+#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
+#else
+ __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(1), Condition(2));
+#endif
__ vfd(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(0), Condition(2));
break;
}
+ case kS390_F32x4Sqrt: {
+ __ vfsq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(0), Condition(0), Condition(2));
+ break;
+ }
case kS390_S128Not: {
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
__ vno(dst, src, src, Condition(0), Condition(0), Condition(0));
break;
}
+ case kS390_I8x16Abs: {
+ __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(0));
+ break;
+ }
+ case kS390_I16x8Abs: {
+ __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(1));
+ break;
+ }
+ case kS390_I32x4Abs: {
+ __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(2));
+ break;
+ }
// vector boolean unops
+ case kS390_S1x2AnyTrue:
case kS390_S1x4AnyTrue:
case kS390_S1x8AnyTrue:
case kS390_S1x16AnyTrue: {
@@ -3443,20 +3867,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ locgr(Condition(8), dst, temp);
break;
}
- case kS390_S1x4AllTrue:
- case kS390_S1x8AllTrue:
+#define SIMD_ALL_TRUE(mode) \
+ Simd128Register src = i.InputSimd128Register(0); \
+ Register dst = i.OutputRegister(); \
+ Register temp = i.TempRegister(0); \
+ __ lgfi(temp, Operand(1)); \
+ __ xgr(dst, dst); \
+ __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
+ Condition(0), Condition(2)); \
+ __ vceq(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0), \
+ Condition(mode)); \
+ __ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(0)); \
+ __ locgr(Condition(8), dst, temp);
+ case kS390_S1x2AllTrue: {
+ SIMD_ALL_TRUE(3)
+ break;
+ }
+ case kS390_S1x4AllTrue: {
+ SIMD_ALL_TRUE(2)
+ break;
+ }
+ case kS390_S1x8AllTrue: {
+ SIMD_ALL_TRUE(1)
+ break;
+ }
case kS390_S1x16AllTrue: {
- Simd128Register src = i.InputSimd128Register(0);
- Register dst = i.OutputRegister();
- Register temp = i.TempRegister(0);
- __ lgfi(temp, Operand(1));
- __ xgr(dst, dst);
- __ vceq(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(2));
- __ vtm(src, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
- __ locgr(Condition(1), dst, temp);
+ SIMD_ALL_TRUE(0)
break;
}
+#undef SIMD_ALL_TRUE
// vector bitwise ops
case kS390_S128And: {
Simd128Register dst = i.OutputSimd128Register();
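Note on the SIMD_ALL_TRUE rework above: vceq against a zeroed kScratchDoubleReg marks every zero lane, vtm then sets condition code 0 exactly when no lane was marked, and locgr(Condition(8), ...) loads the precomputed 1 on CC0. Scalar equivalent of the predicate (lane width abstracted away):

    #include <cstdint>

    bool AllTrue(const uint64_t* lanes, int lane_count) {
      for (int k = 0; k < lane_count; ++k) {
        if (lanes[k] == 0) return false;  // any zero lane fails AllTrue
      }
      return true;
    }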
@@ -3481,8 +3921,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_S128Zero: {
Simd128Register dst = i.OutputSimd128Register();
- Simd128Register src = i.InputSimd128Register(1);
- __ vx(dst, dst, src, Condition(0), Condition(0), Condition(0));
+ __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
break;
}
case kS390_S128Select: {
@@ -3493,6 +3932,284 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vsel(dst, src1, src2, mask, Condition(0), Condition(0));
break;
}
+ case kS390_S128AndNot: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vnc(dst, i.InputSimd128Register(0), src, Condition(0), Condition(0),
+ Condition(0));
+ break;
+ }
+ // vector conversions
+#define CONVERT_FLOAT_TO_INT32(convert) \
+ for (int index = 0; index < 4; index++) { \
+ __ vlgv(kScratchReg, kScratchDoubleReg, MemOperand(r0, index), \
+ Condition(2)); \
+ __ MovIntToFloat(tempFPReg1, kScratchReg); \
+ __ convert(kScratchReg, tempFPReg1, kRoundToZero); \
+ __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
+ }
+ case kS390_I32x4SConvertF32x4: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ // NaN to 0
+ __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
+ __ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(2));
+ __ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
+ Condition(0), Condition(0));
+ CONVERT_FLOAT_TO_INT32(ConvertFloat32ToInt32)
+ break;
+ }
+ case kS390_I32x4UConvertF32x4: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ // NaN to 0, negative to 0
+ __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vfmax(kScratchDoubleReg, src, kScratchDoubleReg, Condition(1),
+ Condition(0), Condition(2));
+ CONVERT_FLOAT_TO_INT32(ConvertFloat32ToUnsignedInt32)
+ break;
+ }
+#undef CONVERT_FLOAT_TO_INT32
+#define CONVERT_INT32_TO_FLOAT(convert, double_index) \
+ Simd128Register src = i.InputSimd128Register(0); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ for (int index = 0; index < 4; index++) { \
+ __ vlgv(kScratchReg, src, MemOperand(r0, index), Condition(2)); \
+ __ convert(kScratchDoubleReg, kScratchReg); \
+ __ MovFloatToInt(kScratchReg, kScratchDoubleReg); \
+ __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
+ }
+ case kS390_F32x4SConvertI32x4: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 0)
+#else
+ CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 1)
+#endif
+ break;
+ }
+ case kS390_F32x4UConvertI32x4: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 0)
+#else
+ CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 1)
+#endif
+ break;
+ }
+#undef CONVERT_INT32_TO_FLOAT
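CONVERT_INT32_TO_FLOAT is the mirror image: extract each 32-bit lane, convert it in a scalar FP register, and insert the result back into the same lane. The lane semantics amount to a plain per-lane cast (sketch only; the double_index parameter deals with the big-endian register layout and is not modeled here):

```cpp
// Per-lane model of F32x4SConvertI32x4 / F32x4UConvertI32x4: each
// 32-bit integer lane becomes the nearest float32.
#include <cstdint>

void F32x4SConvertI32x4(const int32_t in[4], float out[4]) {
  for (int lane = 0; lane < 4; lane++) {
    out[lane] = static_cast<float>(in[lane]);  // may round when |x| > 2^24
  }
}

void F32x4UConvertI32x4(const uint32_t in[4], float out[4]) {
  for (int lane = 0; lane < 4; lane++) {
    out[lane] = static_cast<float>(in[lane]);
  }
}
```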
+#define VECTOR_UNPACK(op, mode) \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0), \
+ Condition(0), Condition(mode));
+ case kS390_I32x4SConvertI16x8Low: {
+ VECTOR_UNPACK(vupl, 1)
+ break;
+ }
+ case kS390_I32x4SConvertI16x8High: {
+ VECTOR_UNPACK(vuph, 1)
+ break;
+ }
+ case kS390_I32x4UConvertI16x8Low: {
+ VECTOR_UNPACK(vupll, 1)
+ break;
+ }
+ case kS390_I32x4UConvertI16x8High: {
+ VECTOR_UNPACK(vuplh, 1)
+ break;
+ }
+ case kS390_I16x8SConvertI8x16Low: {
+ VECTOR_UNPACK(vupl, 0)
+ break;
+ }
+ case kS390_I16x8SConvertI8x16High: {
+ VECTOR_UNPACK(vuph, 0)
+ break;
+ }
+ case kS390_I16x8UConvertI8x16Low: {
+ VECTOR_UNPACK(vupll, 0)
+ break;
+ }
+ case kS390_I16x8UConvertI8x16High: {
+ VECTOR_UNPACK(vuplh, 0)
+ break;
+ }
+#undef VECTOR_UNPACK
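The eight unpack cases reduce to one VECTOR_UNPACK macro over four instructions: vupl/vuph (sign-extending low/high) and vupll/vuplh (zero-extending low/high), with the mode condition selecting the source lane width. A scalar sketch of the Wasm-level semantics; which half counts as "low" depends on lane numbering, so this models only the widening rule:

```cpp
// Widening unpack: half of a narrow-lane vector is extended into a
// vector of lanes twice as wide.
#include <cstdint>

void I32x4SConvertI16x8Low(const int16_t in[8], int32_t out[4]) {
  for (int i = 0; i < 4; i++) out[i] = in[i];      // sign-extend
}

void I32x4UConvertI16x8High(const uint16_t in[8], uint32_t out[4]) {
  for (int i = 0; i < 4; i++) out[i] = in[i + 4];  // zero-extend
}
```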
+ case kS390_I16x8SConvertI32x4:
+ __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(2));
+ break;
+ case kS390_I8x16SConvertI16x8:
+ __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(1));
+ break;
+#define VECTOR_PACK_UNSIGNED(mode) \
+ Simd128Register tempFPReg = i.ToSimd128Register(instr->TempAt(0)); \
+ __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
+ Condition(0), Condition(mode)); \
+ __ vmx(tempFPReg, i.InputSimd128Register(0), kScratchDoubleReg, \
+ Condition(0), Condition(0), Condition(mode)); \
+ __ vmx(kScratchDoubleReg, i.InputSimd128Register(1), kScratchDoubleReg, \
+ Condition(0), Condition(0), Condition(mode)); \
+ __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg, \
+ Condition(0), Condition(mode));
+ case kS390_I16x8UConvertI32x4: {
+      // Treat inputs as signed and saturate to unsigned (negative lanes to 0).
+ VECTOR_PACK_UNSIGNED(2)
+ break;
+ }
+ case kS390_I8x16UConvertI16x8: {
+      // Treat inputs as signed and saturate to unsigned (negative lanes to 0).
+ VECTOR_PACK_UNSIGNED(1)
+ break;
+ }
+#undef VECTOR_PACK_UNSIGNED
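VECTOR_PACK_UNSIGNED first zeroes the scratch register, clamps each signed input against it with vmx (a max with zero), and then packs with unsigned saturation via vpkls. Per lane that composes to the following rule (scalar sketch, names illustrative):

```cpp
// Signed-input, unsigned-saturating narrow: negatives clamp to 0 (the
// vmx step), values above the narrow maximum clamp there (the vpkls
// step).
#include <cstdint>

uint16_t PackI32ToU16Sat(int32_t x) {
  if (x < 0) return 0;
  if (x > 0xFFFF) return 0xFFFF;
  return static_cast<uint16_t>(x);
}
```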
+#define BINOP_EXTRACT(op, extract_high, extract_low, mode) \
+ Simd128Register src1 = i.InputSimd128Register(0); \
+ Simd128Register src2 = i.InputSimd128Register(1); \
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)); \
+ Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1)); \
+ __ extract_high(kScratchDoubleReg, src1, Condition(0), Condition(0), \
+ Condition(mode)); \
+ __ extract_high(tempFPReg1, src2, Condition(0), Condition(0), \
+ Condition(mode)); \
+ __ op(kScratchDoubleReg, kScratchDoubleReg, tempFPReg1, Condition(0), \
+ Condition(0), Condition(mode + 1)); \
+ __ extract_low(tempFPReg1, src1, Condition(0), Condition(0), \
+ Condition(mode)); \
+ __ extract_low(tempFPReg2, src2, Condition(0), Condition(0), \
+ Condition(mode)); \
+ __ op(tempFPReg1, tempFPReg1, tempFPReg2, Condition(0), Condition(0), \
+ Condition(mode + 1));
+ case kS390_I16x8AddSaturateS: {
+ BINOP_EXTRACT(va, vuph, vupl, 1)
+ __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_I16x8SubSaturateS: {
+ BINOP_EXTRACT(vs, vuph, vupl, 1)
+ __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_I16x8AddSaturateU: {
+ BINOP_EXTRACT(va, vuplh, vupll, 1)
+ __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_I16x8SubSaturateU: {
+ BINOP_EXTRACT(vs, vuplh, vupll, 1)
+ // negative to 0
+ __ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
+ Condition(0));
+ __ vmx(kScratchDoubleReg, tempFPReg2, kScratchDoubleReg, Condition(0),
+ Condition(0), Condition(2));
+ __ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
+ Condition(2));
+ __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_I8x16AddSaturateS: {
+ BINOP_EXTRACT(va, vuph, vupl, 0)
+ __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
+ Condition(0), Condition(1));
+ break;
+ }
+ case kS390_I8x16SubSaturateS: {
+ BINOP_EXTRACT(vs, vuph, vupl, 0)
+ __ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
+ Condition(0), Condition(1));
+ break;
+ }
+ case kS390_I8x16AddSaturateU: {
+ BINOP_EXTRACT(va, vuplh, vupll, 0)
+ __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
+ Condition(0), Condition(1));
+ break;
+ }
+ case kS390_I8x16SubSaturateU: {
+ BINOP_EXTRACT(vs, vuplh, vupll, 0)
+ // negative to 0
+ __ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
+ Condition(0));
+ __ vmx(kScratchDoubleReg, tempFPReg2, kScratchDoubleReg, Condition(0),
+ Condition(0), Condition(1));
+ __ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
+ Condition(1));
+ __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
+ Condition(0), Condition(1));
+ break;
+ }
+#undef BINOP_EXTRACT
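BINOP_EXTRACT implements the saturating add/sub ops by widening: both halves of each input are extended to twice the lane width, the operation runs overflow-free at the wide width, and the vpks/vpkls at each call site narrows the results back with signed or unsigned saturation. A scalar sketch of one lane of the signed 16-bit case:

```cpp
// Widen-operate-narrow model of kS390_I16x8AddSaturateS.
#include <algorithm>
#include <cstdint>

int16_t AddSatI16(int16_t a, int16_t b) {
  int32_t wide = int32_t{a} + int32_t{b};     // no overflow at 32 bits
  wide = std::min(wide, int32_t{INT16_MAX});  // saturating pack, high clamp
  wide = std::max(wide, int32_t{INT16_MIN});  // saturating pack, low clamp
  return static_cast<int16_t>(wide);
}
```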
+ case kS390_S8x16Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ int32_t k8x16_indices[] = {i.InputInt32(2), i.InputInt32(3),
+ i.InputInt32(4), i.InputInt32(5)};
+    // Create two 8-byte inputs holding the new shuffle indices.
+    for (int i = 0, j = 0; i < 2; i++, j += 2) {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ lgfi(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
+ __ aih(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
+#else
+ __ lgfi(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
+ __ aih(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
+#endif
+ }
+ __ vlvgp(kScratchDoubleReg, ip, r0);
+ __ vperm(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
+ break;
+ }
+ case kS390_S8x16Swizzle: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+#ifdef V8_TARGET_BIG_ENDIAN
+    // The input needs to be reversed.
+ __ vlgv(r0, src0, MemOperand(r0, 0), Condition(3));
+ __ vlgv(r1, src0, MemOperand(r0, 1), Condition(3));
+ __ lrvgr(r0, r0);
+ __ lrvgr(r1, r1);
+ __ vlvgp(kScratchDoubleReg, r1, r0);
+    // clear src0
+ __ vx(src0, src0, src0, Condition(0), Condition(0), Condition(0));
+ __ vperm(dst, kScratchDoubleReg, src0, src1, Condition(0), Condition(0));
+#else
+ __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vperm(dst, src0, kScratchDoubleReg, src1, Condition(0), Condition(0));
+#endif
+ break;
+ }
+ case kS390_StoreCompressTagged: {
+ CHECK(!instr->HasOutput());
+ size_t index = 0;
+ AddressingMode mode = kMode_None;
+ MemOperand operand = i.MemoryOperand(&mode, &index);
+ Register value = i.InputRegister(index);
+ __ StoreTaggedField(value, operand, r1);
+ break;
+ }
+ case kS390_LoadDecompressTaggedSigned: {
+ CHECK(instr->HasOutput());
+ __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
+ break;
+ }
+ case kS390_LoadDecompressTaggedPointer: {
+ CHECK(instr->HasOutput());
+ __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
+ break;
+ }
+ case kS390_LoadDecompressAnyTagged: {
+ CHECK(instr->HasOutput());
+ __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ break;
+ }
default:
UNREACHABLE();
}
@@ -3653,16 +4370,6 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
cases.data() + cases.size());
}
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- S390OperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ Cmp32(input, Operand(i.InputInt32(index + 0)));
- __ beq(GetLabel(i.InputRpo(index + 1)));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
S390OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -3729,14 +4436,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
- // WASM import wrappers are passed a tuple in the place of the instance.
+ // Wasm import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
- __ LoadP(kJSFunctionRegister,
- FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
- __ LoadP(kWasmInstanceRegister,
- FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ LoadTaggedPointerField(
+ kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset), r0);
+ __ LoadTaggedPointerField(
+ kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset), r0);
__ Push(kWasmInstanceRegister);
if (call_descriptor->IsWasmCapiFunction()) {
// Reserve space for saving the PC later.
@@ -3956,9 +4665,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
- case Constant::kCompressedHeapObject:
- UNREACHABLE();
+ case Constant::kCompressedHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+ }
break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
break;
@@ -3986,17 +4702,29 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
} else if (source->IsFPRegister()) {
- DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DoubleRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ if (destination->IsSimd128Register()) {
+ __ vlr(g.ToSimd128Register(destination), g.ToSimd128Register(source),
+ Condition(0), Condition(0), Condition(0));
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ __ StoreSimd128(g.ToSimd128Register(source),
+ g.ToMemOperand(destination), kScratchReg);
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- LocationOperand* op = LocationOperand::cast(source);
- if (op->representation() == MachineRepresentation::kFloat64) {
- __ StoreDouble(src, g.ToMemOperand(destination));
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
} else {
- __ StoreFloat32(src, g.ToMemOperand(destination));
+ DCHECK(destination->IsFPStackSlot());
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(src, g.ToMemOperand(destination));
+ } else {
+ __ StoreFloat32(src, g.ToMemOperand(destination));
+ }
}
}
} else if (source->IsFPStackSlot()) {
@@ -4006,8 +4734,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
LocationOperand* op = LocationOperand::cast(source);
if (op->representation() == MachineRepresentation::kFloat64) {
__ LoadDouble(g.ToDoubleRegister(destination), src);
- } else {
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
__ LoadFloat32(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ __ LoadSimd128(g.ToSimd128Register(destination), g.ToMemOperand(source),
+ kScratchReg);
}
} else {
LocationOperand* op = LocationOperand::cast(source);
@@ -4015,9 +4747,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (op->representation() == MachineRepresentation::kFloat64) {
__ LoadDouble(temp, src);
__ StoreDouble(temp, g.ToMemOperand(destination));
- } else {
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
__ LoadFloat32(temp, src);
__ StoreFloat32(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ __ LoadSimd128(kScratchDoubleReg, g.ToMemOperand(source), kScratchReg);
+ __ StoreSimd128(kScratchDoubleReg, g.ToMemOperand(destination),
+ kScratchReg);
}
}
} else {
@@ -4067,13 +4804,23 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
} else if (source->IsFloatStackSlot()) {
DCHECK(destination->IsFloatStackSlot());
__ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
- kScratchDoubleReg, d0);
+ kScratchDoubleReg);
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleStackSlot());
__ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
- kScratchDoubleReg, d0);
+ kScratchDoubleReg);
} else if (source->IsSimd128Register()) {
- UNREACHABLE();
+ Simd128Register src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ __ SwapSimd128(src, g.ToSimd128Register(destination), kScratchDoubleReg);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ __ SwapSimd128(src, g.ToMemOperand(destination), kScratchDoubleReg);
+ }
+ } else if (source->IsSimd128StackSlot()) {
+ DCHECK(destination->IsSimd128StackSlot());
+ __ SwapSimd128(g.ToMemOperand(source), g.ToMemOperand(destination),
+ kScratchDoubleReg);
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 165c811787..6101b22166 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -197,6 +197,24 @@ namespace compiler {
V(S390_Word64AtomicXorUint16) \
V(S390_Word64AtomicXorUint32) \
V(S390_Word64AtomicXorUint64) \
+ V(S390_F64x2Splat) \
+ V(S390_F64x2ReplaceLane) \
+ V(S390_F64x2Abs) \
+ V(S390_F64x2Neg) \
+ V(S390_F64x2Sqrt) \
+ V(S390_F64x2Add) \
+ V(S390_F64x2Sub) \
+ V(S390_F64x2Mul) \
+ V(S390_F64x2Div) \
+ V(S390_F64x2Eq) \
+ V(S390_F64x2Ne) \
+ V(S390_F64x2Lt) \
+ V(S390_F64x2Le) \
+ V(S390_F64x2Min) \
+ V(S390_F64x2Max) \
+ V(S390_F64x2ExtractLane) \
+ V(S390_F64x2Qfma) \
+ V(S390_F64x2Qfms) \
V(S390_F32x4Splat) \
V(S390_F32x4ExtractLane) \
V(S390_F32x4ReplaceLane) \
@@ -212,6 +230,34 @@ namespace compiler {
V(S390_F32x4Neg) \
V(S390_F32x4RecipApprox) \
V(S390_F32x4RecipSqrtApprox) \
+ V(S390_F32x4SConvertI32x4) \
+ V(S390_F32x4UConvertI32x4) \
+ V(S390_F32x4Sqrt) \
+ V(S390_F32x4Div) \
+ V(S390_F32x4Min) \
+ V(S390_F32x4Max) \
+ V(S390_F32x4Qfma) \
+ V(S390_F32x4Qfms) \
+ V(S390_I64x2Neg) \
+ V(S390_I64x2Add) \
+ V(S390_I64x2Sub) \
+ V(S390_I64x2Shl) \
+ V(S390_I64x2ShrS) \
+ V(S390_I64x2ShrU) \
+ V(S390_I64x2Mul) \
+ V(S390_I64x2Splat) \
+ V(S390_I64x2ReplaceLane) \
+ V(S390_I64x2ExtractLane) \
+ V(S390_I64x2Eq) \
+ V(S390_I64x2Ne) \
+ V(S390_I64x2GtS) \
+ V(S390_I64x2GeS) \
+ V(S390_I64x2GtU) \
+ V(S390_I64x2GeU) \
+ V(S390_I64x2MinS) \
+ V(S390_I64x2MinU) \
+ V(S390_I64x2MaxS) \
+ V(S390_I64x2MaxU) \
V(S390_I32x4Splat) \
V(S390_I32x4ExtractLane) \
V(S390_I32x4ReplaceLane) \
@@ -230,10 +276,17 @@ namespace compiler {
V(S390_I32x4GtU) \
V(S390_I32x4GeU) \
V(S390_I32x4Neg) \
- V(S390_I16x8Splat) \
V(S390_I32x4Shl) \
V(S390_I32x4ShrS) \
V(S390_I32x4ShrU) \
+ V(S390_I32x4SConvertF32x4) \
+ V(S390_I32x4UConvertF32x4) \
+ V(S390_I32x4SConvertI16x8Low) \
+ V(S390_I32x4SConvertI16x8High) \
+ V(S390_I32x4UConvertI16x8Low) \
+ V(S390_I32x4UConvertI16x8High) \
+ V(S390_I32x4Abs) \
+ V(S390_I16x8Splat) \
V(S390_I16x8ExtractLaneU) \
V(S390_I16x8ExtractLaneS) \
V(S390_I16x8ReplaceLane) \
@@ -255,6 +308,18 @@ namespace compiler {
V(S390_I16x8ShrS) \
V(S390_I16x8ShrU) \
V(S390_I16x8Neg) \
+ V(S390_I16x8SConvertI32x4) \
+ V(S390_I16x8UConvertI32x4) \
+ V(S390_I16x8SConvertI8x16Low) \
+ V(S390_I16x8SConvertI8x16High) \
+ V(S390_I16x8UConvertI8x16Low) \
+ V(S390_I16x8UConvertI8x16High) \
+ V(S390_I16x8AddSaturateS) \
+ V(S390_I16x8SubSaturateS) \
+ V(S390_I16x8AddSaturateU) \
+ V(S390_I16x8SubSaturateU) \
+ V(S390_I16x8RoundingAverageU) \
+ V(S390_I16x8Abs) \
V(S390_I8x16Splat) \
V(S390_I8x16ExtractLaneU) \
V(S390_I8x16ExtractLaneS) \
@@ -276,9 +341,21 @@ namespace compiler {
V(S390_I8x16ShrS) \
V(S390_I8x16ShrU) \
V(S390_I8x16Neg) \
+ V(S390_I8x16SConvertI16x8) \
+ V(S390_I8x16UConvertI16x8) \
+ V(S390_I8x16AddSaturateS) \
+ V(S390_I8x16SubSaturateS) \
+ V(S390_I8x16AddSaturateU) \
+ V(S390_I8x16SubSaturateU) \
+ V(S390_I8x16RoundingAverageU) \
+ V(S390_I8x16Abs) \
+ V(S390_S8x16Shuffle) \
+ V(S390_S8x16Swizzle) \
+ V(S390_S1x2AnyTrue) \
V(S390_S1x4AnyTrue) \
V(S390_S1x8AnyTrue) \
V(S390_S1x16AnyTrue) \
+ V(S390_S1x2AllTrue) \
V(S390_S1x4AllTrue) \
V(S390_S1x8AllTrue) \
V(S390_S1x16AllTrue) \
@@ -288,8 +365,13 @@ namespace compiler {
V(S390_S128Zero) \
V(S390_S128Not) \
V(S390_S128Select) \
+ V(S390_S128AndNot) \
V(S390_StoreSimd128) \
- V(S390_LoadSimd128)
+ V(S390_LoadSimd128) \
+ V(S390_StoreCompressTagged) \
+ V(S390_LoadDecompressTaggedSigned) \
+ V(S390_LoadDecompressTaggedPointer) \
+ V(S390_LoadDecompressAnyTagged)
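These opcode tables follow the X-macro idiom: a single V(Name) list is expanded with different per-entry macros to produce the opcode enum, the printable name table, the scheduler switch, and so on, so a new opcode is declared in exactly one place. A self-contained sketch of the pattern (DEMO_OPCODE_LIST and its consumers are illustrative names):

```cpp
#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(S390_F64x2Add)          \
  V(S390_F64x2Sub)

// One expansion builds the enum...
enum DemoOpcode {
#define DECLARE(name) k##name,
  DEMO_OPCODE_LIST(DECLARE)
#undef DECLARE
};

// ...another builds the name table from the same list.
const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define CASE(name) \
  case k##name:    \
    return #name;
    DEMO_OPCODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() { printf("%s\n", DemoOpcodeName(kS390_F64x2Sub)); }
```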
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index 50d23766b6..502ce229f5 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -143,6 +143,24 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_CompressSigned:
case kS390_CompressPointer:
case kS390_CompressAny:
+ case kS390_F64x2Splat:
+ case kS390_F64x2ReplaceLane:
+ case kS390_F64x2Abs:
+ case kS390_F64x2Neg:
+ case kS390_F64x2Sqrt:
+ case kS390_F64x2Add:
+ case kS390_F64x2Sub:
+ case kS390_F64x2Mul:
+ case kS390_F64x2Div:
+ case kS390_F64x2Eq:
+ case kS390_F64x2Ne:
+ case kS390_F64x2Lt:
+ case kS390_F64x2Le:
+ case kS390_F64x2Min:
+ case kS390_F64x2Max:
+ case kS390_F64x2ExtractLane:
+ case kS390_F64x2Qfma:
+ case kS390_F64x2Qfms:
case kS390_F32x4Splat:
case kS390_F32x4ExtractLane:
case kS390_F32x4ReplaceLane:
@@ -158,6 +176,34 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F32x4Neg:
case kS390_F32x4RecipApprox:
case kS390_F32x4RecipSqrtApprox:
+ case kS390_F32x4SConvertI32x4:
+ case kS390_F32x4UConvertI32x4:
+ case kS390_F32x4Sqrt:
+ case kS390_F32x4Div:
+ case kS390_F32x4Min:
+ case kS390_F32x4Max:
+ case kS390_F32x4Qfma:
+ case kS390_F32x4Qfms:
+ case kS390_I64x2Neg:
+ case kS390_I64x2Add:
+ case kS390_I64x2Sub:
+ case kS390_I64x2Shl:
+ case kS390_I64x2ShrS:
+ case kS390_I64x2ShrU:
+ case kS390_I64x2Mul:
+ case kS390_I64x2Splat:
+ case kS390_I64x2ReplaceLane:
+ case kS390_I64x2ExtractLane:
+ case kS390_I64x2Eq:
+ case kS390_I64x2Ne:
+ case kS390_I64x2GtS:
+ case kS390_I64x2GeS:
+ case kS390_I64x2GtU:
+ case kS390_I64x2GeU:
+ case kS390_I64x2MinS:
+ case kS390_I64x2MinU:
+ case kS390_I64x2MaxS:
+ case kS390_I64x2MaxU:
case kS390_I32x4Splat:
case kS390_I32x4ExtractLane:
case kS390_I32x4ReplaceLane:
@@ -179,6 +225,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I32x4ShrS:
case kS390_I32x4ShrU:
case kS390_I32x4Neg:
+ case kS390_I32x4SConvertF32x4:
+ case kS390_I32x4UConvertF32x4:
+ case kS390_I32x4SConvertI16x8Low:
+ case kS390_I32x4SConvertI16x8High:
+ case kS390_I32x4UConvertI16x8Low:
+ case kS390_I32x4UConvertI16x8High:
+ case kS390_I32x4Abs:
case kS390_I16x8Splat:
case kS390_I16x8ExtractLaneU:
case kS390_I16x8ExtractLaneS:
@@ -201,6 +254,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8ShrS:
case kS390_I16x8ShrU:
case kS390_I16x8Neg:
+ case kS390_I16x8SConvertI32x4:
+ case kS390_I16x8UConvertI32x4:
+ case kS390_I16x8SConvertI8x16Low:
+ case kS390_I16x8SConvertI8x16High:
+ case kS390_I16x8UConvertI8x16Low:
+ case kS390_I16x8UConvertI8x16High:
+ case kS390_I16x8AddSaturateS:
+ case kS390_I16x8SubSaturateS:
+ case kS390_I16x8AddSaturateU:
+ case kS390_I16x8SubSaturateU:
+ case kS390_I16x8RoundingAverageU:
+ case kS390_I16x8Abs:
case kS390_I8x16Splat:
case kS390_I8x16ExtractLaneU:
case kS390_I8x16ExtractLaneS:
@@ -222,9 +287,21 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16ShrS:
case kS390_I8x16ShrU:
case kS390_I8x16Neg:
+ case kS390_I8x16SConvertI16x8:
+ case kS390_I8x16UConvertI16x8:
+ case kS390_I8x16AddSaturateS:
+ case kS390_I8x16SubSaturateS:
+ case kS390_I8x16AddSaturateU:
+ case kS390_I8x16SubSaturateU:
+ case kS390_I8x16RoundingAverageU:
+ case kS390_I8x16Abs:
+ case kS390_S8x16Shuffle:
+ case kS390_S8x16Swizzle:
+ case kS390_S1x2AnyTrue:
case kS390_S1x4AnyTrue:
case kS390_S1x8AnyTrue:
case kS390_S1x16AnyTrue:
+ case kS390_S1x2AllTrue:
case kS390_S1x4AllTrue:
case kS390_S1x8AllTrue:
case kS390_S1x16AllTrue:
@@ -234,6 +311,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_S128Zero:
case kS390_S128Not:
case kS390_S128Select:
+ case kS390_S128AndNot:
return kNoOpcodeFlags;
case kS390_LoadWordS8:
@@ -250,6 +328,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadReverse32:
case kS390_LoadReverse64:
case kS390_Peek:
+ case kS390_LoadDecompressTaggedSigned:
+ case kS390_LoadDecompressTaggedPointer:
+ case kS390_LoadDecompressAnyTagged:
return kIsLoadOperation;
case kS390_StoreWord8:
@@ -263,6 +344,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_StoreFloat32:
case kS390_StoreDouble:
case kS390_StoreSimd128:
+ case kS390_StoreCompressTagged:
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index b66530f1dc..fdffb30e00 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -154,10 +154,12 @@ class S390OperandGenerator final : public OperandGenerator {
switch (opcode) {
case kS390_Cmp64:
case kS390_LoadAndTestWord64:
- return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
+ return rep == MachineRepresentation::kWord64 ||
+ (!COMPRESS_POINTERS_BOOL && IsAnyTagged(rep));
case kS390_LoadAndTestWord32:
case kS390_Cmp32:
- return rep == MachineRepresentation::kWord32;
+ return rep == MachineRepresentation::kWord32 ||
+ (COMPRESS_POINTERS_BOOL && IsAnyTagged(rep));
default:
break;
}
@@ -285,29 +287,38 @@ ArchOpcode SelectLoadOpcode(Node* node) {
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
break;
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#endif
case MachineRepresentation::kWord32:
opcode = kS390_LoadWordU32;
break;
- case MachineRepresentation::kSimd128:
- opcode = kS390_LoadSimd128;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+#ifdef V8_COMPRESS_POINTERS
+ opcode = kS390_LoadWordS32;
break;
-#if V8_TARGET_ARCH_S390X
+#else
+ UNREACHABLE();
+#endif
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kS390_LoadDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kS390_LoadDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kS390_LoadDecompressAnyTagged;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kS390_LoadWord64;
break;
-#else
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kS390_LoadSimd128;
+ break;
case MachineRepresentation::kNone:
default:
UNREACHABLE();
@@ -683,8 +694,7 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
void InstructionSelector::VisitLoad(Node* node) {
S390OperandGenerator g(this);
InstructionCode opcode = SelectLoadOpcode(node);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
@@ -694,7 +704,6 @@ void InstructionSelector::VisitLoad(Node* node) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
-
Emit(opcode, 1, outputs, input_count, inputs);
}
@@ -714,7 +723,7 @@ static void VisitGeneralStore(
Node* value = node->InputAt(2);
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
- DCHECK(CanBeTaggedPointer(rep));
+ DCHECK(CanBeTaggedOrCompressedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -754,13 +763,6 @@ static void VisitGeneralStore(
case MachineRepresentation::kWord16:
opcode = kS390_StoreWord16;
break;
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
-#endif
case MachineRepresentation::kWord32:
opcode = kS390_StoreWord32;
if (m.IsWord32ReverseBytes()) {
@@ -768,19 +770,19 @@ static void VisitGeneralStore(
value = value->InputAt(0);
}
break;
- case MachineRepresentation::kSimd128:
- opcode = kS390_StoreSimd128;
- if (m.IsSimd128ReverseBytes()) {
- opcode = kS390_StoreReverseSimd128;
- value = value->InputAt(0);
- }
- break;
-#if V8_TARGET_ARCH_S390X
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kCompressed:
+#ifdef V8_COMPRESS_POINTERS
+ opcode = kS390_StoreCompressTagged;
+ break;
+#else
+ UNREACHABLE();
+#endif
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ opcode = kS390_StoreCompressTagged;
+ break;
case MachineRepresentation::kWord64:
opcode = kS390_StoreWord64;
if (m.IsWord64ReverseBytes()) {
@@ -788,9 +790,13 @@ static void VisitGeneralStore(
value = value->InputAt(0);
}
break;
-#else
- case MachineRepresentation::kWord64: // Fall through.
-#endif
+ case MachineRepresentation::kSimd128:
+ opcode = kS390_StoreSimd128;
+ if (m.IsSimd128ReverseBytes()) {
+ opcode = kS390_StoreReverseSimd128;
+ value = value->InputAt(0);
+ }
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -2068,7 +2074,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
S390OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- // Emit either ArchTableSwitch or ArchLookupSwitch.
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range();
@@ -2222,12 +2228,21 @@ void InstructionSelector::EmitPrepareArguments(
int num_slots = 0;
int slot = 0;
+#define INPUT_SWITCH(param) \
+ switch (input.location.GetType().representation()) { \
+ case MachineRepresentation::kSimd128: \
+ param += kSimd128Size / kSystemPointerSize; \
+ break; \
+ case MachineRepresentation::kFloat64: \
+ param += kDoubleSize / kSystemPointerSize; \
+ break; \
+ default: \
+ param += 1; \
+ break; \
+ }
for (PushParameter input : *arguments) {
if (input.node == nullptr) continue;
- num_slots += input.location.GetType().representation() ==
- MachineRepresentation::kFloat64
- ? kDoubleSize / kSystemPointerSize
- : 1;
+ INPUT_SWITCH(num_slots)
}
Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
for (PushParameter input : *arguments) {
@@ -2235,12 +2250,10 @@ void InstructionSelector::EmitPrepareArguments(
if (input.node) {
Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
- slot += input.location.GetType().representation() ==
- MachineRepresentation::kFloat64
- ? (kDoubleSize / kSystemPointerSize)
- : 1;
+ INPUT_SWITCH(slot)
}
}
+#undef INPUT_SWITCH
DCHECK(num_slots == slot);
}
}
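INPUT_SWITCH centralizes the stack-slot accounting that was previously written out inline for num_slots and again for slot: a SIMD argument occupies kSimd128Size / kSystemPointerSize slots, a float64 occupies kDoubleSize / kSystemPointerSize, and everything else one. Under the 64-bit s390x values assumed here (pointers and doubles 8 bytes, SIMD 16), that means 2, 1, and 1 slots respectively; a standalone sketch:

```cpp
// Slot accounting assumed by INPUT_SWITCH on a 64-bit target.
constexpr int kSystemPointerSize = 8;
constexpr int kDoubleSize = 8;
constexpr int kSimd128Size = 16;

enum class Rep { kSimd128, kFloat64, kOther };

int SlotsFor(Rep rep) {
  switch (rep) {
    case Rep::kSimd128: return kSimd128Size / kSystemPointerSize;  // 2
    case Rep::kFloat64: return kDoubleSize / kSystemPointerSize;   // 1
    default:            return 1;
  }
}

int main() { return SlotsFor(Rep::kSimd128) == 2 ? 0 : 1; }
```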
@@ -2512,12 +2525,24 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
}
#define SIMD_TYPES(V) \
+ V(F64x2) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
+ V(F64x2Min) \
+ V(F64x2Max) \
V(F32x4Add) \
V(F32x4AddHoriz) \
V(F32x4Sub) \
@@ -2526,6 +2551,22 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I64x2GtU) \
+ V(I64x2GeU) \
+ V(I64x2MinS) \
+ V(I64x2MinU) \
+ V(I64x2MaxS) \
+ V(I64x2MaxU) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2554,6 +2595,13 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I16x8GeS) \
V(I16x8GtU) \
V(I16x8GeU) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4) \
+ V(I16x8AddSaturateS) \
+ V(I16x8SubSaturateS) \
+ V(I16x8AddSaturateU) \
+ V(I16x8SubSaturateU) \
+ V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16Sub) \
V(I8x16Mul) \
@@ -2567,39 +2615,74 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16GeS) \
V(I8x16GtU) \
V(I8x16GeU) \
+ V(I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16AddSaturateS) \
+ V(I8x16SubSaturateS) \
+ V(I8x16AddSaturateU) \
+ V(I8x16SubSaturateU) \
+ V(I8x16RoundingAverageU) \
V(S128And) \
V(S128Or) \
- V(S128Xor)
-
-#define SIMD_UNOP_LIST(V) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox) \
- V(I32x4Neg) \
- V(I16x8Neg) \
- V(I8x16Neg) \
+ V(S128Xor) \
+ V(S128AndNot)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Sqrt) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(F32x4Sqrt) \
+ V(I64x2Neg) \
+ V(I16x8Abs) \
+ V(I32x4Neg) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I32x4Abs) \
+ V(I16x8Neg) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I8x16Neg) \
+ V(I8x16Abs) \
V(S128Not)
-#define SIMD_SHIFT_OPCODES(V) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4ShrU) \
- V(I16x8Shl) \
- V(I16x8ShrS) \
- V(I16x8ShrU) \
- V(I8x16Shl) \
- V(I8x16ShrS) \
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
+ V(S1x2AnyTrue) \
V(S1x4AnyTrue) \
V(S1x8AnyTrue) \
V(S1x16AnyTrue) \
+ V(S1x2AllTrue) \
V(S1x4AllTrue) \
V(S1x8AllTrue) \
V(S1x16AllTrue)
+#define SIMD_CONVERSION_LIST(V) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4)
+
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
S390OperandGenerator g(this); \
@@ -2616,7 +2699,9 @@ SIMD_TYPES(SIMD_VISIT_SPLAT)
Emit(kS390_##Type##ExtractLane##Sign, g.DefineAsRegister(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
}
+SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
@@ -2666,9 +2751,9 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
g.UseUniqueRegister(node->InputAt(0)), \
g.UseUniqueRegister(node->InputAt(1))); \
}
-SIMD_SHIFT_OPCODES(SIMD_VISIT_SHIFT)
+SIMD_SHIFT_LIST(SIMD_VISIT_SHIFT)
#undef SIMD_VISIT_SHIFT
-#undef SIMD_SHIFT_OPCODES
+#undef SIMD_SHIFT_LIST
#define SIMD_VISIT_BOOL(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -2679,11 +2764,81 @@ SIMD_SHIFT_OPCODES(SIMD_VISIT_SHIFT)
}
SIMD_BOOL_LIST(SIMD_VISIT_BOOL)
#undef SIMD_VISIT_BOOL
+#undef SIMD_BOOL_LIST
+
+#define SIMD_VISIT_CONVERSION(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ S390OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kS390_##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), arraysize(temps), temps); \
+ }
+SIMD_CONVERSION_LIST(SIMD_VISIT_CONVERSION)
+#undef SIMD_VISIT_CONVERSION
+#undef SIMD_CONVERSION_LIST
+
+#define SIMD_VISIT_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ S390OperandGenerator g(this); \
+ Emit(kS390_##Opcode, g.DefineSameAsFirst(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
+ }
+SIMD_VISIT_QFMOP(F64x2Qfma)
+SIMD_VISIT_QFMOP(F64x2Qfms)
+SIMD_VISIT_QFMOP(F32x4Qfma)
+SIMD_VISIT_QFMOP(F32x4Qfms)
+#undef SIMD_VISIT_QFMOP
#undef SIMD_TYPES
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+ S390OperandGenerator g(this);
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+#ifdef V8_TARGET_BIG_ENDIAN
+  // The input registers are each in reverse order, so we have to remap the
+  // shuffle indices.
+ int max_index = 15;
+ int total_lane_count = 2 * kSimd128Size;
+ uint8_t shuffle_remapped[kSimd128Size];
+ for (int i = 0; i < kSimd128Size; i++) {
+ uint8_t current_index = shuffle[i];
+ shuffle_remapped[i] = (current_index <= max_index
+ ? max_index - current_index
+ : total_lane_count - current_index + max_index);
+ }
+ Emit(kS390_S8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+       // Pack4Lanes reverses the bytes, so the index groups are passed in
+       // reverse order.
+ g.UseImmediate(Pack4Lanes(shuffle_remapped + 12)),
+ g.UseImmediate(Pack4Lanes(shuffle_remapped + 8)),
+ g.UseImmediate(Pack4Lanes(shuffle_remapped + 4)),
+ g.UseImmediate(Pack4Lanes(shuffle_remapped)));
+#else
+ Emit(kS390_S8x16Shuffle, g.DefineAsRegister(node),
+ g.UseUniqueRegister(input0), g.UseUniqueRegister(input1),
+ g.UseImmediate(Pack4Lanes(shuffle)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12)));
+#endif
+}
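A standalone check of the big-endian remapping above: an index into the first source vector maps to 15 - i within that vector, and an index into the second maps to total_lane_count - i + max_index, i.e. 47 - i for i in 16..31. Sketch mirroring the loop:

```cpp
#include <cassert>
#include <cstdint>

// Mirrors shuffle_remapped[i] in VisitS8x16Shuffle.
uint8_t RemapForBigEndian(uint8_t index) {
  const int max_index = 15;
  const int total_lane_count = 32;  // 2 * kSimd128Size
  return index <= max_index
             ? static_cast<uint8_t>(max_index - index)
             : static_cast<uint8_t>(total_lane_count - index + max_index);
}

int main() {
  assert(RemapForBigEndian(0) == 15);   // first vector reverses in place
  assert(RemapForBigEndian(15) == 0);
  assert(RemapForBigEndian(16) == 31);  // second vector likewise
  assert(RemapForBigEndian(31) == 16);
  return 0;
}
```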
+
+void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_S8x16Swizzle, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
void InstructionSelector::VisitS128Zero(Node* node) {
S390OperandGenerator g(this);
- Emit(kS390_S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+ Emit(kS390_S128Zero, g.DefineAsRegister(node));
}
void InstructionSelector::VisitS128Select(Node* node) {
@@ -2693,48 +2848,6 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
-void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8RoundingAverageU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16RoundingAverageU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitS128AndNot(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
@@ -2758,129 +2871,11 @@ void InstructionSelector::EmitPrepareResults(
}
}
-void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- UNIMPLEMENTED();
+void InstructionSelector::VisitLoadTransform(Node* node) {
+  // We should never reach here; see http://crrev.com/c/2050811
+ UNREACHABLE();
}
-void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); }
-
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/backend/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h
index 7f195db51b..a288e219a8 100644
--- a/deps/v8/src/compiler/backend/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h
@@ -15,7 +15,7 @@
#include "src/compiler/backend/x64/unwinding-info-writer-x64.h"
#elif V8_TARGET_ARCH_S390X
#include "src/compiler/backend/s390/unwinding-info-writer-s390.h"
-#elif V8_TARGET_ARCH_PPC64
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/compiler/backend/ppc/unwinding-info-writer-ppc.h"
#else
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 571502be77..472ffaa508 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -580,25 +580,41 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
ASSEMBLE_SIMD_INSTR(opcode, dst, input_index); \
} while (false)
-#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \
+#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, imm) \
do { \
- CpuFeatureScope sse_scope(tasm(), SSELevel); \
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
__ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \
} while (false)
-#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
- do { \
- CpuFeatureScope sse_scope(tasm(), SSE4_1); \
- Register dst = i.OutputRegister(); \
- Register tmp1 = i.TempRegister(0); \
- XMMRegister tmp2 = i.TempSimd128Register(1); \
- __ movq(tmp1, Immediate(1)); \
- __ xorq(dst, dst); \
- __ pxor(tmp2, tmp2); \
- __ opcode(tmp2, i.InputSimd128Register(0)); \
- __ ptest(tmp2, tmp2); \
- __ cmovq(zero, dst, tmp1); \
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ CpuFeatureScope sse_scope(tasm(), SSE4_1); \
+ Register dst = i.OutputRegister(); \
+ XMMRegister tmp = i.TempSimd128Register(0); \
+ __ xorq(dst, dst); \
+ __ Pxor(tmp, tmp); \
+ __ opcode(tmp, i.InputSimd128Register(0)); \
+ __ Ptest(tmp, tmp); \
+ __ setcc(equal, dst); \
+ } while (false)
+
+// If the shift is an immediate, this macro emits the opcode directly, with
+// the shift value taken modulo 2^width. Otherwise, it emits code to perform
+// the modulus operation first.
+#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ if (HasImmediateInput(instr, 1)) { \
+ __ opcode(dst, static_cast<byte>(i.InputInt##width(1))); \
+ } else { \
+ XMMRegister tmp = i.TempSimd128Register(0); \
+ Register shift = i.InputRegister(1); \
+ constexpr int mask = (1 << width) - 1; \
+ __ andq(shift, Immediate(mask)); \
+ __ Movq(tmp, shift); \
+ __ opcode(dst, tmp); \
+ } \
} while (false)
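ASSEMBLE_SIMD_SHIFT bakes in the Wasm rule that a vector shift count is taken modulo the lane width: the InputInt##width accessor truncates an immediate to width bits, while a register input is masked with (1 << width) - 1 before the count is moved into an XMM register. Scalar illustration of the lane rule:

```cpp
// Shift counts wrap at the lane width: width 5 for 32-bit lanes,
// width 4 for 16-bit lanes.
#include <cstdint>

uint32_t I32x4ShlLane(uint32_t lane, uint32_t shift) {
  return lane << (shift & 31);
}

int16_t I16x8ShrSLane(int16_t lane, uint32_t shift) {
  return static_cast<int16_t>(lane >> (shift & 15));
}
```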
void CodeGenerator::AssembleDeconstructFrame() {
@@ -969,9 +985,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
@@ -992,7 +1005,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
- __ int3();
+ __ DebugBreak();
break;
case kArchThrowTerminator:
unwinding_info_writer_.MarkBlockWillExit();
@@ -2634,12 +2647,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2Shl: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 8.
- __ andq(shift, Immediate(63));
- __ movq(tmp, shift);
- __ psllq(i.OutputSimd128Register(), tmp);
+ // Take shift value modulo 2^6.
+ ASSEMBLE_SIMD_SHIFT(Psllq, 6);
break;
}
case kX64I64x2ShrS: {
@@ -2652,14 +2661,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Modulo 64 not required as sarq_cl will mask cl to 6 bits.
// lower quadword
- __ pextrq(tmp, src, 0x0);
+ __ Pextrq(tmp, src, static_cast<int8_t>(0x0));
__ sarq_cl(tmp);
- __ pinsrq(dst, tmp, 0x0);
+ __ Pinsrq(dst, tmp, static_cast<int8_t>(0x0));
// upper quadword
- __ pextrq(tmp, src, 0x1);
+ __ Pextrq(tmp, src, static_cast<int8_t>(0x1));
__ sarq_cl(tmp);
- __ pinsrq(dst, tmp, 0x1);
+ __ Pinsrq(dst, tmp, static_cast<int8_t>(0x1));
break;
}
case kX64I64x2Add: {
@@ -2788,12 +2797,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2ShrU: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 64.
- __ andq(shift, Immediate(63));
- __ movq(tmp, shift);
- __ psrlq(i.OutputSimd128Register(), tmp);
+ // Take shift value modulo 2^6.
+ ASSEMBLE_SIMD_SHIFT(Psrlq, 6);
break;
}
case kX64I64x2MinU: {
@@ -2904,18 +2909,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0);
// NAN->0
- __ movaps(tmp, dst);
- __ cmpeqps(tmp, tmp);
- __ pand(dst, tmp);
+ __ Movaps(tmp, dst);
+ __ Cmpeqps(tmp, tmp);
+ __ Pand(dst, tmp);
// Set top bit if >= 0 (but not -0.0!)
- __ pxor(tmp, dst);
+ __ Pxor(tmp, dst);
// Convert
- __ cvttps2dq(dst, dst);
+ __ Cvttps2dq(dst, dst);
// Set top bit if >=0 is now < 0
- __ pand(tmp, dst);
- __ psrad(tmp, 31);
+ __ Pand(tmp, dst);
+ __ Psrad(tmp, static_cast<byte>(31));
// Set positive overflow lanes to 0x7FFFFFFF
- __ pxor(dst, tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I32x4SConvertI16x8Low: {
@@ -2943,21 +2948,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4Shl: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ andq(shift, Immediate(31));
- __ Movq(tmp, shift);
- __ Pslld(i.OutputSimd128Register(), tmp);
+ // Take shift value modulo 2^5.
+ ASSEMBLE_SIMD_SHIFT(Pslld, 5);
break;
}
case kX64I32x4ShrS: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ andq(shift, Immediate(31));
- __ Movq(tmp, shift);
- __ Psrad(i.OutputSimd128Register(), tmp);
+ // Take shift value modulo 2^5.
+ ASSEMBLE_SIMD_SHIFT(Psrad, 5);
break;
}
case kX64I32x4Add: {
@@ -2966,7 +2963,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4AddHoriz: {
CpuFeatureScope sse_scope(tasm(), SSSE3);
- __ phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4Sub: {
@@ -3018,26 +3015,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister tmp = i.TempSimd128Register(0);
XMMRegister tmp2 = i.TempSimd128Register(1);
// NAN->0, negative->0
- __ pxor(tmp2, tmp2);
- __ maxps(dst, tmp2);
+ __ Pxor(tmp2, tmp2);
+ __ Maxps(dst, tmp2);
// scratch: float representation of max_signed
- __ pcmpeqd(tmp2, tmp2);
- __ psrld(tmp2, 1); // 0x7fffffff
- __ cvtdq2ps(tmp2, tmp2); // 0x4f000000
+ __ Pcmpeqd(tmp2, tmp2);
+ __ Psrld(tmp2, static_cast<uint8_t>(1)); // 0x7fffffff
+ __ Cvtdq2ps(tmp2, tmp2); // 0x4f000000
// tmp: convert (src-max_signed).
// Positive overflow lanes -> 0x7FFFFFFF
// Negative lanes -> 0
- __ movaps(tmp, dst);
- __ subps(tmp, tmp2);
- __ cmpleps(tmp2, tmp);
- __ cvttps2dq(tmp, tmp);
- __ pxor(tmp, tmp2);
- __ pxor(tmp2, tmp2);
- __ pmaxsd(tmp, tmp2);
+ __ Movaps(tmp, dst);
+ __ Subps(tmp, tmp2);
+ __ Cmpleps(tmp2, tmp);
+ __ Cvttps2dq(tmp, tmp);
+ __ Pxor(tmp, tmp2);
+ __ Pxor(tmp2, tmp2);
+ __ Pmaxsd(tmp, tmp2);
// convert. Overflow lanes above max_signed will be 0x80000000
- __ cvttps2dq(dst, dst);
+ __ Cvttps2dq(dst, dst);
// Add (src-max_signed) for overflow lanes.
- __ paddd(dst, tmp);
+ __ Paddd(dst, tmp);
break;
}
case kX64I32x4UConvertI16x8Low: {
@@ -3051,12 +3048,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4ShrU: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 32.
- __ andq(shift, Immediate(31));
- __ Movq(tmp, shift);
- __ Psrld(i.OutputSimd128Register(), tmp);
+ // Take shift value modulo 2^5.
+ ASSEMBLE_SIMD_SHIFT(Psrld, 5);
break;
}
case kX64I32x4MinU: {
@@ -3088,37 +3081,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pcmpeqd(dst, src);
break;
}
+ case kX64I32x4Abs: {
+ __ Pabsd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
- __ xorps(dst, dst);
+ __ Xorps(dst, dst);
break;
}
case kX64I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ movd(dst, i.InputRegister(0));
+ __ Movd(dst, i.InputRegister(0));
} else {
- __ movd(dst, i.InputOperand(0));
+ __ Movd(dst, i.InputOperand(0));
}
- __ pshuflw(dst, dst, 0x0);
- __ pshufd(dst, dst, 0x0);
+ __ Pshuflw(dst, dst, static_cast<uint8_t>(0x0));
+ __ Pshufd(dst, dst, static_cast<uint8_t>(0x0));
break;
}
case kX64I16x8ExtractLaneU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
__ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I16x8ExtractLaneS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
__ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsxwl(dst, dst);
break;
}
case kX64I16x8ReplaceLane: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (HasRegisterInput(instr, 2)) {
__ Pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -3138,103 +3132,88 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8Neg: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psignw(dst, kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignw(dst, kScratchDoubleReg);
} else {
- __ pxor(dst, dst);
- __ psubw(dst, src);
+ __ Pxor(dst, dst);
+ __ Psubw(dst, src);
}
break;
}
case kX64I16x8Shl: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ andq(shift, Immediate(15));
- __ movq(tmp, shift);
- __ psllw(i.OutputSimd128Register(), tmp);
+ // Take shift value modulo 2^4.
+ ASSEMBLE_SIMD_SHIFT(Psllw, 4);
break;
}
case kX64I16x8ShrS: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ andq(shift, Immediate(15));
- __ movq(tmp, shift);
- __ psraw(i.OutputSimd128Register(), tmp);
+ // Take shift value modulo 2^4.
+ ASSEMBLE_SIMD_SHIFT(Psraw, 4);
break;
}
case kX64I16x8SConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Add: {
- __ paddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8AddSaturateS: {
- __ paddsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8AddHoriz: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
- __ phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Sub: {
- __ psubw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Psubw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8SubSaturateS: {
- __ psubsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Psubsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Mul: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MinS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MaxS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Eq: {
- __ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Ne: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ pcmpeqw(tmp, tmp);
- __ pxor(i.OutputSimd128Register(), tmp);
+ __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqw(tmp, tmp);
+ __ Pxor(i.OutputSimd128Register(), tmp);
break;
}
case kX64I16x8GtS: {
- __ pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8GeS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- __ pminsw(dst, src);
- __ pcmpeqw(dst, src);
+ __ Pminsw(dst, src);
+ __ Pcmpeqw(dst, src);
break;
}
case kX64I16x8UConvertI8x16Low: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmovzxbw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Pmovzxbw(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kX64I16x8UConvertI8x16High: {
@@ -3244,63 +3223,57 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8ShrU: {
- XMMRegister tmp = i.TempSimd128Register(0);
- Register shift = i.InputRegister(1);
- // Take shift value modulo 16.
- __ andq(shift, Immediate(15));
- __ movq(tmp, shift);
- __ psrlw(i.OutputSimd128Register(), tmp);
+ // Take shift value modulo 2^4.
+ ASSEMBLE_SIMD_SHIFT(Psrlw, 4);
break;
}
case kX64I16x8UConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8AddSaturateU: {
- __ paddusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8SubSaturateU: {
- __ psubusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Psubusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MinU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MaxU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8GtU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
- __ pmaxuw(dst, src);
- __ pcmpeqw(dst, src);
- __ pcmpeqw(tmp, tmp);
- __ pxor(dst, tmp);
+ __ Pmaxuw(dst, src);
+ __ Pcmpeqw(dst, src);
+ __ Pcmpeqw(tmp, tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I16x8GeU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- __ pminuw(dst, src);
- __ pcmpeqw(dst, src);
+ __ Pminuw(dst, src);
+ __ Pcmpeqw(dst, src);
break;
}
case kX64I16x8RoundingAverageU: {
__ Pavgw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I16x8Abs: {
+ __ Pabsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64I8x16Splat: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
__ Movd(dst, i.InputRegister(0));
@@ -3312,20 +3285,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16ExtractLaneU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
__ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I8x16ExtractLaneS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
__ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsxbl(dst, dst);
break;
}
case kX64I8x16ReplaceLane: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (HasRegisterInput(instr, 2)) {
__ Pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -3336,77 +3306,98 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16SConvertI16x8: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packsswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Packsswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16Neg: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psignb(dst, kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psignb(dst, kScratchDoubleReg);
} else {
- __ pxor(dst, dst);
- __ psubb(dst, src);
+ __ Pxor(dst, dst);
+ __ Psubb(dst, src);
}
break;
}
case kX64I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- // Temp registers for shift mask andadditional moves to XMM registers.
+ // Temp registers for shift mask and additional moves to XMM registers.
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
- Register shift = i.InputRegister(1);
- // Mask off the unwanted bits before word-shifting.
- __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- // Take shift value modulo 8.
- __ andq(shift, Immediate(7));
- __ movq(tmp, shift);
- __ addq(tmp, Immediate(8));
- __ movq(tmp_simd, tmp);
- __ psrlw(kScratchDoubleReg, tmp_simd);
- __ packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ pand(dst, kScratchDoubleReg);
- __ movq(tmp_simd, shift);
- __ psllw(dst, tmp_simd);
+ if (HasImmediateInput(instr, 1)) {
+ // Perform 16-bit shift, then mask away low bits.
+ uint8_t shift = i.InputInt3(1);
+ __ Psllw(dst, static_cast<byte>(shift));
+
+ uint8_t bmask = static_cast<uint8_t>(0xff << shift);
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
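+ // e.g. shift == 3: bmask == 0xf8; the splatted mask clears the low 3
+ // bits of each byte, which were shifted in from the byte lane below.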
+ __ movl(tmp, Immediate(mask));
+ __ Movd(tmp_simd, tmp);
+ __ Pshufd(tmp_simd, tmp_simd, static_cast<uint8_t>(0));
+ __ Pand(dst, tmp_simd);
+ } else {
+ Register shift = i.InputRegister(1);
+ // Mask off the unwanted bits before word-shifting.
+ __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ // Take shift value modulo 8.
+ __ andq(shift, Immediate(7));
+ __ movq(tmp, shift);
+ __ addq(tmp, Immediate(8));
+ __ Movq(tmp_simd, tmp);
+ __ Psrlw(kScratchDoubleReg, tmp_simd);
+ __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pand(dst, kScratchDoubleReg);
+ __ Movq(tmp_simd, shift);
+ __ Psllw(dst, tmp_simd);
+ }
break;
}
case kX64I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- // Temp registers for shift mask andadditional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ punpckhbw(kScratchDoubleReg, dst);
- __ punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ movq(tmp_simd, tmp);
- __ psraw(kScratchDoubleReg, tmp_simd);
- __ psraw(dst, tmp_simd);
- __ packsswb(dst, kScratchDoubleReg);
+ if (HasImmediateInput(instr, 1)) {
+ __ Punpckhbw(kScratchDoubleReg, dst);
+ __ Punpcklbw(dst, dst);
+ uint8_t shift = i.InputInt3(1) + 8;
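+ // Each byte was interleaved with itself above, so an arithmetic word
+ // shift by shift + 8 leaves the correctly sign-extended byte result in
+ // each low byte; Packsswb then repacks the two halves.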
+ __ Psraw(kScratchDoubleReg, shift);
+ __ Psraw(dst, shift);
+ __ Packsswb(dst, kScratchDoubleReg);
+ } else {
+ // Temp registers for shift mask and additional moves to XMM registers.
+ Register tmp = i.ToRegister(instr->TempAt(0));
+ XMMRegister tmp_simd = i.TempSimd128Register(1);
+ // Unpack the bytes into words, do arithmetic shifts, and repack.
+ __ Punpckhbw(kScratchDoubleReg, dst);
+ __ Punpcklbw(dst, dst);
+ // Prepare shift value
+ __ movq(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ andq(tmp, Immediate(7));
+ __ addq(tmp, Immediate(8));
+ __ Movq(tmp_simd, tmp);
+ __ Psraw(kScratchDoubleReg, tmp_simd);
+ __ Psraw(dst, tmp_simd);
+ __ Packsswb(dst, kScratchDoubleReg);
+ }
break;
}
case kX64I8x16Add: {
- __ paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16AddSaturateS: {
- __ paddsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Paddsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16Sub: {
- __ psubb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Psubb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16SubSaturateS: {
- __ psubsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Psubsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16Mul: {
@@ -3419,66 +3410,62 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// right= BBbb BBbb ... BBbb BBbb
// t = 00AA 00AA ... 00AA 00AA
// s = 00BB 00BB ... 00BB 00BB
- __ movaps(tmp, dst);
- __ movaps(kScratchDoubleReg, right);
- __ psrlw(tmp, 8);
- __ psrlw(kScratchDoubleReg, 8);
+ __ Movaps(tmp, dst);
+ __ Movaps(kScratchDoubleReg, right);
+ __ Psrlw(tmp, static_cast<byte>(8));
+ __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
// dst = left * 256
- __ psllw(dst, 8);
+ __ Psllw(dst, static_cast<byte>(8));
// t = I16x8Mul(t, s)
// => __PP __PP ... __PP __PP
- __ pmullw(tmp, kScratchDoubleReg);
+ __ Pmullw(tmp, kScratchDoubleReg);
// dst = I16x8Mul(left * 256, right)
// => pp__ pp__ ... pp__ pp__
- __ pmullw(dst, right);
+ __ Pmullw(dst, right);
// t = I16x8Shl(t, 8)
// => PP00 PP00 ... PP00 PP00
- __ psllw(tmp, 8);
+ __ Psllw(tmp, static_cast<byte>(8));
// dst = I16x8Shr(dst, 8)
// => 00pp 00pp ... 00pp 00pp
- __ psrlw(dst, 8);
+ __ Psrlw(dst, static_cast<byte>(8));
// dst = I16x8Or(dst, t)
// => PPpp PPpp ... PPpp PPpp
- __ por(dst, tmp);
+ __ Por(dst, tmp);
break;
}
case kX64I8x16MinS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16MaxS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16Eq: {
- __ pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16Ne: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ pcmpeqb(tmp, tmp);
- __ pxor(i.OutputSimd128Register(), tmp);
+ __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqb(tmp, tmp);
+ __ Pxor(i.OutputSimd128Register(), tmp);
break;
}
case kX64I8x16GtS: {
- __ pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16GeS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- __ pminsb(dst, src);
- __ pcmpeqb(dst, src);
+ __ Pminsb(dst, src);
+ __ Pcmpeqb(dst, src);
break;
}
case kX64I8x16UConvertI16x8: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ packuswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Packuswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16ShrU: {
@@ -3488,17 +3475,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Temp registers for shift mask and additional moves to XMM registers.
Register tmp = i.ToRegister(instr->TempAt(0));
XMMRegister tmp_simd = i.TempSimd128Register(1);
- __ punpckhbw(kScratchDoubleReg, dst);
- __ punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ movq(tmp_simd, tmp);
- __ psrlw(kScratchDoubleReg, tmp_simd);
- __ psrlw(dst, tmp_simd);
- __ packuswb(dst, kScratchDoubleReg);
+ if (HasImmediateInput(instr, 1)) {
+ // Perform 16-bit shift, then mask away high bits.
+ uint8_t shift = i.InputInt3(1);
+ __ Psrlw(dst, static_cast<byte>(shift));
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
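+ // e.g. shift == 3: bmask == 0x1f; the splatted mask clears the high 3
+ // bits of each byte, which were shifted in from the byte lane above.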
+ __ movl(tmp, Immediate(mask));
+ __ Movd(tmp_simd, tmp);
+ __ Pshufd(tmp_simd, tmp_simd, static_cast<byte>(0));
+ __ Pand(dst, tmp_simd);
+ } else {
+ __ Punpckhbw(kScratchDoubleReg, dst);
+ __ Punpcklbw(dst, dst);
+ // Prepare shift value
+ __ movq(tmp, i.InputRegister(1));
+ // Take shift value modulo 8.
+ __ andq(tmp, Immediate(7));
+ __ addq(tmp, Immediate(8));
+ __ Movq(tmp_simd, tmp);
+ __ Psrlw(kScratchDoubleReg, tmp_simd);
+ __ Psrlw(dst, tmp_simd);
+ __ Packuswb(dst, kScratchDoubleReg);
+ }
break;
}
case kX64I8x16AddSaturateU: {
@@ -3506,64 +3506,64 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16SubSaturateU: {
- __ psubusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Psubusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16MinU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16MaxU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16GtU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
- __ pmaxub(dst, src);
- __ pcmpeqb(dst, src);
- __ pcmpeqb(tmp, tmp);
- __ pxor(dst, tmp);
+ __ Pmaxub(dst, src);
+ __ Pcmpeqb(dst, src);
+ __ Pcmpeqb(tmp, tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I8x16GeU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
- __ pminub(dst, src);
- __ pcmpeqb(dst, src);
+ __ Pminub(dst, src);
+ __ Pcmpeqb(dst, src);
break;
}
case kX64I8x16RoundingAverageU: {
__ Pavgb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I8x16Abs: {
+ __ Pabsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64S128And: {
- __ pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64S128Or: {
- __ por(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Por(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64S128Xor: {
- __ pxor(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pxor(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64S128Not: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
- __ movaps(kScratchDoubleReg, dst);
- __ pcmpeqd(dst, dst);
- __ pxor(dst, kScratchDoubleReg);
+ __ Movaps(kScratchDoubleReg, dst);
+ __ Pcmpeqd(dst, dst);
+ __ Pxor(dst, kScratchDoubleReg);
} else {
- __ pcmpeqd(dst, dst);
- __ pxor(dst, src);
+ __ Pcmpeqd(dst, dst);
+ __ Pxor(dst, src);
}
break;
@@ -3586,7 +3586,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64S8x16Swizzle: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
XMMRegister mask = i.TempSimd128Register(0);
@@ -3718,129 +3717,126 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64S32x4Swizzle: {
DCHECK_EQ(2, instr->InputCount());
- ASSEMBLE_SIMD_IMM_INSTR(pshufd, i.OutputSimd128Register(), 0,
- i.InputInt8(1));
+ ASSEMBLE_SIMD_IMM_INSTR(Pshufd, i.OutputSimd128Register(), 0,
+ i.InputUint8(1));
break;
}
case kX64S32x4Shuffle: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
DCHECK_EQ(4, instr->InputCount()); // Swizzles should be handled above.
- int8_t shuffle = i.InputInt8(2);
+ uint8_t shuffle = i.InputUint8(2);
DCHECK_NE(0xe4, shuffle); // A simple blend should be handled below.
- ASSEMBLE_SIMD_IMM_INSTR(pshufd, kScratchDoubleReg, 1, shuffle);
- ASSEMBLE_SIMD_IMM_INSTR(pshufd, i.OutputSimd128Register(), 0, shuffle);
- __ pblendw(i.OutputSimd128Register(), kScratchDoubleReg, i.InputInt8(3));
+ ASSEMBLE_SIMD_IMM_INSTR(Pshufd, kScratchDoubleReg, 1, shuffle);
+ ASSEMBLE_SIMD_IMM_INSTR(Pshufd, i.OutputSimd128Register(), 0, shuffle);
+ __ Pblendw(i.OutputSimd128Register(), kScratchDoubleReg, i.InputUint8(3));
break;
}
case kX64S16x8Blend: {
- ASSEMBLE_SIMD_IMM_SHUFFLE(pblendw, SSE4_1, i.InputInt8(2));
+ ASSEMBLE_SIMD_IMM_SHUFFLE(Pblendw, i.InputUint8(2));
break;
}
case kX64S16x8HalfShuffle1: {
XMMRegister dst = i.OutputSimd128Register();
- ASSEMBLE_SIMD_IMM_INSTR(pshuflw, dst, 0, i.InputInt8(1));
- __ pshufhw(dst, dst, i.InputInt8(2));
+ ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, i.InputUint8(1));
+ __ Pshufhw(dst, dst, i.InputUint8(2));
break;
}
case kX64S16x8HalfShuffle2: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
- ASSEMBLE_SIMD_IMM_INSTR(pshuflw, kScratchDoubleReg, 1, i.InputInt8(2));
- __ pshufhw(kScratchDoubleReg, kScratchDoubleReg, i.InputInt8(3));
- ASSEMBLE_SIMD_IMM_INSTR(pshuflw, dst, 0, i.InputInt8(2));
- __ pshufhw(dst, dst, i.InputInt8(3));
- __ pblendw(dst, kScratchDoubleReg, i.InputInt8(4));
+ ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, kScratchDoubleReg, 1, i.InputUint8(2));
+ __ Pshufhw(kScratchDoubleReg, kScratchDoubleReg, i.InputUint8(3));
+ ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, i.InputUint8(2));
+ __ Pshufhw(dst, dst, i.InputUint8(3));
+ __ Pblendw(dst, kScratchDoubleReg, i.InputUint8(4));
break;
}
case kX64S8x16Alignr: {
- ASSEMBLE_SIMD_IMM_SHUFFLE(palignr, SSSE3, i.InputInt8(2));
+ ASSEMBLE_SIMD_IMM_SHUFFLE(Palignr, i.InputUint8(2));
break;
}
case kX64S16x8Dup: {
XMMRegister dst = i.OutputSimd128Register();
- int8_t lane = i.InputInt8(1) & 0x7;
- int8_t lane4 = lane & 0x3;
- int8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
+ uint8_t lane = i.InputInt8(1) & 0x7;
+ uint8_t lane4 = lane & 0x3;
+ uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
- ASSEMBLE_SIMD_IMM_INSTR(pshuflw, dst, 0, half_dup);
- __ pshufd(dst, dst, 0);
+ ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, half_dup);
+ __ Pshufd(dst, dst, static_cast<uint8_t>(0));
} else {
- ASSEMBLE_SIMD_IMM_INSTR(pshufhw, dst, 0, half_dup);
- __ pshufd(dst, dst, 0xaa);
+ ASSEMBLE_SIMD_IMM_INSTR(Pshufhw, dst, 0, half_dup);
+ __ Pshufd(dst, dst, static_cast<uint8_t>(0xaa));
}
break;
}
case kX64S8x16Dup: {
XMMRegister dst = i.OutputSimd128Register();
- int8_t lane = i.InputInt8(1) & 0xf;
+ uint8_t lane = i.InputInt8(1) & 0xf;
DCHECK_EQ(dst, i.InputSimd128Register(0));
if (lane < 8) {
- __ punpcklbw(dst, dst);
+ __ Punpcklbw(dst, dst);
} else {
- __ punpckhbw(dst, dst);
+ __ Punpckhbw(dst, dst);
}
lane &= 0x7;
- int8_t lane4 = lane & 0x3;
- int8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
+ uint8_t lane4 = lane & 0x3;
+ uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
- __ pshuflw(dst, dst, half_dup);
- __ pshufd(dst, dst, 0);
+ __ Pshuflw(dst, dst, half_dup);
+ __ Pshufd(dst, dst, static_cast<uint8_t>(0));
} else {
- __ pshufhw(dst, dst, half_dup);
- __ pshufd(dst, dst, 0xaa);
+ __ Pshufhw(dst, dst, half_dup);
+ __ Pshufd(dst, dst, static_cast<uint8_t>(0xaa));
}
break;
}
case kX64S64x2UnpackHigh:
- ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhqdq);
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(Punpckhqdq);
break;
case kX64S32x4UnpackHigh:
- ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhdq);
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(Punpckhdq);
break;
case kX64S16x8UnpackHigh:
- ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhwd);
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(Punpckhwd);
break;
case kX64S8x16UnpackHigh:
- ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckhbw);
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(Punpckhbw);
break;
case kX64S64x2UnpackLow:
- ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklqdq);
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(Punpcklqdq);
break;
case kX64S32x4UnpackLow:
- ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpckldq);
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(Punpckldq);
break;
case kX64S16x8UnpackLow:
- ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklwd);
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(Punpcklwd);
break;
case kX64S8x16UnpackLow:
- ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw);
+ ASSEMBLE_SIMD_PUNPCK_SHUFFLE(Punpcklbw);
break;
case kX64S16x8UnzipHigh: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
if (instr->InputCount() == 2) {
- ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
- __ psrld(kScratchDoubleReg, 16);
+ ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
+ __ Psrld(kScratchDoubleReg, static_cast<byte>(16));
src2 = kScratchDoubleReg;
}
- __ psrld(dst, 16);
- __ packusdw(dst, src2);
+ __ Psrld(dst, static_cast<byte>(16));
+ __ Packusdw(dst, src2);
break;
}
case kX64S16x8UnzipLow: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
if (instr->InputCount() == 2) {
- ASSEMBLE_SIMD_IMM_INSTR(pblendw, kScratchDoubleReg, 1, 0x55);
+ ASSEMBLE_SIMD_IMM_INSTR(Pblendw, kScratchDoubleReg, 1,
+ static_cast<uint8_t>(0x55));
src2 = kScratchDoubleReg;
}
- __ pblendw(dst, kScratchDoubleReg, 0xaa);
- __ packusdw(dst, src2);
+ __ Pblendw(dst, kScratchDoubleReg, static_cast<uint8_t>(0xaa));
+ __ Packusdw(dst, src2);
break;
}
case kX64S8x16UnzipHigh: {
@@ -3848,12 +3844,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
if (instr->InputCount() == 2) {
- ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
- __ psrlw(kScratchDoubleReg, 8);
+ ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
+ __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
src2 = kScratchDoubleReg;
}
- __ psrlw(dst, 8);
- __ packuswb(dst, src2);
+ __ Psrlw(dst, static_cast<byte>(8));
+ __ Packuswb(dst, src2);
break;
}
case kX64S8x16UnzipLow: {
@@ -3861,44 +3857,44 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0));
if (instr->InputCount() == 2) {
- ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
- __ psllw(kScratchDoubleReg, 8);
- __ psrlw(kScratchDoubleReg, 8);
+ ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
+ __ Psllw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
src2 = kScratchDoubleReg;
}
- __ psllw(dst, 8);
- __ psrlw(dst, 8);
- __ packuswb(dst, src2);
+ __ Psllw(dst, static_cast<byte>(8));
+ __ Psrlw(dst, static_cast<byte>(8));
+ __ Packuswb(dst, src2);
break;
}
case kX64S8x16TransposeLow: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ psllw(dst, 8);
+ __ Psllw(dst, static_cast<byte>(8));
if (instr->InputCount() == 1) {
- __ movups(kScratchDoubleReg, dst);
+ __ Movups(kScratchDoubleReg, dst);
} else {
DCHECK_EQ(2, instr->InputCount());
- ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
- __ psllw(kScratchDoubleReg, 8);
+ ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
+ __ Psllw(kScratchDoubleReg, static_cast<byte>(8));
}
- __ psrlw(dst, 8);
- __ por(dst, kScratchDoubleReg);
+ __ Psrlw(dst, static_cast<byte>(8));
+ __ Por(dst, kScratchDoubleReg);
break;
}
case kX64S8x16TransposeHigh: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ psrlw(dst, 8);
+ __ Psrlw(dst, static_cast<byte>(8));
if (instr->InputCount() == 1) {
- __ movups(kScratchDoubleReg, dst);
+ __ Movups(kScratchDoubleReg, dst);
} else {
DCHECK_EQ(2, instr->InputCount());
- ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 1);
- __ psrlw(kScratchDoubleReg, 8);
+ ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
+ __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
}
- __ psllw(kScratchDoubleReg, 8);
- __ por(dst, kScratchDoubleReg);
+ __ Psllw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Por(dst, kScratchDoubleReg);
break;
}
case kX64S8x8Reverse:
@@ -3909,14 +3905,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(dst, i.InputSimd128Register(0));
if (arch_opcode != kX64S8x2Reverse) {
// First shuffle words into position.
- int8_t shuffle_mask = arch_opcode == kX64S8x4Reverse ? 0xB1 : 0x1B;
- __ pshuflw(dst, dst, shuffle_mask);
- __ pshufhw(dst, dst, shuffle_mask);
+ uint8_t shuffle_mask = arch_opcode == kX64S8x4Reverse ? 0xB1 : 0x1B;
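+ // Imm 0xB1 selects words (1,0,3,2), swapping adjacent pairs; 0x1B
+ // selects (3,2,1,0), a full reverse.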
+ __ Pshuflw(dst, dst, shuffle_mask);
+ __ Pshufhw(dst, dst, shuffle_mask);
}
- __ movaps(kScratchDoubleReg, dst);
- __ psrlw(kScratchDoubleReg, 8);
- __ psllw(dst, 8);
- __ por(dst, kScratchDoubleReg);
+ __ Movaps(kScratchDoubleReg, dst);
+ __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Psllw(dst, static_cast<byte>(8));
+ __ Por(dst, kScratchDoubleReg);
break;
}
case kX64S1x2AnyTrue:
@@ -3926,11 +3922,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
- Register tmp = i.TempRegister(0);
- __ xorq(tmp, tmp);
- __ movq(dst, Immediate(1));
- __ ptest(src, src);
- __ cmovq(zero, dst, tmp);
+
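+ // Ptest sets ZF iff src is all zeros; setcc materializes "any lane
+ // set" as 0 or 1 in dst, which is cleared first since setcc writes
+ // only the low byte.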
+ __ xorq(dst, dst);
+ __ Ptest(src, src);
+ __ setcc(not_equal, dst);
break;
}
// Need to split up all the different lane structures because the
@@ -3942,15 +3937,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64S1x4AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(pcmpeqd);
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
}
case kX64S1x8AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqw);
break;
}
case kX64S1x16AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
case kWord32AtomicExchangeInt8: {
@@ -4065,6 +4060,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Word64AtomicCompareExchangeUint32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
+ // Zero-extend the 32-bit value to 64 bits.
+ __ movl(rax, rax);
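+ // (A 32-bit mov on x64 implicitly clears the upper 32 bits of rax.)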
break;
}
case kX64Word64AtomicCompareExchangeUint64: {
@@ -4133,6 +4130,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
+#undef ASSEMBLE_SIMD_SHIFT
namespace {
@@ -4297,16 +4295,6 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
cases.data() + cases.size());
}
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- X64OperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ cmpl(input, Immediate(i.InputInt32(index + 0)));
- __ j(equal, GetLabel(i.InputRpo(index + 1)));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
X64OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -4376,7 +4364,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ pushq(kWasmInstanceRegister);
} else if (call_descriptor->IsWasmImportWrapper() ||
call_descriptor->IsWasmCapiFunction()) {
- // WASM import wrappers are passed a tuple in the place of the instance.
+ // Wasm import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 64e87430b7..be5ac0d6c3 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -240,6 +240,7 @@ namespace compiler {
V(X64I32x4MaxU) \
V(X64I32x4GtU) \
V(X64I32x4GeU) \
+ V(X64I32x4Abs) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneU) \
V(X64I16x8ExtractLaneS) \
@@ -273,6 +274,7 @@ namespace compiler {
V(X64I16x8GtU) \
V(X64I16x8GeU) \
V(X64I16x8RoundingAverageU) \
+ V(X64I16x8Abs) \
V(X64I8x16Splat) \
V(X64I8x16ExtractLaneU) \
V(X64I8x16ExtractLaneS) \
@@ -301,6 +303,7 @@ namespace compiler {
V(X64I8x16GtU) \
V(X64I8x16GeU) \
V(X64I8x16RoundingAverageU) \
+ V(X64I8x16Abs) \
V(X64S128Zero) \
V(X64S128Not) \
V(X64S128And) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 6563ab5f7c..f4a74a4050 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -212,6 +212,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4MaxU:
case kX64I32x4GtU:
case kX64I32x4GeU:
+ case kX64I32x4Abs:
case kX64I16x8Splat:
case kX64I16x8ExtractLaneU:
case kX64I16x8ExtractLaneS:
@@ -245,6 +246,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8GtU:
case kX64I16x8GeU:
case kX64I16x8RoundingAverageU:
+ case kX64I16x8Abs:
case kX64I8x16Splat:
case kX64I8x16ExtractLaneU:
case kX64I8x16ExtractLaneS:
@@ -273,6 +275,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16GtU:
case kX64I8x16GeU:
case kX64I8x16RoundingAverageU:
+ case kX64I8x16Abs:
case kX64S128And:
case kX64S128Or:
case kX64S128Xor:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 1046ab9a06..392b207c8e 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -2212,7 +2212,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- // Emit either ArchTableSwitch or ArchLookupSwitch.
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range();
@@ -2729,12 +2729,15 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4Neg) \
V(I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High) \
+ V(I32x4Abs) \
V(I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High) \
V(I16x8Neg) \
V(I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High) \
+ V(I16x8Abs) \
V(I8x16Neg) \
+ V(I8x16Abs) \
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
@@ -2749,7 +2752,6 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_NARROW_SHIFT_OPCODES(V) \
V(I8x16Shl) \
- V(I8x16ShrS) \
V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
@@ -2806,13 +2808,18 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, S)
SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
#undef VISIT_SIMD_REPLACE_LANE
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempSimd128Register()}; \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+#define VISIT_SIMD_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
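+ /* An immediate shift count is encoded directly, so no temp is needed. */ \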
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ } \
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
@@ -2822,9 +2829,15 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)), \
+ arraysize(temps), temps); \
+ } else { \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ } \
}
SIMD_NARROW_SHIFT_OPCODES(VISIT_SIMD_NARROW_SHIFT)
#undef VISIT_SIMD_NARROW_SHIFT
@@ -2862,23 +2875,22 @@ SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP)
#undef VISIT_SIMD_BINOP_ONE_TEMP
#undef SIMD_BINOP_ONE_TEMP_LIST
-#define VISIT_SIMD_ANYTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister()}; \
- Emit(kX64##Opcode, g.DefineAsRegister(node), \
- g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
+#define VISIT_SIMD_ANYTRUE(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0))); \
}
SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
#undef VISIT_SIMD_ANYTRUE
#undef SIMD_ANYTRUE_LIST
-#define VISIT_SIMD_ALLTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
- Emit(kX64##Opcode, g.DefineAsRegister(node), \
- g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
+#define VISIT_SIMD_ALLTRUE(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
}
SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE
@@ -3035,6 +3047,19 @@ void InstructionSelector::VisitI8x16Mul(Node* node) {
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
+void InstructionSelector::VisitI8x16ShrS(Node* node) {
+ X64OperandGenerator g(this);
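+ // Split out of SIMD_NARROW_SHIFT_OPCODES: the immediate case here needs
+ // no temp registers.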
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
+ Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+ }
+}
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index ffc149ea5d..1c8a606726 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -157,6 +157,11 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
return Replace(dead());
}
SimplifyBranchCondition(node);
+ // Trigger revisits of the IfTrue/IfFalse projections, since they depend on
+ // the branch condition.
+ for (Node* const use : node->uses()) {
+ Revisit(use);
+ }
return TakeConditionsFromFirstControl(node);
}
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index f1d43fc1a6..cddbabd074 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -328,9 +328,7 @@ void BytecodeAnalysis::Analyze() {
if (bytecode == Bytecode::kSwitchOnGeneratorState) {
DCHECK_EQ(generator_switch_index, -1);
generator_switch_index = iterator.current_index();
- }
-
- if (bytecode == Bytecode::kJumpLoop) {
+ } else if (bytecode == Bytecode::kJumpLoop) {
// Every byte up to and including the last byte within the backwards jump
// instruction is considered part of the loop, set loop end accordingly.
int loop_end = current_offset + iterator.current_bytecode_size();
@@ -350,7 +348,16 @@ void BytecodeAnalysis::Analyze() {
if (analyze_liveness_) {
loop_end_index_queue_.push_back(iterator.current_index());
}
- } else if (loop_stack_.size() > 1) {
+ }
+
+ // We have to pop from loop_stack_ if:
+ // 1) We entered the body of the loop, or
+ // 2) we have a JumpLoop that jumps to itself (i.e. an empty loop).
+ bool pop_current_loop = loop_stack_.size() > 1 &&
+ (bytecode != Bytecode::kJumpLoop ||
+ iterator.GetJumpTargetOffset() == current_offset);
+
+ if (pop_current_loop) {
LoopStackEntry& current_loop = loop_stack_.top();
LoopInfo* current_loop_info = current_loop.loop_info;
@@ -553,10 +560,10 @@ void BytecodeAnalysis::Analyze() {
}
void BytecodeAnalysis::PushLoop(int loop_header, int loop_end) {
- DCHECK(loop_header < loop_end);
- DCHECK(loop_stack_.top().header_offset < loop_header);
- DCHECK(end_to_header_.find(loop_end) == end_to_header_.end());
- DCHECK(header_to_info_.find(loop_header) == header_to_info_.end());
+ DCHECK_LT(loop_header, loop_end);
+ DCHECK_LT(loop_stack_.top().header_offset, loop_header);
+ DCHECK_EQ(end_to_header_.find(loop_end), end_to_header_.end());
+ DCHECK_EQ(header_to_info_.find(loop_header), header_to_info_.end());
int parent_offset = loop_stack_.top().header_offset;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 064d7ee7e4..b59b5a1b84 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -164,6 +164,8 @@ class BytecodeGraphBuilder {
// to the given node and the output value produced by the node is combined.
// Conceptually this frame state is "after" a given operation.
void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
+ void PrepareFrameState(Node* node, OutputFrameStateCombine combine,
+ BailoutId bailout_id);
void BuildCreateArguments(CreateArgumentsType type);
Node* BuildLoadGlobal(NameRef name, uint32_t feedback_slot_index,
@@ -261,6 +263,11 @@ class BytecodeGraphBuilder {
// feedback. Returns kDisallowSpeculation if feedback is insufficient.
SpeculationMode GetSpeculationMode(int slot_id) const;
+ // Helpers for building the implicit FunctionEntry and IterationBody
+ // StackChecks.
+ void BuildFunctionEntryStackCheck();
+ void BuildIterationBodyStackCheck();
+
// Control flow plumbing.
void BuildJump();
void BuildJumpIf(Node* condition);
@@ -355,8 +362,6 @@ class BytecodeGraphBuilder {
currently_peeled_loop_offset_ = offset;
}
bool skip_first_stack_check() const { return skip_first_stack_check_; }
- bool visited_first_stack_check() const { return visited_first_stack_check_; }
- void set_visited_first_stack_check() { visited_first_stack_check_ = true; }
int current_exception_handler() const { return current_exception_handler_; }
void set_current_exception_handler(int index) {
current_exception_handler_ = index;
@@ -395,7 +400,6 @@ class BytecodeGraphBuilder {
int currently_peeled_loop_offset_;
const bool skip_first_stack_check_;
- bool visited_first_stack_check_ = false;
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
@@ -1086,16 +1090,30 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
void BytecodeGraphBuilder::PrepareFrameState(Node* node,
OutputFrameStateCombine combine) {
if (OperatorProperties::HasFrameStateInput(node->op())) {
+ PrepareFrameState(node, combine,
+ BailoutId(bytecode_iterator().current_offset()));
+ }
+}
+
+void BytecodeGraphBuilder::PrepareFrameState(Node* node,
+ OutputFrameStateCombine combine,
+ BailoutId bailout_id) {
+ if (OperatorProperties::HasFrameStateInput(node->op())) {
// Add the frame state for after the operation. The node in question has
// already been created and had a {Dead} frame state input up until now.
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
- BailoutId bailout_id(bytecode_iterator().current_offset());
+ DCHECK_IMPLIES(bailout_id.ToInt() == kFunctionEntryBytecodeOffset,
+ bytecode_iterator().current_offset() == 0);
+ // If we have kFunctionEntryBytecodeOffset as the bailout_id, we want to get
+ // the liveness at the moment of function entry. This is the same as the IN
+ // liveness of the first actual bytecode.
const BytecodeLivenessState* liveness_after =
- bytecode_analysis().GetOutLivenessFor(
- bytecode_iterator().current_offset());
+ bailout_id.ToInt() == kFunctionEntryBytecodeOffset
+ ? bytecode_analysis().GetInLivenessFor(0)
+ : bytecode_analysis().GetOutLivenessFor(bailout_id.ToInt());
Node* frame_state_after =
environment()->Checkpoint(bailout_id, combine, liveness_after);
@@ -1204,6 +1222,21 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset(
}
}
+void BytecodeGraphBuilder::BuildFunctionEntryStackCheck() {
+ if (!skip_first_stack_check()) {
+ Node* node =
+ NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry));
+ PrepareFrameState(node, OutputFrameStateCombine::Ignore(),
+ BailoutId(kFunctionEntryBytecodeOffset));
+ }
+}
+
+void BytecodeGraphBuilder::BuildIterationBodyStackCheck() {
+ Node* node =
+ NewNode(javascript()->StackCheck(StackCheckKind::kJSIterationBody));
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+}
+
// We will iterate through the OSR loop, then its parent, and so on
// until we have reached the outermost loop containing the OSR loop. We do
// not generate nodes for anything before the outermost loop.
@@ -1307,6 +1340,8 @@ void BytecodeGraphBuilder::VisitBytecodes() {
// the last copies of the loops it contains) to be generated by the normal
// bytecode iteration below.
AdvanceToOsrEntryAndPeelLoops();
+ } else {
+ BuildFunctionEntryStackCheck();
}
bool has_one_shot_bytecode = false;
@@ -3229,7 +3264,10 @@ void BytecodeGraphBuilder::VisitJumpIfUndefinedOrNullConstant() {
BuildJumpIfEqual(jsgraph()->NullConstant());
}
-void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
+void BytecodeGraphBuilder::VisitJumpLoop() {
+ BuildIterationBodyStackCheck();
+ BuildJump();
+}
void BytecodeGraphBuilder::BuildSwitchOnSmi(Node* condition) {
interpreter::JumpTableTargetOffsets offsets =
@@ -3252,24 +3290,6 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
BuildSwitchOnSmi(acc_smi);
}
-void BytecodeGraphBuilder::VisitStackCheck() {
- // Note: The stack check kind is determined heuristically: we simply assume
- // that the first seen stack check is at function-entry, and all other stack
- // checks are at iteration-body. An alternative precise solution would be to
- // parameterize the StackCheck bytecode; but this has the caveat of increased
- // code size.
- StackCheckKind kind = StackCheckKind::kJSIterationBody;
- if (!visited_first_stack_check()) {
- set_visited_first_stack_check();
- kind = StackCheckKind::kJSFunctionEntry;
- if (skip_first_stack_check()) return;
- }
-
- PrepareEagerCheckpoint();
- Node* node = NewNode(javascript()->StackCheck(kind));
- environment()->RecordAfterState(node, Environment::kAttachFrameState);
-}
-
void BytecodeGraphBuilder::VisitSetPendingMessage() {
Node* previous_message = NewNode(javascript()->LoadMessage());
NewNode(javascript()->StoreMessage(), environment()->LookupAccumulator());
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 4c576b771a..4967f2bbfa 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -68,8 +68,7 @@ namespace {
(1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) | \
(1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) | \
(1 << x25.code()) | (1 << x26.code()) | (1 << x27.code()) | \
- (1 << x28.code()) | (1 << x29.code()) | (1 << x30.code())
-
+ (1 << x28.code())
#define CALLEE_SAVE_FP_REGISTERS \
(1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) | \
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 2eee2aed15..302c429f0f 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -257,6 +257,11 @@ TNode<IntPtrT> CodeAssembler::IntPtrConstant(intptr_t value) {
return UncheckedCast<IntPtrT>(jsgraph()->IntPtrConstant(value));
}
+TNode<TaggedIndex> CodeAssembler::TaggedIndexConstant(intptr_t value) {
+ DCHECK(TaggedIndex::IsValid(value));
+ return UncheckedCast<TaggedIndex>(raw_assembler()->IntPtrConstant(value));
+}
+
TNode<Number> CodeAssembler::NumberConstant(double value) {
int smi_value;
if (DoubleToSmiInteger(value, &smi_value)) {
@@ -446,7 +451,7 @@ void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
return raw_assembler()->PopAndReturn(pop, value);
}
-void CodeAssembler::ReturnIf(Node* condition, TNode<Object> value) {
+void CodeAssembler::ReturnIf(TNode<BoolT> condition, TNode<Object> value) {
Label if_return(this), if_continue(this);
Branch(condition, &if_return, &if_continue);
Bind(&if_return);
@@ -494,13 +499,12 @@ TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
-TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(
- SloppyTNode<Object> value) {
+TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(TNode<Object> value) {
return UncheckedCast<Object>(
raw_assembler()->TaggedPoisonOnSpeculation(value));
}
-TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(SloppyTNode<WordT> value) {
+TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
return UncheckedCast<WordT>(raw_assembler()->WordPoisonOnSpeculation(value));
}
@@ -550,7 +554,7 @@ CODE_ASSEMBLER_COMPARE(Word64Equal, Word64T, int64_t, ToInt64Constant, ==)
CODE_ASSEMBLER_COMPARE(Word64NotEqual, Word64T, int64_t, ToInt64Constant, !=)
#undef CODE_ASSEMBLER_COMPARE
-TNode<UintPtrT> CodeAssembler::ChangeUint32ToWord(SloppyTNode<Word32T> value) {
+TNode<UintPtrT> CodeAssembler::ChangeUint32ToWord(TNode<Word32T> value) {
if (raw_assembler()->machine()->Is64()) {
return UncheckedCast<UintPtrT>(
raw_assembler()->ChangeUint32ToUint64(value));
@@ -558,47 +562,43 @@ TNode<UintPtrT> CodeAssembler::ChangeUint32ToWord(SloppyTNode<Word32T> value) {
return ReinterpretCast<UintPtrT>(value);
}
-TNode<IntPtrT> CodeAssembler::ChangeInt32ToIntPtr(SloppyTNode<Word32T> value) {
+TNode<IntPtrT> CodeAssembler::ChangeInt32ToIntPtr(TNode<Word32T> value) {
if (raw_assembler()->machine()->Is64()) {
- return ReinterpretCast<IntPtrT>(raw_assembler()->ChangeInt32ToInt64(value));
+ return UncheckedCast<IntPtrT>(raw_assembler()->ChangeInt32ToInt64(value));
}
return ReinterpretCast<IntPtrT>(value);
}
TNode<IntPtrT> CodeAssembler::ChangeFloat64ToIntPtr(TNode<Float64T> value) {
if (raw_assembler()->machine()->Is64()) {
- return ReinterpretCast<IntPtrT>(
- raw_assembler()->ChangeFloat64ToInt64(value));
+ return UncheckedCast<IntPtrT>(raw_assembler()->ChangeFloat64ToInt64(value));
}
- return ReinterpretCast<IntPtrT>(raw_assembler()->ChangeFloat64ToInt32(value));
+ return UncheckedCast<IntPtrT>(raw_assembler()->ChangeFloat64ToInt32(value));
}
-TNode<UintPtrT> CodeAssembler::ChangeFloat64ToUintPtr(
- SloppyTNode<Float64T> value) {
+TNode<UintPtrT> CodeAssembler::ChangeFloat64ToUintPtr(TNode<Float64T> value) {
if (raw_assembler()->machine()->Is64()) {
- return ReinterpretCast<UintPtrT>(
+ return UncheckedCast<UintPtrT>(
raw_assembler()->ChangeFloat64ToUint64(value));
}
- return ReinterpretCast<UintPtrT>(
- raw_assembler()->ChangeFloat64ToUint32(value));
+ return UncheckedCast<UintPtrT>(raw_assembler()->ChangeFloat64ToUint32(value));
}
TNode<Float64T> CodeAssembler::ChangeUintPtrToFloat64(TNode<UintPtrT> value) {
if (raw_assembler()->machine()->Is64()) {
// TODO(turbofan): Maybe we should introduce a ChangeUint64ToFloat64
// machine operator to TurboFan here?
- return ReinterpretCast<Float64T>(
+ return UncheckedCast<Float64T>(
raw_assembler()->RoundUint64ToFloat64(value));
}
- return ReinterpretCast<Float64T>(
- raw_assembler()->ChangeUint32ToFloat64(value));
+ return UncheckedCast<Float64T>(raw_assembler()->ChangeUint32ToFloat64(value));
}
-Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
+TNode<Float64T> CodeAssembler::RoundIntPtrToFloat64(Node* value) {
if (raw_assembler()->machine()->Is64()) {
- return raw_assembler()->RoundInt64ToFloat64(value);
+ return UncheckedCast<Float64T>(raw_assembler()->RoundInt64ToFloat64(value));
}
- return raw_assembler()->ChangeInt32ToFloat64(value);
+ return UncheckedCast<Float64T>(raw_assembler()->ChangeInt32ToFloat64(value));
}
#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
@@ -808,35 +808,6 @@ Node* CodeAssembler::Projection(int index, Node* value) {
return raw_assembler()->Projection(index, value);
}
-void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
- Variable* exception_var) {
- if (if_exception == nullptr) {
- // If no handler is supplied, don't add continuations
- return;
- }
-
- // No catch handlers should be active if we're using catch labels
- DCHECK_EQ(state()->exception_handler_labels_.size(), 0);
- DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
-
- Label success(this), exception(this, Label::kDeferred);
- success.MergeVariables();
- exception.MergeVariables();
-
- raw_assembler()->Continuations(node, success.label_, exception.label_);
-
- Bind(&exception);
- const Operator* op = raw_assembler()->common()->IfException();
- Node* exception_value = raw_assembler()->AddNode(op, node, node);
- if (exception_var != nullptr) {
- exception_var->Bind(exception_value);
- }
- Goto(if_exception);
-
- Bind(&success);
- raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
-}
-
TNode<HeapObject> CodeAssembler::OptimizedAllocate(
TNode<IntPtrT> size, AllocationType allocation,
AllowLargeObjects allow_large_objects) {
@@ -1023,6 +994,33 @@ Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
inputs.data());
}
+Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
+ TNode<Object> target, TNode<Object> context,
+ TNode<Object> function,
+ TNode<Object> new_target,
+ TNode<Int32T> arity,
+ std::initializer_list<Node*> args) {
+ constexpr size_t kMaxNumArgs = 10;
+ DCHECK_GE(kMaxNumArgs, args.size());
+ NodeArray<kMaxNumArgs + 5> inputs;
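+ // +5 extra slots: target, function, new_target, arity, and context.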
+ inputs.Add(target);
+ inputs.Add(function);
+ if (!new_target.is_null()) {
+ inputs.Add(new_target);
+ }
+ inputs.Add(arity);
+#ifdef V8_REVERSE_JSARGS
+ for (auto arg : base::Reversed(args)) inputs.Add(arg);
+#else
+ for (auto arg : args) inputs.Add(arg);
+#endif
+ if (descriptor.HasContextParameter()) {
+ inputs.Add(context);
+ }
+ return CallStubN(StubCallMode::kCallCodeObject, descriptor, 1, inputs.size(),
+ inputs.data());
+}
+
void CodeAssembler::TailCallStubThenBytecodeDispatchImpl(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args) {
@@ -1112,21 +1110,19 @@ void CodeAssembler::Goto(Label* label) {
raw_assembler()->Goto(label->label_);
}
-void CodeAssembler::GotoIf(SloppyTNode<IntegralT> condition,
- Label* true_label) {
+void CodeAssembler::GotoIf(TNode<IntegralT> condition, Label* true_label) {
Label false_label(this);
Branch(condition, true_label, &false_label);
Bind(&false_label);
}
-void CodeAssembler::GotoIfNot(SloppyTNode<IntegralT> condition,
- Label* false_label) {
+void CodeAssembler::GotoIfNot(TNode<IntegralT> condition, Label* false_label) {
Label true_label(this);
Branch(condition, &true_label, false_label);
Bind(&true_label);
}
-void CodeAssembler::Branch(SloppyTNode<IntegralT> condition, Label* true_label,
+void CodeAssembler::Branch(TNode<IntegralT> condition, Label* true_label,
Label* false_label) {
int32_t constant;
if (ToInt32Constant(condition, &constant)) {
@@ -1557,7 +1553,7 @@ void CodeAssemblerState::PopExceptionHandler() {
exception_handler_labels_.pop_back();
}
-CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler(
+ScopedExceptionHandler::ScopedExceptionHandler(
CodeAssembler* assembler, CodeAssemblerExceptionHandlerLabel* label)
: has_handler_(label != nullptr),
assembler_(assembler),
@@ -1568,7 +1564,7 @@ CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler(
}
}
-CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler(
+ScopedExceptionHandler::ScopedExceptionHandler(
CodeAssembler* assembler, CodeAssemblerLabel* label,
TypedCodeAssemblerVariable<Object>* exception)
: has_handler_(label != nullptr),
@@ -1582,7 +1578,7 @@ CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler(
}
}
-CodeAssemblerScopedExceptionHandler::~CodeAssemblerScopedExceptionHandler() {
+ScopedExceptionHandler::~ScopedExceptionHandler() {
if (has_handler_) {
assembler_->state()->PopExceptionHandler();
}
@@ -1594,7 +1590,7 @@ CodeAssemblerScopedExceptionHandler::~CodeAssemblerScopedExceptionHandler() {
}
TNode<Object> e;
assembler_->Bind(label_.get(), &e);
- *exception_ = e;
+ if (exception_ != nullptr) *exception_ = e;
assembler_->Goto(compatibility_label_);
if (inside_block) {
assembler_->Bind(&skip);
@@ -1625,6 +1621,7 @@ Address CheckObjectType(Address raw_value, Address raw_type,
TYPE_CASE(Object)
TYPE_CASE(Smi)
+ TYPE_CASE(TaggedIndex)
TYPE_CASE(HeapObject)
OBJECT_TYPE_LIST(TYPE_CASE)
HEAP_OBJECT_TYPE_LIST(TYPE_CASE)
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index af49852015..3a137fdee2 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -29,6 +29,8 @@
#include "src/objects/maybe-object.h"
#include "src/objects/objects.h"
#include "src/objects/oddball.h"
+#include "src/objects/smi.h"
+#include "src/objects/tagged-index.h"
#include "src/runtime/runtime.h"
#include "src/utils/allocation.h"
#include "src/zone/zone-containers.h"
@@ -62,8 +64,8 @@ class JSSegmentIterator;
class JSSegmenter;
class JSV8BreakIterator;
class JSWeakCollection;
-class JSFinalizationGroup;
-class JSFinalizationGroupCleanupIterator;
+class JSFinalizationRegistry;
+class JSFinalizationRegistryCleanupIterator;
class JSWeakMap;
class JSWeakRef;
class JSWeakSet;
@@ -75,7 +77,7 @@ class PromiseRejectReactionJobTask;
class WasmDebugInfo;
class Zone;
#define MAKE_FORWARD_DECLARATION(V, NAME, Name, name) class Name;
-TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED)
+TORQUE_INTERNAL_CLASS_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED)
#undef MAKE_FORWARD_DECLARATION
template <typename T>
@@ -86,6 +88,7 @@ class Signature;
enum class ObjectType {
ENUM_ELEMENT(Object) //
ENUM_ELEMENT(Smi) //
+ ENUM_ELEMENT(TaggedIndex) //
ENUM_ELEMENT(HeapObject) //
OBJECT_TYPE_LIST(ENUM_ELEMENT) //
HEAP_OBJECT_TYPE_LIST(ENUM_ELEMENT) //
@@ -163,6 +166,7 @@ struct ObjectTypeOf {};
};
OBJECT_TYPE_CASE(Object)
OBJECT_TYPE_CASE(Smi)
+OBJECT_TYPE_CASE(TaggedIndex)
OBJECT_TYPE_CASE(HeapObject)
OBJECT_TYPE_LIST(OBJECT_TYPE_CASE)
HEAP_OBJECT_ORDINARY_TYPE_LIST(OBJECT_TYPE_CASE)
@@ -508,6 +512,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<UintPtrT> UintPtrConstant(uintptr_t value) {
return Unsigned(IntPtrConstant(bit_cast<intptr_t>(value)));
}
+ TNode<TaggedIndex> TaggedIndexConstant(intptr_t value);
TNode<RawPtrT> PointerConstant(void* value) {
return ReinterpretCast<RawPtrT>(IntPtrConstant(bit_cast<intptr_t>(value)));
}
@@ -570,7 +575,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Return(TNode<WordT> value1, TNode<WordT> value2);
void PopAndReturn(Node* pop, Node* value);
- void ReturnIf(Node* condition, TNode<Object> value);
+ void ReturnIf(TNode<BoolT> condition, TNode<Object> value);
void AbortCSAAssert(Node* message);
void DebugBreak();
@@ -597,9 +602,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Bind(Label* label, AssemblerDebugInfo debug_info);
#endif // DEBUG
void Goto(Label* label);
- void GotoIf(SloppyTNode<IntegralT> condition, Label* true_label);
- void GotoIfNot(SloppyTNode<IntegralT> condition, Label* false_label);
- void Branch(SloppyTNode<IntegralT> condition, Label* true_label,
+ void GotoIf(TNode<IntegralT> condition, Label* true_label);
+ void GotoIfNot(TNode<IntegralT> condition, Label* false_label);
+ void Branch(TNode<IntegralT> condition, Label* true_label,
Label* false_label);
template <class T>
@@ -620,6 +625,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
if_false->AddInputs(args...);
Branch(condition, if_true->plain_label(), if_false->plain_label());
}
+ template <class... T, class... U>
+ void Branch(TNode<BoolT> condition,
+ CodeAssemblerParameterizedLabel<T...>* if_true,
+ std::vector<Node*> args_true,
+ CodeAssemblerParameterizedLabel<U...>* if_false,
+ std::vector<Node*> args_false) {
+ if_true->AddInputsVector(std::move(args_true));
+ if_false->AddInputsVector(std::move(args_false));
+ Branch(condition, if_true->plain_label(), if_false->plain_label());
+ }
template <class... T, class... Args>
void Goto(CodeAssemblerParameterizedLabel<T...>* label, Args... args) {
@@ -642,8 +657,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<RawPtrT> LoadParentFramePointer();
// Poison |value| on speculative paths.
- TNode<Object> TaggedPoisonOnSpeculation(SloppyTNode<Object> value);
- TNode<WordT> WordPoisonOnSpeculation(SloppyTNode<WordT> value);
+ TNode<Object> TaggedPoisonOnSpeculation(TNode<Object> value);
+ TNode<WordT> WordPoisonOnSpeculation(TNode<WordT> value);
// Load raw memory location.
Node* Load(MachineType type, Node* base,
@@ -905,18 +920,18 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Changes a double to an intptr_t for pointer arithmetic outside of Smi range.
// Assumes that the double can be exactly represented as an int.
TNode<IntPtrT> ChangeFloat64ToIntPtr(TNode<Float64T> value);
- TNode<UintPtrT> ChangeFloat64ToUintPtr(SloppyTNode<Float64T> value);
+ TNode<UintPtrT> ChangeFloat64ToUintPtr(TNode<Float64T> value);
// Same in the opposite direction.
TNode<Float64T> ChangeUintPtrToFloat64(TNode<UintPtrT> value);
// Changes an intptr_t to a double, e.g. for storing an element index
// outside Smi range in a HeapNumber. Lossless on 32-bit,
// rounds on 64-bit (which doesn't affect valid element indices).
- Node* RoundIntPtrToFloat64(Node* value);
+ TNode<Float64T> RoundIntPtrToFloat64(Node* value);
// No-op on 32-bit, otherwise zero extend.
- TNode<UintPtrT> ChangeUint32ToWord(SloppyTNode<Word32T> value);
+ TNode<UintPtrT> ChangeUint32ToWord(TNode<Word32T> value);
// No-op on 32-bit, otherwise sign extend.
- TNode<IntPtrT> ChangeInt32ToIntPtr(SloppyTNode<Word32T> value);
+ TNode<IntPtrT> ChangeInt32ToIntPtr(TNode<Word32T> value);
// No-op that guarantees that the value is kept alive till this point even
// if GC happens.
@@ -935,26 +950,26 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Calls
template <class... TArgs>
- TNode<Object> CallRuntime(Runtime::FunctionId function,
- SloppyTNode<Object> context, TArgs... args) {
+ TNode<Object> CallRuntime(Runtime::FunctionId function, TNode<Object> context,
+ TArgs... args) {
return CallRuntimeImpl(function, context,
- {implicit_cast<SloppyTNode<Object>>(args)...});
+ {implicit_cast<TNode<Object>>(args)...});
}
template <class... TArgs>
- void TailCallRuntime(Runtime::FunctionId function,
- SloppyTNode<Object> context, TArgs... args) {
+ void TailCallRuntime(Runtime::FunctionId function, TNode<Object> context,
+ TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
TNode<Int32T> arity = Int32Constant(argc);
return TailCallRuntimeImpl(function, arity, context,
- {implicit_cast<SloppyTNode<Object>>(args)...});
+ {implicit_cast<TNode<Object>>(args)...});
}
template <class... TArgs>
void TailCallRuntime(Runtime::FunctionId function, TNode<Int32T> arity,
- SloppyTNode<Object> context, TArgs... args) {
+ TNode<Object> context, TArgs... args) {
return TailCallRuntimeImpl(function, arity, context,
- {implicit_cast<SloppyTNode<Object>>(args)...});
+ {implicit_cast<TNode<Object>>(args)...});
}
//
@@ -962,7 +977,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
//
template <class T = Object, class... TArgs>
- TNode<T> CallStub(Callable const& callable, SloppyTNode<Object> context,
+ TNode<T> CallStub(Callable const& callable, TNode<Object> context,
TArgs... args) {
TNode<Code> target = HeapConstant(callable.code());
return CallStub<T>(callable.descriptor(), target, context, args...);
@@ -970,8 +985,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class T = Object, class... TArgs>
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
- SloppyTNode<Code> target, SloppyTNode<Object> context,
- TArgs... args) {
+ TNode<Code> target, TNode<Object> context, TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallCodeObject, descriptor,
1, target, context, args...));
}
@@ -979,8 +993,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
Node* CallStubR(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor, size_t result_size,
- SloppyTNode<Object> target, SloppyTNode<Object> context,
- TArgs... args) {
+ TNode<Object> target, TNode<Object> context, TArgs... args) {
return CallStubRImpl(call_mode, descriptor, result_size, target, context,
{args...});
}
@@ -998,7 +1011,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
template <class... TArgs>
- void TailCallStub(Callable const& callable, SloppyTNode<Object> context,
+ void TailCallStub(Callable const& callable, TNode<Object> context,
TArgs... args) {
TNode<Code> target = HeapConstant(callable.code());
TailCallStub(callable.descriptor(), target, context, args...);
@@ -1006,8 +1019,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
void TailCallStub(const CallInterfaceDescriptor& descriptor,
- SloppyTNode<Code> target, SloppyTNode<Object> context,
- TArgs... args) {
+ TNode<Code> target, TNode<Object> context, TArgs... args) {
TailCallStubImpl(descriptor, target, context, {args...});
}
@@ -1039,19 +1051,22 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* receiver, TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
TNode<Int32T> arity = Int32Constant(argc);
- return CallStub(callable, context, function, arity, receiver, args...);
+ TNode<Code> target = HeapConstant(callable.code());
+ return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
+ CAST(function), TNode<Object>(), arity,
+ {receiver, args...}));
}
template <class... TArgs>
Node* ConstructJSWithTarget(Callable const& callable, Node* context,
- Node* target, Node* new_target, TArgs... args) {
+ Node* function, Node* new_target, TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
-
- // Construct(target, new_target, arity, receiver, arguments...)
- return CallStub(callable, context, target, new_target, arity, receiver,
- args...);
+ TNode<Code> target = HeapConstant(callable.code());
+ return CallJSStubImpl(callable.descriptor(), target, CAST(context),
+ CAST(function), CAST(new_target), arity,
+ {receiver, args...});
}
template <class... TArgs>
Node* ConstructJS(Callable const& callable, Node* context, Node* new_target,
@@ -1100,10 +1115,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
{cargs...});
}
- // Exception handling support.
- void GotoIfException(Node* node, Label* if_exception,
- Variable* exception_var = nullptr);
-
// Helpers which delegate to RawMachineAssembler.
Factory* factory() const;
Isolate* isolate() const;
@@ -1164,6 +1175,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
size_t result_size, TNode<Object> target,
TNode<Object> context, std::initializer_list<Node*> args);
+ Node* CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
+ TNode<Object> target, TNode<Object> context,
+ TNode<Object> function, TNode<Object> new_target,
+ TNode<Int32T> arity, std::initializer_list<Node*> args);
+
// These two don't have definitions and are here only for catching use cases
// where the cast is not necessary.
TNode<Int32T> Signed(TNode<Int32T> x);
@@ -1349,6 +1365,9 @@ class CodeAssemblerParameterizedLabel
private:
friend class CodeAssembler;
+ void AddInputsVector(std::vector<Node*> inputs) {
+ CodeAssemblerParameterizedLabelBase::AddInputs(std::move(inputs));
+ }
void AddInputs(TNode<Types>... inputs) {
CodeAssemblerParameterizedLabelBase::AddInputs(
std::vector<Node*>{inputs...});
@@ -1403,7 +1422,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
friend class CodeAssemblerVariable;
friend class CodeAssemblerTester;
friend class CodeAssemblerParameterizedLabelBase;
- friend class CodeAssemblerScopedExceptionHandler;
+ friend class ScopedExceptionHandler;
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, Code::Kind kind,
@@ -1432,18 +1451,17 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
DISALLOW_COPY_AND_ASSIGN(CodeAssemblerState);
};
-class V8_EXPORT_PRIVATE CodeAssemblerScopedExceptionHandler {
+class V8_EXPORT_PRIVATE ScopedExceptionHandler {
public:
- CodeAssemblerScopedExceptionHandler(
- CodeAssembler* assembler, CodeAssemblerExceptionHandlerLabel* label);
+ ScopedExceptionHandler(CodeAssembler* assembler,
+ CodeAssemblerExceptionHandlerLabel* label);
// Use this constructor for compatibility/ports of old CSA code only. New code
// should use the CodeAssemblerExceptionHandlerLabel version.
- CodeAssemblerScopedExceptionHandler(
- CodeAssembler* assembler, CodeAssemblerLabel* label,
- TypedCodeAssemblerVariable<Object>* exception);
+ ScopedExceptionHandler(CodeAssembler* assembler, CodeAssemblerLabel* label,
+ TypedCodeAssemblerVariable<Object>* exception);
- ~CodeAssemblerScopedExceptionHandler();
+ ~ScopedExceptionHandler();
private:
bool has_handler_;
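
A note on the SloppyTNode changes running through this header: SloppyTNode<T> converts implicitly from a raw Node*, so call sites could pass untyped nodes unchecked, whereas TNode<T> requires the caller to establish the type. A minimal sketch of the difference, using hypothetical stand-in types rather than the real CSA templates (which also encode machine-representation subtyping):

    struct Node {};

    template <class T>
    struct TNode {
      explicit TNode(Node* n) : node(n) {}  // explicit: no silent Node* -> TNode<T>
      Node* node;
    };

    template <class T>
    struct SloppyTNode : TNode<T> {
      SloppyTNode(Node* n) : TNode<T>(n) {}  // implicit: any Node* converts
    };

    void TakesTyped(TNode<int>) {}
    void TakesSloppy(SloppyTNode<int>) {}

    int main() {
      Node raw;
      TakesSloppy(&raw);             // compiles: implicit conversion
      // TakesTyped(&raw);           // rejected: caller must prove the type
      TakesTyped(TNode<int>(&raw));  // the cast makes the assumption visible
    }

Migrating a signature from SloppyTNode to TNode therefore turns each loosely typed call site into a compile error until it is given a properly typed value or an explicit cast.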
diff --git a/deps/v8/src/compiler/common-node-cache.cc b/deps/v8/src/compiler/common-node-cache.cc
index 92c9c78c71..97d2774016 100644
--- a/deps/v8/src/compiler/common-node-cache.cc
+++ b/deps/v8/src/compiler/common-node-cache.cc
@@ -12,21 +12,23 @@ namespace internal {
namespace compiler {
Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
- return external_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
+ return external_constants_.Find(bit_cast<intptr_t>(value.address()));
}
Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
- return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
+ return heap_constants_.Find(bit_cast<intptr_t>(value.address()));
}
void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
int32_constants_.GetCachedNodes(nodes);
int64_constants_.GetCachedNodes(nodes);
+ tagged_index_constants_.GetCachedNodes(nodes);
float32_constants_.GetCachedNodes(nodes);
float64_constants_.GetCachedNodes(nodes);
external_constants_.GetCachedNodes(nodes);
+ pointer_constants_.GetCachedNodes(nodes);
number_constants_.GetCachedNodes(nodes);
heap_constants_.GetCachedNodes(nodes);
relocatable_int32_constants_.GetCachedNodes(nodes);
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
index 6a36c979a1..b1a8370a7f 100644
--- a/deps/v8/src/compiler/common-node-cache.h
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -22,58 +22,70 @@ namespace compiler {
// Bundles various caches for common nodes.
class CommonNodeCache final {
public:
- explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
+ explicit CommonNodeCache(Zone* zone)
+ : int32_constants_(zone),
+ int64_constants_(zone),
+ tagged_index_constants_(zone),
+ float32_constants_(zone),
+ float64_constants_(zone),
+ external_constants_(zone),
+ pointer_constants_(zone),
+ number_constants_(zone),
+ heap_constants_(zone),
+ relocatable_int32_constants_(zone),
+ relocatable_int64_constants_(zone) {}
~CommonNodeCache() = default;
Node** FindInt32Constant(int32_t value) {
- return int32_constants_.Find(zone(), value);
+ return int32_constants_.Find(value);
}
Node** FindInt64Constant(int64_t value) {
- return int64_constants_.Find(zone(), value);
+ return int64_constants_.Find(value);
+ }
+
+ Node** FindTaggedIndexConstant(int32_t value) {
+ return tagged_index_constants_.Find(value);
}
Node** FindFloat32Constant(float value) {
// We canonicalize float constants at the bit representation level.
- return float32_constants_.Find(zone(), bit_cast<int32_t>(value));
+ return float32_constants_.Find(bit_cast<int32_t>(value));
}
Node** FindFloat64Constant(double value) {
// We canonicalize double constants at the bit representation level.
- return float64_constants_.Find(zone(), bit_cast<int64_t>(value));
+ return float64_constants_.Find(bit_cast<int64_t>(value));
}
Node** FindExternalConstant(ExternalReference value);
Node** FindPointerConstant(intptr_t value) {
- return pointer_constants_.Find(zone(), value);
+ return pointer_constants_.Find(value);
}
Node** FindNumberConstant(double value) {
// We canonicalize double constants at the bit representation level.
- return number_constants_.Find(zone(), bit_cast<int64_t>(value));
+ return number_constants_.Find(bit_cast<int64_t>(value));
}
Node** FindHeapConstant(Handle<HeapObject> value);
Node** FindRelocatableInt32Constant(int32_t value, RelocInfoMode rmode) {
- return relocatable_int32_constants_.Find(zone(),
- std::make_pair(value, rmode));
+ return relocatable_int32_constants_.Find(std::make_pair(value, rmode));
}
Node** FindRelocatableInt64Constant(int64_t value, RelocInfoMode rmode) {
- return relocatable_int64_constants_.Find(zone(),
- std::make_pair(value, rmode));
+ return relocatable_int64_constants_.Find(std::make_pair(value, rmode));
}
// Return all nodes from the cache.
void GetCachedNodes(ZoneVector<Node*>* nodes);
- Zone* zone() const { return zone_; }
-
private:
Int32NodeCache int32_constants_;
Int64NodeCache int64_constants_;
+ Int32NodeCache tagged_index_constants_;
Int32NodeCache float32_constants_;
Int64NodeCache float64_constants_;
IntPtrNodeCache external_constants_;
@@ -82,7 +94,6 @@ class CommonNodeCache final {
IntPtrNodeCache heap_constants_;
RelocInt32NodeCache relocatable_int32_constants_;
RelocInt64NodeCache relocatable_int64_constants_;
- Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
};
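
The constructor change above follows from NodeCache dropping the Zone* parameter from Find(): each cache now captures its allocation zone once at construction. A hedged sketch of the ownership change, with a stand-in cache in place of V8's templated NodeCache:

    #include <unordered_map>

    struct Zone {};  // stand-in for the real arena allocator
    struct Node {};

    class IntNodeCacheSketch {
     public:
      // Before this patch the zone arrived on every Find() call; now the
      // cache owns the pointer and lookups take only the key.
      explicit IntNodeCacheSketch(Zone* zone) : zone_(zone) {}
      Node** Find(int key) { return &map_[key]; }
     private:
      Zone* zone_;  // kept for allocating cache storage on growth
      std::unordered_map<int, Node*> map_;
    };

    int main() {
      Zone zone;
      IntNodeCacheSketch cache(&zone);
      return *cache.Find(1) == nullptr ? 0 : 1;  // empty slot for a new key
    }

This also lets CommonNodeCache drop its own zone_ member and the zone() accessor, as the removal at the end of the hunk shows.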
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 5dd765527f..95ecb0f820 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -21,6 +21,9 @@ namespace {
Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
switch (cond->opcode()) {
+ case IrOpcode::kFoldConstant: {
+ return DecideCondition(broker, cond->InputAt(1));
+ }
case IrOpcode::kInt32Constant: {
Int32Matcher mcond(cond);
return mcond.Value() ? Decision::kTrue : Decision::kFalse;
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 57cde2ed6e..e548650316 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -257,7 +257,8 @@ size_t hash_value(RelocatablePtrConstantInfo const& p) {
std::ostream& operator<<(std::ostream& os,
RelocatablePtrConstantInfo const& p) {
- return os << p.value() << ", " << p.rmode() << ", " << p.type();
+ return os << p.value() << ", " << static_cast<int>(p.rmode()) << ", "
+ << p.type();
}
SparseInputMask::InputIterator::InputIterator(
@@ -281,6 +282,15 @@ void SparseInputMask::InputIterator::Advance() {
bit_mask_ >>= 1;
}
+size_t SparseInputMask::InputIterator::AdvanceToNextRealOrEnd() {
+ DCHECK_NE(bit_mask_, SparseInputMask::kDenseBitMask);
+
+ size_t count = base::bits::CountTrailingZeros(bit_mask_);
+ bit_mask_ >>= count;
+ DCHECK(IsReal() || IsEnd());
+ return count;
+}
+
Node* SparseInputMask::InputIterator::GetReal() const {
DCHECK(IsReal());
return parent_->InputAt(real_index_);
@@ -454,8 +464,6 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(IfException, Operator::kKontrol, 0, 1, 1, 1, 1, 1) \
V(Throw, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
- V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
- V(OsrLoopEntry, Operator::kFoldable | Operator::kNoThrow, 0, 1, 1, 0, 1, 1) \
V(LoopExit, Operator::kKontrol, 0, 0, 2, 0, 0, 1) \
V(LoopExitValue, Operator::kPure, 1, 0, 1, 1, 0, 0) \
V(LoopExitEffect, Operator::kNoThrow, 0, 1, 1, 0, 1, 0) \
@@ -1159,6 +1167,13 @@ const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) {
value); // parameter
}
+const Operator* CommonOperatorBuilder::TaggedIndexConstant(int32_t value) {
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kTaggedIndexConstant, Operator::kPure, // opcode
+ "TaggedIndexConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
+}
const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
return new (zone()) Operator1<float>( // --
@@ -1295,6 +1310,13 @@ const Operator* CommonOperatorBuilder::TypeGuard(Type type) {
type); // parameter
}
+const Operator* CommonOperatorBuilder::FoldConstant() {
+ return new (zone()) Operator( // --
+ IrOpcode::kFoldConstant, Operator::kPure, // opcode
+ "FoldConstant", // name
+ 2, 0, 0, 1, 0, 0); // counts
+}
+
const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
DCHECK_LT(0, effect_input_count); // Disallow empty effect phis.
switch (effect_input_count) {
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 2b0dcc7db9..14bb48b073 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -273,6 +273,11 @@ class SparseInputMask final {
// current sparse input is real.
Node* GetReal() const;
+ // Advance to the next real value or the end. Only valid if the iterator is
+ // not dense. Returns the number of empty values that were skipped. This can
+ // return 0, in which case the iterator does not advance.
+ size_t AdvanceToNextRealOrEnd();
+
// Get the current sparse input, returning either a real input node if
// the current sparse input is real, or the given {empty_value} if the
// current sparse input is empty.
@@ -487,12 +492,11 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Merge(int control_input_count);
const Operator* Parameter(int index, const char* debug_name = nullptr);
- const Operator* OsrNormalEntry();
- const Operator* OsrLoopEntry();
const Operator* OsrValue(int index);
const Operator* Int32Constant(int32_t);
const Operator* Int64Constant(int64_t);
+ const Operator* TaggedIndexConstant(int32_t value);
const Operator* Float32Constant(volatile float);
const Operator* Float64Constant(volatile double);
const Operator* ExternalConstant(const ExternalReference&);
@@ -534,6 +538,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Projection(size_t index);
const Operator* Retain();
const Operator* TypeGuard(Type type);
+ const Operator* FoldConstant();
// Constructs a new merge or phi operator with the same opcode as {op}, but
// with {size} inputs.
diff --git a/deps/v8/src/compiler/constant-folding-reducer.cc b/deps/v8/src/compiler/constant-folding-reducer.cc
index 5a903273ed..9649dbda08 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.cc
+++ b/deps/v8/src/compiler/constant-folding-reducer.cc
@@ -11,6 +11,51 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+Node* TryGetConstant(JSGraph* jsgraph, Node* node) {
+ Type type = NodeProperties::GetType(node);
+ Node* result;
+ if (type.IsNone()) {
+ result = nullptr;
+ } else if (type.Is(Type::Null())) {
+ result = jsgraph->NullConstant();
+ } else if (type.Is(Type::Undefined())) {
+ result = jsgraph->UndefinedConstant();
+ } else if (type.Is(Type::MinusZero())) {
+ result = jsgraph->MinusZeroConstant();
+ } else if (type.Is(Type::NaN())) {
+ result = jsgraph->NaNConstant();
+ } else if (type.Is(Type::Hole())) {
+ result = jsgraph->TheHoleConstant();
+ } else if (type.IsHeapConstant()) {
+ result = jsgraph->Constant(type.AsHeapConstant()->Ref());
+ } else if (type.Is(Type::PlainNumber()) && type.Min() == type.Max()) {
+ result = jsgraph->Constant(type.Min());
+ } else {
+ result = nullptr;
+ }
+ DCHECK_EQ(result != nullptr, type.IsSingleton());
+ DCHECK_IMPLIES(result != nullptr,
+ type.Equals(NodeProperties::GetType(result)));
+ return result;
+}
+
+bool IsAlreadyBeingFolded(Node* node) {
+ DCHECK(FLAG_assert_types);
+ if (node->opcode() == IrOpcode::kFoldConstant) return true;
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsValueEdge(edge) &&
+ edge.from()->opcode() == IrOpcode::kFoldConstant) {
+ // Note: {node} may have gained new value uses since the time it was
+ // "constant-folded", and theses uses should ideally be rewritten as well.
+ // For simplicity, we ignore them here.
+ return true;
+ }
+ }
+ return false;
+}
+} // namespace
+
ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker)
: AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {}
@@ -19,43 +64,27 @@ ConstantFoldingReducer::~ConstantFoldingReducer() = default;
Reduction ConstantFoldingReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
- // Check if the output type is a singleton. In that case we already know the
- // result value and can simply replace the node if it's eliminable.
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
- node->op()->HasProperty(Operator::kEliminatable)) {
- // TODO(v8:5303): We must not eliminate FinishRegion here. This special
- // case can be removed once we have separate operators for value and
- // effect regions.
- if (node->opcode() == IrOpcode::kFinishRegion) return NoChange();
- // We can only constant-fold nodes here, that are known to not cause any
- // side-effect, may it be a JavaScript observable side-effect or a possible
- // eager deoptimization exit (i.e. {node} has an operator that doesn't have
- // the Operator::kNoDeopt property).
- Type upper = NodeProperties::GetType(node);
- if (!upper.IsNone()) {
- Node* replacement = nullptr;
- if (upper.IsHeapConstant()) {
- replacement = jsgraph()->Constant(upper.AsHeapConstant()->Ref());
- } else if (upper.Is(Type::MinusZero())) {
- Factory* factory = jsgraph()->isolate()->factory();
- ObjectRef minus_zero(broker(), factory->minus_zero_value());
- replacement = jsgraph()->Constant(minus_zero);
- } else if (upper.Is(Type::NaN())) {
- replacement = jsgraph()->NaNConstant();
- } else if (upper.Is(Type::Null())) {
- replacement = jsgraph()->NullConstant();
- } else if (upper.Is(Type::PlainNumber()) && upper.Min() == upper.Max()) {
- replacement = jsgraph()->Constant(upper.Min());
- } else if (upper.Is(Type::Undefined())) {
- replacement = jsgraph()->UndefinedConstant();
- }
- if (replacement) {
- // Make sure the node has a type.
- if (!NodeProperties::IsTyped(replacement)) {
- NodeProperties::SetType(replacement, upper);
- }
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
+ node->op()->HasProperty(Operator::kEliminatable) &&
+ node->opcode() != IrOpcode::kFinishRegion) {
+ Node* constant = TryGetConstant(jsgraph(), node);
+ if (constant != nullptr) {
+ DCHECK(NodeProperties::IsTyped(constant));
+ if (!FLAG_assert_types) {
+ DCHECK_EQ(node->op()->ControlOutputCount(), 0);
+ ReplaceWithValue(node, constant);
+ return Replace(constant);
+ } else if (!IsAlreadyBeingFolded(node)) {
+ // Delay the constant folding (by inserting a FoldConstant operation
+ // instead) in order to keep type assertions meaningful.
+ Node* fold_constant = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->FoldConstant(), node, constant);
+ DCHECK(NodeProperties::IsTyped(fold_constant));
+ ReplaceWithValue(node, fold_constant, node, node);
+ fold_constant->ReplaceInput(0, node);
+ DCHECK(IsAlreadyBeingFolded(node));
+ DCHECK(IsAlreadyBeingFolded(fold_constant));
+ return Changed(node);
}
}
}
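
The rewritten reducer splits singleton detection (TryGetConstant) from the replacement strategy. Without --assert-types the constant simply replaces all uses; with --assert-types it instead routes uses through a new FoldConstant(original, constant) node, so the type assertion inserted for the original computation is still checked at runtime before the constant is substituted. A minimal model of that decision, with hypothetical stand-in node structs:

    #include <cassert>

    struct NodeSketch {
      const char* op;
      NodeSketch* original = nullptr;
      NodeSketch* constant = nullptr;
    };

    NodeSketch* ReduceSketch(NodeSketch* node, NodeSketch* constant,
                             bool assert_types, NodeSketch* fold_storage) {
      if (!assert_types) return constant;  // direct replacement of all uses
      // Delayed folding: uses see FoldConstant(node, constant), keeping the
      // original computation alive for the runtime type assertion.
      *fold_storage = {"FoldConstant", node, constant};
      return fold_storage;
    }

    int main() {
      NodeSketch n{"NumberAdd"}, c{"NumberConstant"}, fold{};
      assert(ReduceSketch(&n, &c, /*assert_types=*/false, &fold) == &c);
      assert(ReduceSketch(&n, &c, /*assert_types=*/true, &fold)->constant == &c);
    }

The IsAlreadyBeingFolded guard above exists because this delayed form leaves the original node in the graph, so the reducer must not wrap it a second time.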
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index f39e6cabfb..bab6b7b506 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -317,7 +317,10 @@ Reduction DeadCodeElimination::ReduceDeoptimizeOrReturnOrTerminateOrTailCall(
node->opcode() == IrOpcode::kTailCall);
Reduction reduction = PropagateDeadControl(node);
if (reduction.Changed()) return reduction;
- if (FindDeadInput(node) != nullptr) {
+ // Terminate nodes are not part of actual control flow, so they should never
+ // be replaced with Throw.
+ if (node->opcode() != IrOpcode::kTerminate &&
+ FindDeadInput(node) != nullptr) {
Node* effect = NodeProperties::GetEffectInput(node, 0);
Node* control = NodeProperties::GetControlInput(node, 0);
if (effect->opcode() != IrOpcode::kUnreachable) {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 3f170df065..d3344b9545 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -75,6 +75,7 @@ class EffectControlLinearizer {
void LowerCheckMaps(Node* node, Node* frame_state);
Node* LowerCompareMaps(Node* node);
Node* LowerCheckNumber(Node* node, Node* frame_state);
+ Node* LowerCheckClosure(Node* node, Node* frame_state);
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
@@ -198,6 +199,7 @@ class EffectControlLinearizer {
void LowerTransitionAndStoreNonNumberElement(Node* node);
void LowerRuntimeAbort(Node* node);
Node* LowerAssertType(Node* node);
+ Node* LowerFoldConstant(Node* node);
Node* LowerConvertReceiver(Node* node);
Node* LowerDateNow(Node* node);
@@ -235,6 +237,10 @@ class EffectControlLinearizer {
Node* BuildTypedArrayDataPointer(Node* base, Node* external);
+ template <typename... Args>
+ Node* CallBuiltin(Builtins::Name builtin, Operator::Properties properties,
+ Args...);
+
Node* ChangeInt32ToSmi(Node* value);
// In pointer compression, we smi-corrupt. This means the upper bits of a Smi
// are not important. ChangeTaggedInt32ToSmi has a known tagged int32 as input
@@ -903,6 +909,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kPoisonIndex:
result = LowerPoisonIndex(node);
break;
+ case IrOpcode::kCheckClosure:
+ result = LowerCheckClosure(node, frame_state);
+ break;
case IrOpcode::kCheckMaps:
LowerCheckMaps(node, frame_state);
break;
@@ -1299,6 +1308,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kDateNow:
result = LowerDateNow(node);
break;
+ case IrOpcode::kFoldConstant:
+ result = LowerFoldConstant(node);
+ break;
default:
return false;
}
@@ -1702,6 +1714,30 @@ Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) {
return index;
}
+Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
+ Node* frame_state) {
+ Handle<FeedbackCell> feedback_cell = FeedbackCellOf(node->op());
+ Node* value = node->InputAt(0);
+
+ // Check that {value} is actually a JSFunction.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* check_instance_type =
+ __ Word32Equal(value_instance_type, __ Int32Constant(JS_FUNCTION_TYPE));
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongCallTarget, FeedbackSource(),
+ check_instance_type, frame_state);
+
+ // Check that the {value}'s feedback vector cell matches the one
+ // we recorded before.
+ Node* value_cell =
+ __ LoadField(AccessBuilder::ForJSFunctionFeedbackCell(), value);
+ Node* check_cell = __ WordEqual(value_cell, __ HeapConstant(feedback_cell));
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongFeedbackCell, FeedbackSource(),
+ check_cell, frame_state);
+ return value;
+}
+
void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
Node* value = node->InputAt(0);
@@ -2380,13 +2416,31 @@ Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
Node* frame_state) {
- CheckParameters const& params = CheckParametersOf(node->op());
Node* const index = node->InputAt(0);
Node* const limit = node->InputAt(1);
+ const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());
Node* check = __ Uint64LessThan(index, limit);
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, params.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ switch (params.mode()) {
+ case CheckBoundsParameters::kDeoptOnOutOfBounds:
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
+ params.check_parameters().feedback(), check,
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ break;
+ case CheckBoundsParameters::kAbortOnOutOfBounds: {
+ auto if_abort = __ MakeDeferredLabel();
+ auto done = __ MakeLabel();
+
+ __ Branch(check, &done, &if_abort);
+
+ __ Bind(&if_abort);
+ __ Unreachable();
+ __ Goto(&done);
+
+ __ Bind(&done);
+ break;
+ }
+ }
return index;
}
@@ -3432,9 +3486,9 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
__ Goto(&if_adaptor_frame);
__ Bind(&if_adaptor_frame);
- Node* arguments_length = __ Load(
- MachineType::TaggedSigned(), arguments_frame,
- __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset));
+ Node* arguments_length = __ BitcastWordToTaggedSigned(__ Load(
+ MachineType::Pointer(), arguments_frame,
+ __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset)));
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
@@ -3457,9 +3511,9 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
__ Goto(&if_adaptor_frame);
__ Bind(&if_adaptor_frame);
- Node* arguments_length = __ Load(
- MachineType::TaggedSigned(), arguments_frame,
- __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset));
+ Node* arguments_length = __ BitcastWordToTaggedSigned(__ Load(
+ MachineType::Pointer(), arguments_frame,
+ __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset)));
__ Goto(&done, arguments_length);
__ Bind(&done);
@@ -5421,28 +5475,39 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
__ Int32Constant(1), __ NoContextConstant());
}
+template <typename... Args>
+Node* EffectControlLinearizer::CallBuiltin(Builtins::Name builtin,
+ Operator::Properties properties,
+ Args... args) {
+ Callable const callable = Builtins::CallableFor(isolate(), builtin);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ properties);
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), args...,
+ __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerAssertType(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kAssertType);
Type type = OpParameter<Type>(node->op());
DCHECK(type.IsRange());
auto range = type.AsRange();
-
Node* const input = node->InputAt(0);
Node* const min = __ NumberConstant(range->Min());
Node* const max = __ NumberConstant(range->Max());
-
- {
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kCheckNumberInRange);
- Operator::Properties const properties = node->op()->properties();
- CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- callable.descriptor().GetStackParameterCount(), flags, properties);
- __ Call(call_descriptor, __ HeapConstant(callable.code()), input, min, max,
- __ NoContextConstant());
- return input;
- }
+ CallBuiltin(Builtins::kCheckNumberInRange, node->op()->properties(), input,
+ min, max);
+ return input;
+}
+
+Node* EffectControlLinearizer::LowerFoldConstant(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kFoldConstant);
+ Node* original = node->InputAt(0);
+ Node* constant = node->InputAt(1);
+ CallBuiltin(Builtins::kCheckSameObject, node->op()->properties(), original,
+ constant);
+ return constant;
}
Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
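
LowerAssertType and the new LowerFoldConstant now share the CallBuiltin helper defined just above, which folds the Callable/descriptor boilerplate into one variadic template that appends the implicit context argument. The forwarding pattern in isolation, with trivial stand-ins for the call machinery:

    #include <cstdio>

    struct NodeSketch { int id; };

    NodeSketch* MakeCallSketch(const char* name, int argc) {
      std::printf("call %s: %d args + context\n", name, argc);
      static NodeSketch n{0};
      return &n;
    }

    // Mirrors the shape of EffectControlLinearizer::CallBuiltin: the pack is
    // expanded before the trailing context the helper supplies itself.
    template <typename... Args>
    NodeSketch* CallBuiltinSketch(const char* builtin, Args... /*args*/) {
      return MakeCallSketch(builtin, static_cast<int>(sizeof...(Args)));
    }

    int main() {
      NodeSketch in{1}, min{2}, max{3};
      CallBuiltinSketch("CheckNumberInRange", &in, &min, &max);  // 3 + context
    }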
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 09b66c306f..246bf1e229 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -648,11 +648,20 @@ Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
effect(), control()));
}
+Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, int offset,
+ Node* value) {
+ return Store(rep, object, Int32Constant(offset), value);
+}
+
Node* GraphAssembler::Load(MachineType type, Node* object, Node* offset) {
return AddNode(graph()->NewNode(machine()->Load(type), object, offset,
effect(), control()));
}
+Node* GraphAssembler::Load(MachineType type, Node* object, int offset) {
+ return Load(type, object, Int32Constant(offset));
+}
+
Node* GraphAssembler::StoreUnaligned(MachineRepresentation rep, Node* object,
Node* offset, Node* value) {
Operator const* const op =
@@ -689,6 +698,11 @@ TNode<Number> JSGraphAssembler::PlainPrimitiveToNumber(TNode<Object> value) {
NoContextConstant(), effect()));
}
+Node* GraphAssembler::BitcastWordToTaggedSigned(Node* value) {
+ return AddNode(
+ graph()->NewNode(machine()->BitcastWordToTaggedSigned(), value));
+}
+
Node* GraphAssembler::BitcastWordToTagged(Node* value) {
return AddNode(graph()->NewNode(machine()->BitcastWordToTagged(), value,
effect(), control()));
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 92c37a1860..b9f605ae6e 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -87,6 +87,7 @@ class BasicBlock;
V(Word32Xor) \
V(Word64And) \
V(Word64Equal) \
+ V(Word64Or) \
V(WordAnd) \
V(WordEqual) \
V(WordSar) \
@@ -302,6 +303,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Float64RoundTruncate(Node* value);
Node* BitcastWordToTagged(Node* value);
+ Node* BitcastWordToTaggedSigned(Node* value);
Node* BitcastTaggedToWord(Node* value);
Node* BitcastTaggedToWordForTagAndSmiBits(Node* value);
@@ -309,7 +311,9 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Checkpoint(FrameState frame_state);
Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
+ Node* Store(StoreRepresentation rep, Node* object, int offset, Node* value);
Node* Load(MachineType type, Node* object, Node* offset);
+ Node* Load(MachineType type, Node* object, int offset);
Node* StoreUnaligned(MachineRepresentation rep, Node* object, Node* offset,
Node* value);
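
The int-offset Load/Store overloads declared here are thin conveniences: they wrap the immediate offset in an Int32Constant node and delegate to the Node*-offset primaries, as the .cc hunk above shows. The delegation pattern, sketched with placeholder types:

    struct NodeSketch {};

    NodeSketch* Int32ConstantSketch(int /*value*/) {
      static NodeSketch n;  // placeholder for a freshly built constant node
      return &n;
    }

    NodeSketch* LoadSketch(NodeSketch* object, NodeSketch* offset);  // primary

    NodeSketch* LoadSketch(NodeSketch* object, int offset) {  // convenience
      return LoadSketch(object, Int32ConstantSketch(offset));
    }

    NodeSketch* LoadSketch(NodeSketch* object, NodeSketch* /*offset*/) {
      return object;  // placeholder body; the real version emits a Load node
    }

    int main() {
      NodeSketch object;
      LoadSketch(&object, 8);  // resolves to the int overload
    }

Keeping a single primary implementation means the two overloads can never drift apart in behavior.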
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index c9bb6bf9d1..deeaa89c1e 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -12,6 +12,8 @@
#include "src/objects/instance-type.h"
namespace v8 {
+class CFunctionInfo;
+
namespace internal {
class BytecodeArray;
@@ -490,7 +492,7 @@ class FeedbackCellRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(FeedbackCell, HeapObjectRef)
Handle<FeedbackCell> object() const;
-
+ base::Optional<SharedFunctionInfoRef> shared_function_info() const;
HeapObjectRef value() const;
};
@@ -500,9 +502,11 @@ class FeedbackVectorRef : public HeapObjectRef {
Handle<FeedbackVector> object() const;
+ SharedFunctionInfoRef shared_function_info() const;
double invocation_count() const;
void Serialize();
+ bool serialized() const;
FeedbackCellRef GetClosureFeedbackCell(int index) const;
};
@@ -659,6 +663,8 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
void SerializeCallCode();
base::Optional<CallHandlerInfoRef> call_code() const;
+ Address c_function() const;
+ const CFunctionInfo* c_signature() const;
HolderLookupResult LookupHolderOfExpectedType(
MapRef receiver_map,
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index bc6267f375..ad68d34d03 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -1023,14 +1023,12 @@ bool Int64Lowering::DefaultLowering(Node* node, bool low_word_only) {
return something_changed;
}
-CallDescriptor* Int64Lowering::LowerCallDescriptor(
+const CallDescriptor* Int64Lowering::LowerCallDescriptor(
const CallDescriptor* call_descriptor) {
if (special_case_) {
- if (call_descriptor == special_case_->bigint_to_i64_call_descriptor) {
- return special_case_->bigint_to_i32_pair_call_descriptor;
- }
- if (call_descriptor == special_case_->i64_to_bigint_call_descriptor) {
- return special_case_->i32_pair_to_bigint_call_descriptor;
+ auto replacement = special_case_->replacements.find(call_descriptor);
+ if (replacement != special_case_->replacements.end()) {
+ return replacement->second;
}
}
return GetI32WasmCallDescriptor(zone(), call_descriptor);
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 0190d3a9c4..944d2bc32a 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -24,19 +24,9 @@ namespace compiler {
// Struct for CallDescriptors that need special lowering.
struct V8_EXPORT_PRIVATE Int64LoweringSpecialCase {
- Int64LoweringSpecialCase()
- : bigint_to_i64_call_descriptor(nullptr),
- i64_to_bigint_call_descriptor(nullptr),
- bigint_to_i32_pair_call_descriptor(nullptr),
- i32_pair_to_bigint_call_descriptor(nullptr) {}
-
- // CallDescriptors that need special lowering.
- CallDescriptor* bigint_to_i64_call_descriptor;
- CallDescriptor* i64_to_bigint_call_descriptor;
-
- // The replacement CallDescriptors.
- CallDescriptor* bigint_to_i32_pair_call_descriptor;
- CallDescriptor* i32_pair_to_bigint_call_descriptor;
+ // Maps CallDescriptors that should be replaced to their replacement
+ // CallDescriptors.
+ std::unordered_map<const CallDescriptor*, const CallDescriptor*> replacements;
};
class V8_EXPORT_PRIVATE Int64Lowering {
@@ -74,7 +64,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
void LowerWord64AtomicBinop(Node* node, const Operator* op);
void LowerWord64AtomicNarrowOp(Node* node, const Operator* op);
- CallDescriptor* LowerCallDescriptor(const CallDescriptor* call_descriptor);
+ const CallDescriptor* LowerCallDescriptor(
+ const CallDescriptor* call_descriptor);
void ReplaceNode(Node* old, Node* new_low, Node* new_high);
bool HasReplacementLow(Node* node);
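
Replacing the four named descriptor fields with one map means a special case is now just an entry in Int64LoweringSpecialCase::replacements, and LowerCallDescriptor becomes a plain lookup with fall-through to the generic i32 lowering. The lookup in miniature, with CallDescriptor reduced to an opaque tag type:

    #include <unordered_map>

    struct CallDescriptorSketch {};

    struct SpecialCaseSketch {
      std::unordered_map<const CallDescriptorSketch*,
                         const CallDescriptorSketch*> replacements;
    };

    const CallDescriptorSketch* LowerSketch(const SpecialCaseSketch& sc,
                                            const CallDescriptorSketch* desc) {
      auto it = sc.replacements.find(desc);
      if (it != sc.replacements.end()) return it->second;
      return desc;  // fall through to generic lowering in the real code
    }

    int main() {
      CallDescriptorSketch bigint_to_i64, bigint_to_i32_pair;
      SpecialCaseSketch sc;
      sc.replacements[&bigint_to_i64] = &bigint_to_i32_pair;
      return LowerSketch(sc, &bigint_to_i64) == &bigint_to_i32_pair ? 0 : 1;
    }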
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index dc4e4000ca..3fb41a8809 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -6,7 +6,9 @@
#include <functional>
+#include "include/v8-fast-api-calls.h"
#include "src/api/api-inl.h"
+#include "src/base/small-vector.h"
#include "src/builtins/builtins-promise.h"
#include "src/builtins/builtins-utils.h"
#include "src/codegen/code-factory.h"
@@ -864,6 +866,88 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
}
};
+class FastApiCallReducerAssembler : public JSCallReducerAssembler {
+ public:
+ FastApiCallReducerAssembler(JSGraph* jsgraph, Zone* zone, Node* node,
+ Address c_function,
+ const CFunctionInfo* c_signature)
+ : JSCallReducerAssembler(jsgraph, zone, node),
+ c_function_(c_function),
+ c_signature_(c_signature) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ DCHECK_NE(c_function_, kNullAddress);
+ CHECK_NOT_NULL(c_signature_);
+ }
+
+ TNode<Object> ReduceFastApiCall() {
+ int c_arg_count = c_signature_->ArgumentCount();
+ Node* function_node =
+ ExternalConstant(ExternalReference::Create(c_function_));
+ base::SmallVector<Node*, kInlineSize + kExtraInputsCount> inputs(0);
+ inputs.emplace_back(function_node);
+ int wrapper_object_index = isolate()->embedder_wrapper_object_index();
+ CHECK_GE(wrapper_object_index, 0);
+ for (int i = 0; i < c_arg_count; ++i) {
+ if (i + kFunctionArgCount < ValueInputCount()) {
+ inputs.emplace_back(ConvertArgumentIfJSWrapper(
+ c_signature_->ArgumentInfo(i).GetType(),
+ ValueInput(i + kFunctionArgCount), wrapper_object_index));
+ } else {
+ inputs.emplace_back(UndefinedConstant());
+ }
+ }
+ inputs.emplace_back(effect());
+ inputs.emplace_back(control());
+
+ return FastApiCall(inputs);
+ }
+
+ private:
+ static constexpr int kFunctionArgCount = 1;
+ static constexpr int kExtraInputsCount =
+ kFunctionArgCount + 2; // effect, control
+ static constexpr int kInlineSize = 10;
+
+ TNode<Object> FastApiCall(
+ base::SmallVector<Node*, kInlineSize + kExtraInputsCount> const& inputs) {
+ return AddNode<Object>(
+ graph()->NewNode(simplified()->FastApiCall(c_signature_, feedback()),
+ static_cast<int>(inputs.size()), inputs.begin()));
+ }
+
+ TNode<RawPtrT> UnwrapApiObject(TNode<JSObject> node,
+ int wrapper_object_index) {
+ const int offset =
+ Internals::kJSObjectHeaderSize +
+ (Internals::kEmbedderDataSlotSize * wrapper_object_index);
+
+ FieldAccess access(kTaggedBase, offset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), Type::Any(), MachineType::Pointer(),
+ WriteBarrierKind::kNoWriteBarrier);
+ TNode<RawPtrT> load = AddNode<RawPtrT>(graph()->NewNode(
+ simplified()->LoadField(access), node, effect(), control()));
+ return load;
+ }
+
+ Node* ConvertArgumentIfJSWrapper(CTypeInfo::Type type, TNode<Object> node,
+ int wrapper_object_index) {
+ switch (type) {
+ case CTypeInfo::Type::kUnwrappedApiObject:
+ // This call assumes that {node} is a JSObject with an internal field
+ // set to a C pointer. It should fail in all other cases.
+ // TODO(mslekova): Implement instanceOf check for the C pointer type.
+ // TODO(mslekova): Introduce a GraphAssembler primitive for safe cast.
+ return UnwrapApiObject(TNode<JSObject>::UncheckedCast(node),
+ wrapper_object_index);
+ default:
+ return node;
+ }
+ }
+
+ const Address c_function_;
+ const CFunctionInfo* const c_signature_;
+};
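
This assembler is the optimizing-compiler half of the fast API call path: when feedback pins down a call target whose FunctionTemplateInfo carries a C function address and CFunctionInfo signature, the JSCall is rewritten into a direct FastApiCall node. The argument loop pads missing JS arguments with undefined and unwraps kUnwrappedApiObject parameters into the embedder pointer stored in an internal field. A hedged sketch of that loop, with the graph machinery reduced to placeholder nodes:

    #include <vector>

    enum class ArgTypeSketch { kInt32, kUnwrappedApiObject };
    struct NodeSketch { const char* what; };

    NodeSketch* UndefinedSketch() { static NodeSketch n{"undefined"}; return &n; }
    NodeSketch* UnwrapSketch(NodeSketch*) { static NodeSketch n{"raw_ptr"}; return &n; }

    std::vector<NodeSketch*> BuildInputs(NodeSketch* c_function,
                                         const std::vector<ArgTypeSketch>& sig,
                                         const std::vector<NodeSketch*>& js_args) {
      std::vector<NodeSketch*> inputs{c_function};
      for (size_t i = 0; i < sig.size(); ++i) {
        if (i < js_args.size()) {
          // API objects become the raw embedder pointer; others pass through.
          inputs.push_back(sig[i] == ArgTypeSketch::kUnwrappedApiObject
                               ? UnwrapSketch(js_args[i])
                               : js_args[i]);
        } else {
          inputs.push_back(UndefinedSketch());  // pad missing JS arguments
        }
      }
      return inputs;  // the real code then appends effect and control inputs
    }

    int main() {
      NodeSketch fn{"c_function"}, arg{"receiver_wrapper"};
      BuildInputs(&fn, {ArgTypeSketch::kUnwrappedApiObject, ArgTypeSketch::kInt32},
                  {&arg});
    }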
+
TNode<Number> JSCallReducerAssembler::SpeculativeToNumber(
TNode<Object> value, NumberOperationHint hint) {
return AddNode<Number>(
@@ -3368,8 +3452,9 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// See if we can constant-fold the compatible receiver checks.
HolderLookupResult api_holder =
function_template_info.LookupHolderOfExpectedType(first_receiver_map);
- if (api_holder.lookup == CallOptimization::kHolderNotFound)
+ if (api_holder.lookup == CallOptimization::kHolderNotFound) {
return inference.NoChange();
+ }
// Check that all {receiver_maps} are actually JSReceiver maps and
// that the {function_template_info} accepts them without access
@@ -3471,6 +3556,18 @@ Reduction JSCallReducer::ReduceCallApiFunction(
<< function_template_info);
return NoChange();
}
+
+ Address c_function = function_template_info.c_function();
+
+ if (FLAG_turbo_fast_api_calls && c_function != kNullAddress) {
+ const CFunctionInfo* c_signature = function_template_info.c_signature();
+ FastApiCallReducerAssembler a(jsgraph(), graph()->zone(), node, c_function,
+ c_signature);
+ Node* c_call = a.ReduceFastApiCall();
+ ReplaceWithSubgraph(&a, c_call);
+ return Replace(c_call);
+ }
+
CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor cid = call_api_callback.descriptor();
@@ -3733,7 +3830,7 @@ namespace {
bool ShouldUseCallICFeedback(Node* node) {
HeapObjectMatcher m(node);
- if (m.HasValue() || m.IsJSCreateClosure()) {
+ if (m.HasValue() || m.IsCheckClosure() || m.IsJSCreateClosure()) {
// Don't use CallIC feedback when we know the function
// being called, i.e. either know the closure itself or
// at least the SharedFunctionInfo.
@@ -3827,9 +3924,14 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// just immediately try to inline based on the SharedFunctionInfo,
// since TurboFan generally doesn't inline cross-context, and hence
// the {target} must have the same native context as the call site.
+ // Same if the {target} is the result of a CheckClosure operation.
if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& p = CreateClosureParametersOf(target->op());
return ReduceJSCall(node, SharedFunctionInfoRef(broker(), p.shared_info()));
+ } else if (target->opcode() == IrOpcode::kCheckClosure) {
+ FeedbackCellRef cell(broker(), FeedbackCellOf(target->op()));
+ return ReduceJSCall(node,
+ cell.value().AsFeedbackVector().shared_function_info());
}
// If {target} is the result of a JSCreateBoundFunction operation,
@@ -3898,8 +4000,32 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
- }
+ } else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
+ FeedbackCellRef feedback_cell(
+ broker(), feedback_target.value().AsFeedbackCell().object());
+ if (feedback_cell.value().IsFeedbackVector()) {
+ // Check that {target} is a closure with given {feedback_cell},
+ // which uniquely identifies a given function inside a native context.
+ FeedbackVectorRef feedback_vector =
+ feedback_cell.value().AsFeedbackVector();
+ if (!feedback_vector.serialized()) {
+ TRACE_BROKER_MISSING(
+ broker(), "feedback vector, not serialized: " << feedback_vector);
+ return NoChange();
+ }
+ Node* target_closure = effect =
+ graph()->NewNode(simplified()->CheckClosure(feedback_cell.object()),
+ target, effect, control);
+
+ // Specialize the JSCall node to the {target_closure}.
+ NodeProperties::ReplaceValueInput(node, target_closure, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ // Try to further reduce the JSCall {node}.
+ Reduction const reduction = ReduceJSCall(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
return NoChange();
}
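
The new branch above handles CallIC feedback that recorded a FeedbackCell rather than a concrete target: the call is specialized by inserting a CheckClosure guard, whose lowering (earlier in this patch) deopt-checks that the target is a JSFunction carrying exactly that cell. The guard's logic in miniature, modeling deopts as exceptions and a non-JSFunction target as nullptr:

    #include <stdexcept>

    struct FeedbackCellSketch {};
    struct JSFunctionSketch { const FeedbackCellSketch* feedback_cell; };

    const JSFunctionSketch& CheckClosureSketch(
        const JSFunctionSketch* target, const FeedbackCellSketch* expected) {
      if (target == nullptr) throw std::runtime_error("deopt: WrongCallTarget");
      if (target->feedback_cell != expected)
        throw std::runtime_error("deopt: WrongFeedbackCell");
      return *target;  // safe to reduce the call via the cell's vector's SFI
    }

    int main() {
      FeedbackCellSketch cell;
      JSFunctionSketch fn{&cell};
      CheckClosureSketch(&fn, &cell);  // passes both guards
    }

Because the feedback cell uniquely identifies a function within a native context, passing the guard is enough for ReduceJSCall to inline against the shared function info.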
@@ -6403,13 +6529,8 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
jsgraph()->Constant(TYPE##_ELEMENTS - \
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)); \
control = graph()->NewNode(common()->Branch(), check, control); \
- if (should_disallow_heap_access()) { \
- values.push_back(jsgraph()->Constant( \
- broker()->GetTypedArrayStringTag(TYPE##_ELEMENTS))); \
- } else { \
- values.push_back(jsgraph()->HeapConstant( \
- factory()->InternalizeUtf8String(#Type "Array"))); \
- } \
+ values.push_back(jsgraph()->Constant( \
+ broker()->GetTypedArrayStringTag(TYPE##_ELEMENTS))); \
effects.push_back(effect); \
controls.push_back(graph()->NewNode(common()->IfTrue(), control)); \
control = graph()->NewNode(common()->IfFalse(), control); \
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 6f76cb7c78..d0059030d5 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -256,10 +256,12 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
}
UNREACHABLE();
- } else if (outer_state->opcode() == IrOpcode::kFrameState) {
- // Use inline allocation for all mapped arguments objects within inlined
- // (i.e. non-outermost) frames, independent of the object size.
- if (type == CreateArgumentsType::kMappedArguments) {
+ }
+ // Use inline allocation for all mapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ DCHECK_EQ(outer_state->opcode(), IrOpcode::kFrameState);
+ switch (type) {
+ case CreateArgumentsType::kMappedArguments: {
Node* const callee = NodeProperties::GetValueInput(node, 0);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -300,7 +302,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
RelaxControls(node);
a.FinishAndChange(node);
return Changed(node);
- } else if (type == CreateArgumentsType::kUnmappedArguments) {
+ }
+ case CreateArgumentsType::kUnmappedArguments: {
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
Node* effect = NodeProperties::GetEffectInput(node);
@@ -335,7 +338,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
RelaxControls(node);
a.FinishAndChange(node);
return Changed(node);
- } else if (type == CreateArgumentsType::kRestParameter) {
+ }
+ case CreateArgumentsType::kRestParameter: {
int start_index = shared.internal_formal_parameter_count();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
@@ -378,8 +382,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
return Changed(node);
}
}
-
- return NoChange();
+ UNREACHABLE();
}
Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
@@ -1421,9 +1424,9 @@ Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
a.AllocateArray(argument_count,
MapRef(broker(), factory()->fixed_array_map()));
for (int i = 0; i < argument_count; ++i, ++parameters_it) {
- DCHECK_NOT_NULL((*parameters_it).node);
+ DCHECK_NOT_NULL(parameters_it.node());
a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
- (*parameters_it).node);
+ parameters_it.node());
}
return a.Finish();
}
@@ -1452,9 +1455,9 @@ Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
AllocationBuilder a(jsgraph(), effect, control);
a.AllocateArray(num_elements, MapRef(broker(), factory()->fixed_array_map()));
for (int i = 0; i < num_elements; ++i, ++parameters_it) {
- DCHECK_NOT_NULL((*parameters_it).node);
+ DCHECK_NOT_NULL(parameters_it.node());
a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
- (*parameters_it).node);
+ parameters_it.node());
}
return a.Finish();
}
@@ -1496,9 +1499,9 @@ Node* JSCreateLowering::AllocateAliasedArguments(
jsgraph()->TheHoleConstant());
}
for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
- DCHECK_NOT_NULL((*parameters_it).node);
+ DCHECK_NOT_NULL(parameters_it.node());
aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
- (*parameters_it).node);
+ parameters_it.node());
}
Node* arguments = aa.Finish();
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index e565d9ee40..7a3610fff2 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -168,7 +168,8 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
const PropertyAccess& p = PropertyAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 2,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = Builtins::CallableFor(
isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
@@ -198,7 +199,8 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
ReplaceWithStubCall(node, callable, flags);
return;
}
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 2,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = Builtins::CallableFor(
isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
@@ -222,7 +224,8 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 1,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = CodeFactory::LoadGlobalIC(isolate(), p.typeof_mode());
ReplaceWithStubCall(node, callable, flags);
@@ -252,7 +255,8 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kKeyedStoreICTrampoline);
@@ -276,7 +280,8 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kSetNamedProperty);
return;
}
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kStoreICTrampoline);
@@ -295,7 +300,8 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = CodeFactory::StoreOwnIC(isolate());
ReplaceWithStubCall(node, callable, flags);
@@ -313,7 +319,8 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 2,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kStoreGlobalICTrampoline);
@@ -332,7 +339,7 @@ void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
RelaxControls(node);
node->InsertInputs(zone(), 4, 2);
node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector));
- node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(5, jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
}
@@ -342,7 +349,8 @@ void JSGenericLowering::LowerJSStoreInArrayLiteral(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
RelaxControls(node);
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector));
ReplaceWithStubCall(node, callable, flags);
}
@@ -550,7 +558,8 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 1,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
// Use the CreateShallowArrayLiteral builtin only for shallow boilerplates
@@ -574,7 +583,8 @@ void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 1,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->RemoveInput(4); // control
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCreateEmptyArrayLiteral);
@@ -592,7 +602,8 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 1,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -615,7 +626,8 @@ void JSGenericLowering::LowerJSCloneObject(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCloneObjectIC);
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.flags()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 2,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector));
ReplaceWithStubCall(node, callable, flags);
}
@@ -630,7 +642,8 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCreateRegExpLiteral);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
- node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 1,
+ jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithStubCall(node, callable, flags);
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index beed7820b4..bd733ae413 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -160,6 +160,8 @@ DEFINE_GETTER(NullConstant, HeapConstant(factory()->null_value()))
DEFINE_GETTER(ZeroConstant, NumberConstant(0.0))
+DEFINE_GETTER(MinusZeroConstant, NumberConstant(-0.0))
+
DEFINE_GETTER(OneConstant, NumberConstant(1.0))
DEFINE_GETTER(MinusOneConstant, NumberConstant(-1.0))
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 83c81b1010..f965451e35 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -99,9 +99,10 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
V(FalseConstant) \
V(NullConstant) \
V(ZeroConstant) \
+ V(MinusZeroConstant) \
V(OneConstant) \
- V(NaNConstant) \
V(MinusOneConstant) \
+ V(NaNConstant) \
V(EmptyStateValues) \
V(SingleDeadTypedStateValues)
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 27df7fc4e9..6acc77cdcc 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -9,6 +9,7 @@
#include <algorithm>
#endif
+#include "include/v8-fast-api-calls.h"
#include "src/api/api-inl.h"
#include "src/ast/modules.h"
#include "src/codegen/code-factory.h"
@@ -226,6 +227,8 @@ class FunctionTemplateInfoData : public HeapObjectData {
void SerializeCallCode(JSHeapBroker* broker);
CallHandlerInfoData* call_code() const { return call_code_; }
+ Address c_function() const { return c_function_; }
+ const CFunctionInfo* c_signature() const { return c_signature_; }
KnownReceiversMap& known_receivers() { return known_receivers_; }
private:
@@ -234,6 +237,8 @@ class FunctionTemplateInfoData : public HeapObjectData {
bool has_call_code_ = false;
CallHandlerInfoData* call_code_ = nullptr;
+ const Address c_function_;
+ const CFunctionInfo* const c_signature_;
KnownReceiversMap known_receivers_;
};
@@ -257,6 +262,8 @@ FunctionTemplateInfoData::FunctionTemplateInfoData(
JSHeapBroker* broker, ObjectData** storage,
Handle<FunctionTemplateInfo> object)
: HeapObjectData(broker, storage, object),
+ c_function_(v8::ToCData<Address>(object->GetCFunction())),
+ c_signature_(v8::ToCData<CFunctionInfo*>(object->GetCSignature())),
known_receivers_(broker->zone()) {
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
is_signature_undefined_ =
@@ -821,9 +828,12 @@ StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
is_external_string_(object->IsExternalString()),
is_seq_string_(object->IsSeqString()),
chars_as_strings_(broker->zone()) {
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
if (length_ <= kMaxLengthForDoubleConversion) {
- to_number_ = StringToDouble(broker->isolate(), object, flags);
+ const int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ uc16 buffer[kMaxLengthForDoubleConversion];
+ String::WriteToFlat(*object, buffer, 0, length_);
+ Vector<const uc16> v(buffer, length_);
+ to_number_ = StringToDouble(v, flags);
}
}
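StringData now flattens the characters into a local buffer and parses from there, rather than calling the isolate-based StringToDouble overload; presumably this keeps the conversion free of heap access once the characters are copied, which matters for data captured by the broker. A stand-alone analogue of the copy-then-parse shape (the length limit and the ASCII narrowing are illustrative):

    #include <cstdlib>

    // Analogue only: copy into a local buffer first, then parse from the
    // buffer, so the parser never touches the (movable) heap string.
    constexpr int kMaxLengthForConversion = 23;  // illustrative limit
    double ToNumberFromFlatCopy(const char16_t* heap_chars, int length) {
      if (length > kMaxLengthForConversion) return 0.0;  // caller's slow path
      char buffer[kMaxLengthForConversion + 1];
      for (int i = 0; i < length; ++i) {
        buffer[i] = static_cast<char>(heap_chars[i]);  // assumes ASCII digits
      }
      buffer[length] = '\0';
      return std::strtod(buffer, nullptr);
    }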
@@ -1353,7 +1363,13 @@ class FeedbackVectorData : public HeapObjectData {
double invocation_count() const { return invocation_count_; }
+ SharedFunctionInfoData* shared_function_info() {
+ CHECK(serialized_);
+ return shared_function_info_;
+ }
+
void Serialize(JSHeapBroker* broker);
+ bool serialized() const { return serialized_; }
FeedbackCellData* GetClosureFeedbackCell(JSHeapBroker* broker,
int index) const;
@@ -1361,6 +1377,7 @@ class FeedbackVectorData : public HeapObjectData {
double const invocation_count_;
bool serialized_ = false;
+ SharedFunctionInfoData* shared_function_info_;
ZoneVector<ObjectData*> closure_feedback_cell_array_;
};
@@ -1392,6 +1409,9 @@ void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "FeedbackVectorData::Serialize");
Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
+ Handle<SharedFunctionInfo> sfi(vector->shared_function_info(),
+ broker->isolate());
+ shared_function_info_ = broker->GetOrCreateData(sfi)->AsSharedFunctionInfo();
DCHECK(closure_feedback_cell_array_.empty());
int length = vector->closure_feedback_cell_array().length();
closure_feedback_cell_array_.reserve(length);
@@ -2510,23 +2530,17 @@ void JSHeapBroker::CollectArrayAndObjectPrototypes() {
CHECK(!array_and_object_prototypes_.empty());
}
-void JSHeapBroker::SerializeTypedArrayStringTags() {
-#define TYPED_ARRAY_STRING_TAG(Type, type, TYPE, ctype) \
- do { \
- ObjectData* data = GetOrCreateData( \
- isolate()->factory()->InternalizeUtf8String(#Type "Array")); \
- typed_array_string_tags_.push_back(data); \
- } while (false);
-
- TYPED_ARRAYS(TYPED_ARRAY_STRING_TAG)
-#undef TYPED_ARRAY_STRING_TAG
-}
-
StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) {
DCHECK(IsTypedArrayElementsKind(kind));
- size_t idx = kind - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
- CHECK_LT(idx, typed_array_string_tags_.size());
- return StringRef(this, typed_array_string_tags_[idx]);
+ switch (kind) {
+#define TYPED_ARRAY_STRING_TAG(Type, type, TYPE, ctype) \
+ case ElementsKind::TYPE##_ELEMENTS: \
+ return StringRef(this, isolate()->factory()->Type##Array_string());
+ TYPED_ARRAYS(TYPED_ARRAY_STRING_TAG)
+#undef TYPED_ARRAY_STRING_TAG
+ default:
+ UNREACHABLE();
+ }
}
bool JSHeapBroker::ShouldBeSerializedForCompilation(
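For reference, TYPED_ARRAYS invokes the macro once per element kind, so the switch above expands to one case per typed-array type; the Uint8 instantiation, for example, becomes:

    case ElementsKind::UINT8_ELEMENTS:
      return StringRef(this, isolate()->factory()->Uint8Array_string());

Because the canonical "Uint8Array" etc. strings come straight from the factory's root accessors, the separate SerializeTypedArrayStringTags() pass deleted above is no longer needed.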
@@ -2593,7 +2607,6 @@ void JSHeapBroker::InitializeAndStartSerializing(
target_native_context().Serialize();
CollectArrayAndObjectPrototypes();
- SerializeTypedArrayStringTags();
// Serialize Cells
Factory* const f = isolate()->factory();
@@ -3676,6 +3689,20 @@ Address CallHandlerInfoRef::callback() const {
return HeapObjectRef::data()->AsCallHandlerInfo()->callback();
}
+Address FunctionTemplateInfoRef::c_function() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return v8::ToCData<Address>(object()->GetCFunction());
+ }
+ return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_function();
+}
+
+const CFunctionInfo* FunctionTemplateInfoRef::c_signature() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return v8::ToCData<CFunctionInfo*>(object()->GetCSignature());
+ }
+ return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_signature();
+}
+
bool StringRef::IsSeqString() const {
IF_ACCESS_FROM_HEAP_C(String, IsSeqString);
return data()->AsString()->is_seq_string();
@@ -3694,6 +3721,22 @@ ScopeInfoRef NativeContextRef::scope_info() const {
return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info());
}
+SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
+ if (data_->should_access_heap()) {
+ DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
+ AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return SharedFunctionInfoRef(
+ broker(),
+ handle(object()->shared_function_info(), broker()->isolate()));
+ }
+
+ return SharedFunctionInfoRef(
+ broker(), data()->AsFeedbackVector()->shared_function_info());
+}
+
MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
@@ -4007,10 +4050,27 @@ Float64 FixedDoubleArrayData::Get(int i) const {
return contents_[i];
}
+base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
+ const {
+ if (value().IsFeedbackVector()) {
+ FeedbackVectorRef vector = value().AsFeedbackVector();
+ if (vector.serialized()) {
+ return value().AsFeedbackVector().shared_function_info();
+ }
+ }
+ return base::nullopt;
+}
+
void FeedbackVectorRef::Serialize() {
data()->AsFeedbackVector()->Serialize(broker());
}
+bool FeedbackVectorRef::serialized() const {
+ DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
+ if (data_->should_access_heap()) return true;
+ return data()->AsFeedbackVector()->serialized();
+}
+
bool NameRef::IsUniqueName() const {
// Must match Name::IsUniqueName.
return IsInternalizedString() || IsSymbol();
@@ -4430,7 +4490,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
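The accessors added in this file (c_function(), c_signature(), FeedbackVectorRef::shared_function_info()) all follow the broker's usual dual-mode shape: read the heap directly while the broker is disabled or the object is safely accessible, otherwise serve the copy captured during Serialize() so compilation can proceed off the main thread. A condensed sketch of the shape, with placeholder names:

    // Placeholder names (SomeRef, some_field); the control flow mirrors the
    // accessors in this hunk.
    T SomeRef::some_field() const {
      if (broker()->mode() == JSHeapBroker::kDisabled) {
        return ReadDirectlyFromHeap(object());  // main thread: heap access is safe
      }
      // Background thread: use only data snapshotted at serialization time.
      return data()->AsSomeData()->some_field();
    }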
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 0cd5df2944..424da1df55 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -228,7 +228,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource const& source);
void CollectArrayAndObjectPrototypes();
- void SerializeTypedArrayStringTags();
PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 13bd6a1282..820928ec8c 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -27,6 +27,12 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
Reduction JSHeapCopyReducer::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kCheckClosure: {
+ FeedbackCellRef cell(broker(), FeedbackCellOf(node->op()));
+ FeedbackVectorRef feedback_vector = cell.value().AsFeedbackVector();
+ feedback_vector.Serialize();
+ break;
+ }
case IrOpcode::kHeapConstant: {
ObjectRef object(broker(), HeapConstantOf(node->op()));
if (object.IsJSFunction()) object.AsJSFunction().Serialize();
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 89233d3fea..f38cb51c1f 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -108,6 +108,20 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.num_functions = value_input_count;
return out;
}
+ if (m.IsCheckClosure()) {
+ DCHECK(!out.functions[0].has_value());
+ FeedbackCellRef feedback_cell(broker(), FeedbackCellOf(m.op()));
+ SharedFunctionInfoRef shared_info =
+ feedback_cell.shared_function_info().value();
+ out.shared_info = shared_info;
+ if (feedback_cell.value().IsFeedbackVector() &&
+ CanConsiderForInlining(broker(), shared_info,
+ feedback_cell.value().AsFeedbackVector())) {
+ out.bytecode[0] = shared_info.GetBytecodeArray();
+ }
+ out.num_functions = 1;
+ return out;
+ }
if (m.IsJSCreateClosure()) {
DCHECK(!out.functions[0].has_value());
CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 60c7626067..16a6fb2f0f 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -8,6 +8,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/tick-counter.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/common-operator.h"
@@ -317,13 +318,11 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// - JSConstruct(JSCreateClosure[shared](context), args..., new.target)
if (match.IsJSCreateClosure()) {
CreateClosureParameters const& p = CreateClosureParametersOf(match.op());
-
- // TODO(turbofan): We might consider to eagerly create the feedback vector
- // in such a case (in {DetermineCallContext} below) eventually.
FeedbackCellRef cell(broker(), p.feedback_cell());
- if (!cell.value().IsFeedbackVector()) return base::nullopt;
-
- return SharedFunctionInfoRef(broker(), p.shared_info());
+ return cell.shared_function_info();
+ } else if (match.IsCheckClosure()) {
+ FeedbackCellRef cell(broker(), FeedbackCellOf(match.op()));
+ return cell.shared_function_info();
}
return base::nullopt;
@@ -354,11 +353,22 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
// Load the feedback vector of the target by looking up its vector cell at
// the instantiation site (we only decide to inline if it's populated).
- FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell()));
+ FeedbackCellRef cell(broker(), p.feedback_cell());
// The inlinee uses the locally provided context at instantiation.
*context_out = NodeProperties::GetContextInput(match.node());
return cell.value().AsFeedbackVector();
+ } else if (match.IsCheckClosure()) {
+ FeedbackCellRef cell(broker(), FeedbackCellOf(match.op()));
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ *context_out = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()),
+ match.node(), effect, control);
+ NodeProperties::ReplaceEffectInput(node, effect);
+
+ return cell.value().AsFeedbackVector();
}
// Must succeed.
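Unlike the JSCreateClosure case, a CheckClosure target carries no syntactic context, so DetermineCallContext materializes a runtime load of the closure's context field and threads it through the effect chain, as in the hunk above:

    // The CheckClosure node's value is the closure object itself, so its
    // context slot can be loaded directly; the load becomes the new effect.
    *context_out = effect = graph()->NewNode(
        simplified()->LoadField(AccessBuilder::ForJSFunctionContext()),
        match.node(), effect, control);
    NodeProperties::ReplaceEffectInput(node, effect);

In both paths the feedback cell is expected to hold a vector by the time inlining is attempted, which is why DetermineCallTarget can defer to cell.shared_function_info(), returning nullopt when the vector is absent.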
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 3d730a07ef..3963edcbbd 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -1375,6 +1375,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
javascript()->LoadNamed(iterator_symbol, p.loadFeedback());
// Lazy deopt of the load iterator property
+ // TODO(v8:10047): Use TaggedIndexConstant here once deoptimizer supports it.
Node* call_slot = jsgraph()->SmiConstant(p.callFeedback().slot.ToInt());
Node* call_feedback = jsgraph()->HeapConstant(p.callFeedback().vector);
Node* lazy_deopt_parameters[] = {receiver, call_slot, call_feedback};
@@ -2635,6 +2636,15 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* etrue = effect;
Node* vtrue;
{
+ // Do a real bounds check against {length}. This is in order to
+ // protect against a potential typer bug leading to the elimination
+ // of the NumberLessThan above.
+ index = etrue = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(),
+ CheckBoundsParameters::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
+
// Perform the actual load
vtrue = etrue = graph()->NewNode(
simplified()->LoadTypedElement(external_array_type),
@@ -2696,6 +2706,15 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
{
+ // Do a real bounds check against {length}. This is in order to
+ // protect against a potential typer bug leading to the elimination
+ // of the NumberLessThan above.
+ index = etrue = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(),
+ CheckBoundsParameters::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
+
// Perform the actual store.
etrue = graph()->NewNode(
simplified()->StoreTypedElement(external_array_type),
@@ -2828,6 +2847,14 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* etrue = effect;
Node* vtrue;
{
+ // Do a real bounds check against {length}. This is in order to
+ // protect against a potential typer bug leading to the elimination of
+ // the NumberLessThan above.
+ index = etrue = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsParameters::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
+
// Perform the actual load
vtrue = etrue =
graph()->NewNode(simplified()->LoadElement(element_access),
@@ -3096,13 +3123,18 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
IsSafetyCheck::kCriticalSafetyCheck),
check, *control);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
-
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue;
+ // Do a real bounds check against {length}. This is in order to protect
+ // against a potential typer bug leading to the elimination of the
+ // NumberLessThan above.
+ Node* etrue = index = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsParameters::kAbortOnOutOfBounds),
+ index, length, *effect, if_true);
+ Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* vtrue = etrue =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, *effect, if_true);
+ masked_index, etrue, if_true);
vtrue = graph()->NewNode(simplified()->StringFromSingleCharCode(), vtrue);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
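The same hardening block now appears at four sites in this file: a real CheckBounds with kAbortOnOutOfBounds dominates every typed-array and string element access, so even if a typer bug eliminates the guarding NumberLessThan, an out-of-bounds index aborts rather than reading or writing out of bounds. A sketch of the repeated block as a hypothetical helper (InsertAbortingBoundsCheck is an illustrative name, not a V8 function):

    // Sketch only. The CheckBounds node doubles as the checked index value
    // and the new effect, exactly as in the hunks above.
    Node* InsertAbortingBoundsCheck(Graph* graph,
                                    SimplifiedOperatorBuilder* simplified,
                                    Node* index, Node* length, Node** effect,
                                    Node* control) {
      return *effect = graph->NewNode(
          simplified->CheckBounds(FeedbackSource(),
                                  CheckBoundsParameters::kAbortOnOutOfBounds),
          index, length, *effect, control);
    }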
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index c5ebc2f6a1..47f931317e 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -428,8 +428,8 @@ JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph,
: AdvancedReducer(editor),
jsgraph_(jsgraph),
broker_(broker),
- empty_string_type_(Type::HeapConstant(broker, factory()->empty_string(),
- graph()->zone())),
+ empty_string_type_(
+ Type::Constant(broker, factory()->empty_string(), graph()->zone())),
pointer_comparable_type_(
Type::Union(Type::Oddball(),
Type::Union(Type::SymbolOrReceiver(), empty_string_type_,
@@ -848,24 +848,19 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
JSBinopReduction r(this, node);
+ if (r.type().IsSingleton()) {
+ // Let ConstantFoldingReducer handle this.
+ return NoChange();
+ }
if (r.left() == r.right()) {
// x === x is always true if x != NaN
Node* replacement = graph()->NewNode(
simplified()->BooleanNot(),
graph()->NewNode(simplified()->ObjectIsNaN(), r.left()));
+ DCHECK(NodeProperties::GetType(replacement).Is(r.type()));
ReplaceWithValue(node, replacement);
return Replace(replacement);
}
- if (r.OneInputCannotBe(Type::NumericOrString())) {
- // For values with canonical representation (i.e. neither String nor
- // Numeric) an empty type intersection means the values cannot be strictly
- // equal.
- if (!r.left_type().Maybe(r.right_type())) {
- Node* replacement = jsgraph()->FalseConstant();
- ReplaceWithValue(node, replacement);
- return Replace(replacement);
- }
- }
if (r.BothInputsAre(Type::Unique())) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual());
@@ -1530,7 +1525,7 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
}
bool NeedsArgumentAdaptorFrame(SharedFunctionInfoRef shared, int arity) {
- static const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ static const int sentinel = kDontAdaptArgumentsSentinel;
const int num_decl_parms = shared.internal_formal_parameter_count();
return (num_decl_parms != arity && num_decl_parms != sentinel);
}
@@ -1664,34 +1659,46 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- // Check if {target} is a known JSFunction.
+ // Check if we know the SharedFunctionInfo of {target}.
+ base::Optional<JSFunctionRef> function;
+ base::Optional<SharedFunctionInfoRef> shared;
+
if (target_type.IsHeapConstant() &&
target_type.AsHeapConstant()->Ref().IsJSFunction()) {
- JSFunctionRef function = target_type.AsHeapConstant()->Ref().AsJSFunction();
+ function = target_type.AsHeapConstant()->Ref().AsJSFunction();
- if (!function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for function " << function);
+ if (!function->serialized()) {
+ TRACE_BROKER_MISSING(broker(), "data for function " << *function);
return NoChange();
}
- SharedFunctionInfoRef shared = function.shared();
+ shared = function->shared();
+ } else if (target->opcode() == IrOpcode::kJSCreateClosure) {
+ CreateClosureParameters const& ccp =
+ CreateClosureParametersOf(target->op());
+ shared = SharedFunctionInfoRef(broker(), ccp.shared_info());
+ } else if (target->opcode() == IrOpcode::kCheckClosure) {
+ FeedbackCellRef cell(broker(), FeedbackCellOf(target->op()));
+ shared = cell.value().AsFeedbackVector().shared_function_info();
+ }
+ if (shared.has_value()) {
// Do not inline the call if we need to check whether to break at entry.
- if (shared.HasBreakInfo()) return NoChange();
+ if (shared->HasBreakInfo()) return NoChange();
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
- if (IsClassConstructor(shared.kind())) return NoChange();
+ if (IsClassConstructor(shared->kind())) return NoChange();
// Check if we need to convert the {receiver}, but bailout if it would
// require data from a foreign native context.
- if (is_sloppy(shared.language_mode()) && !shared.native() &&
+ if (is_sloppy(shared->language_mode()) && !shared->native() &&
!receiver_type.Is(Type::Receiver())) {
- if (!function.native_context().equals(
- broker()->target_native_context())) {
+ if (!function.has_value() || !function->native_context().equals(
+ broker()->target_native_context())) {
return NoChange();
}
Node* global_proxy =
- jsgraph()->Constant(function.native_context().global_proxy_object());
+ jsgraph()->Constant(function->native_context().global_proxy_object());
receiver = effect =
graph()->NewNode(simplified()->ConvertReceiver(convert_mode),
receiver, global_proxy, effect, control);
@@ -1711,20 +1718,20 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
- if (NeedsArgumentAdaptorFrame(shared, arity)) {
+ if (NeedsArgumentAdaptorFrame(*shared, arity)) {
// Check if it's safe to skip the arguments adaptor for {shared},
// that is whether the target function anyways cannot observe the
// actual arguments. Details can be found in this document at
// https://bit.ly/v8-faster-calls-with-arguments-mismatch and
// on the tracking bug at https://crbug.com/v8/8895
- if (shared.is_safe_to_skip_arguments_adaptor()) {
+ if (shared->is_safe_to_skip_arguments_adaptor()) {
// Currently we only support skipping arguments adaptor frames
// for strict mode functions, since there's Function.arguments
// legacy accessor, which is still available in sloppy mode.
- DCHECK_EQ(LanguageMode::kStrict, shared.language_mode());
+ DCHECK_EQ(LanguageMode::kStrict, shared->language_mode());
// Massage the arguments to match the expected number of arguments.
- int expected_argument_count = shared.internal_formal_parameter_count();
+ int expected_argument_count = shared->internal_formal_parameter_count();
for (; arity > expected_argument_count; --arity) {
node->RemoveInput(arity + 1);
}
@@ -1750,20 +1757,21 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(
graph()->zone(), 4,
- jsgraph()->Constant(shared.internal_formal_parameter_count()));
+ jsgraph()->Constant(shared->internal_formal_parameter_count()));
NodeProperties::ChangeOp(
node,
common()->Call(Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(), 1 + arity, flags)));
}
- } else if (shared.HasBuiltinId() && Builtins::IsCpp(shared.builtin_id())) {
+ } else if (shared->HasBuiltinId() &&
+ Builtins::IsCpp(shared->builtin_id())) {
// Patch {node} to a direct CEntry call.
- ReduceBuiltin(jsgraph(), node, shared.builtin_id(), arity, flags);
- } else if (shared.HasBuiltinId()) {
- DCHECK(Builtins::HasJSLinkage(shared.builtin_id()));
+ ReduceBuiltin(jsgraph(), node, shared->builtin_id(), arity, flags);
+ } else if (shared->HasBuiltinId()) {
+ DCHECK(Builtins::HasJSLinkage(shared->builtin_id()));
// Patch {node} to a direct code object call.
Callable callable = Builtins::CallableFor(
- isolate(), static_cast<Builtins::Name>(shared.builtin_id()));
+ isolate(), static_cast<Builtins::Name>(shared->builtin_id()));
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
const CallInterfaceDescriptor& descriptor = callable.descriptor();
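Net effect in ReduceJSCall: the callee's SharedFunctionInfo can now come from three places instead of one, and everything downstream (break-info check, class-constructor bailout, receiver conversion, argument-adaptor skipping, builtin dispatch) runs off the optional. Condensed from the hunk above:

    base::Optional<SharedFunctionInfoRef> shared;
    if (/* target is a HeapConstant JSFunction */) {
      shared = function->shared();            // 1. known closure object
    } else if (target->opcode() == IrOpcode::kJSCreateClosure) {
      shared = SharedFunctionInfoRef(broker(), ccp.shared_info());  // 2. syntactic
    } else if (target->opcode() == IrOpcode::kCheckClosure) {
      // 3. from feedback: FeedbackVectorData now serializes the vector's
      //    owning SharedFunctionInfo (see js-heap-broker.cc above).
      shared = cell.value().AsFeedbackVector().shared_function_info();
    }

Only case 1 yields a JSFunctionRef, which is why the receiver-conversion path re-checks function.has_value() before consulting the native context.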
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 39c93c0328..e97ee820f3 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -401,7 +401,9 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
// The rest of the parameters go on the stack.
int stack_slot = i - register_parameter_count - stack_parameter_count;
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
- stack_slot, MachineType::AnyTagged()));
+ stack_slot, i < descriptor.GetParameterCount()
+ ? descriptor.GetParameterType(i)
+ : MachineType::AnyTagged()));
}
}
// Add context.
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 17a419bab3..b55f3cdcb7 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -18,7 +18,16 @@
#include "src/runtime/runtime.h"
#include "src/zone/zone.h"
+#if !defined(__clang__) && defined(_M_ARM64)
+// _M_ARM64 is an MSVC-specific macro that clang-cl emulates.
+#define NO_INLINE_FOR_ARM64_MSVC __declspec(noinline)
+#else
+#define NO_INLINE_FOR_ARM64_MSVC
+#endif
+
namespace v8 {
+class CFunctionInfo;
+
namespace internal {
class CallInterfaceDescriptor;
@@ -134,7 +143,9 @@ class LinkageLocation {
LocationField::kShift;
}
- bool IsRegister() const { return TypeField::decode(bit_field_) == REGISTER; }
+ NO_INLINE_FOR_ARM64_MSVC bool IsRegister() const {
+ return TypeField::decode(bit_field_) == REGISTER;
+ }
bool IsAnyRegister() const {
return IsRegister() && GetLocation() == ANY_REGISTER;
}
@@ -357,6 +368,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final
return allocatable_registers_ != 0;
}
+ // Stores the signature information for a fast API call - C++ functions
+ // that can be called directly from TurboFan.
+ void SetCFunctionInfo(const CFunctionInfo* c_function_info) {
+ c_function_info_ = c_function_info;
+ }
+ const CFunctionInfo* GetCFunctionInfo() const { return c_function_info_; }
+
private:
friend class Linkage;
@@ -374,6 +392,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const RegList allocatable_registers_;
const Flags flags_;
const char* const debug_name_;
+ const CFunctionInfo* c_function_info_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(CallDescriptor);
};
@@ -505,5 +524,6 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
} // namespace compiler
} // namespace internal
} // namespace v8
+#undef NO_INLINE_FOR_ARM64_MSVC
#endif // V8_COMPILER_LINKAGE_H_
diff --git a/deps/v8/src/compiler/machine-graph.cc b/deps/v8/src/compiler/machine-graph.cc
index 3f05d56a9a..0a00392f4b 100644
--- a/deps/v8/src/compiler/machine-graph.cc
+++ b/deps/v8/src/compiler/machine-graph.cc
@@ -32,6 +32,15 @@ Node* MachineGraph::IntPtrConstant(intptr_t value) {
: Int64Constant(static_cast<int64_t>(value));
}
+Node* MachineGraph::TaggedIndexConstant(intptr_t value) {
+ int32_t value32 = static_cast<int32_t>(value);
+ Node** loc = cache_.FindTaggedIndexConstant(value32);
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->TaggedIndexConstant(value32));
+ }
+ return *loc;
+}
+
Node* MachineGraph::RelocatableInt32Constant(int32_t value,
RelocInfo::Mode rmode) {
Node** loc = cache_.FindRelocatableInt32Constant(
diff --git a/deps/v8/src/compiler/machine-graph.h b/deps/v8/src/compiler/machine-graph.h
index 86c3847211..9eb5998dfc 100644
--- a/deps/v8/src/compiler/machine-graph.h
+++ b/deps/v8/src/compiler/machine-graph.h
@@ -44,6 +44,8 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
// constants is probably not serializable.
Node* IntPtrConstant(intptr_t value);
+ Node* TaggedIndexConstant(intptr_t value);
+
Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode);
Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode);
Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 4a192cd3e1..9527074825 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -297,6 +297,27 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
// TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ Uint32BinopMatcher mand(m.left().node());
+ if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
+ mand.right().HasValue()) {
+ Uint32BinopMatcher mshift(mand.left().node());
+ // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
+ if (mshift.right().HasValue()) {
+ auto shift_bits = mshift.right().Value();
+ auto mask = mand.right().Value();
+ auto rhs = static_cast<uint32_t>(m.right().Value());
+ // Make sure that we won't shift data off the end.
+ if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
+ shift_bits <= base::bits::CountLeadingZeros(rhs)) {
+ node->ReplaceInput(
+ 0, Word32And(mshift.left().node(), mask << shift_bits));
+ node->ReplaceInput(1, Int32Constant(rhs << shift_bits));
+ return Changed(node);
+ }
+ }
+ }
+ }
break;
}
case IrOpcode::kWord64Equal: {
@@ -832,6 +853,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kBranch:
+ case IrOpcode::kDeoptimizeIf:
+ case IrOpcode::kDeoptimizeUnless:
+ case IrOpcode::kTrapIf:
+ case IrOpcode::kTrapUnless:
+ return ReduceConditional(node);
default:
break;
}
@@ -1659,6 +1686,37 @@ Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceConditional(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kBranch ||
+ node->opcode() == IrOpcode::kDeoptimizeIf ||
+ node->opcode() == IrOpcode::kDeoptimizeUnless ||
+ node->opcode() == IrOpcode::kTrapIf ||
+ node->opcode() == IrOpcode::kTrapUnless);
+ // This reducer only applies operator reductions to the branch condition.
+ // Reductions involving control flow happen elsewhere. Non-zero inputs are
+ // considered true in all conditional ops.
+ NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
+ if (condition.IsWord32And()) {
+ Uint32BinopMatcher mand(condition.node());
+ if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
+ mand.right().HasValue()) {
+ Uint32BinopMatcher mshift(mand.left().node());
+ // Branch condition (x >> K1) & K2 => x & (K2 << K1)
+ if (mshift.right().HasValue()) {
+ auto shift_bits = mshift.right().Value();
+ auto mask = mand.right().Value();
+ // Make sure that we won't shift data off the end.
+ if (shift_bits <= base::bits::CountLeadingZeros(mask)) {
+ NodeProperties::ReplaceValueInput(
+ node, Word32And(mshift.left().node(), mask << shift_bits), 0);
+ return Changed(node);
+ }
+ }
+ }
+ }
+ return NoChange();
+}
+
CommonOperatorBuilder* MachineOperatorReducer::common() const {
return mcgraph()->common();
}
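Both new reductions rest on one bit identity: masking after a right shift is equivalent to masking (and comparing) in the original bit position, provided neither the mask nor the comparison constant loses bits when shifted left — hence the CountLeadingZeros guards, which also keep the transform sound for arithmetic shifts. A stand-alone check with illustrative constants:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // ((x >> k1) & k2) == k3  <=>  (x & (k2 << k1)) == (k3 << k1),
    // valid when k2 << k1 and k3 << k1 do not shift set bits off the top.
    int main() {
      const uint32_t k1 = 4, k2 = 0xFF, k3 = 0x34;
      for (uint32_t x : {0u, 1u, 0x12345678u, 0xFFFFFFFFu}) {
        const bool lhs = ((x >> k1) & k2) == k3;
        const bool rhs = (x & (k2 << k1)) == (k3 << k1);
        assert(lhs == rhs);
      }
      return 0;
    }

In ReduceConditional the comparison constant is implicitly "non-zero", so only the mask needs the guard.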
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 2e869c9062..53c5d6fa68 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -109,6 +109,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceFloat64InsertHighWord32(Node* node);
Reduction ReduceFloat64Compare(Node* node);
Reduction ReduceFloat64RoundDown(Node* node);
+ Reduction ReduceConditional(Node* node);
Graph* graph() const;
MachineGraph* mcgraph() const { return mcgraph_; }
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index aadf48c5b9..898182db31 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -389,6 +389,8 @@ MachineType AtomicOpType(Operator const* op) {
V(I32x4MaxU, Operator::kCommutative, 2, 0, 1) \
V(I32x4GtU, Operator::kNoProperties, 2, 0, 1) \
V(I32x4GeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4BitMask, Operator::kNoProperties, 1, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -419,6 +421,8 @@ MachineType AtomicOpType(Operator const* op) {
V(I16x8GtU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8GeU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8RoundingAverageU, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8BitMask, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Splat, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Neg, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Shl, Operator::kNoProperties, 2, 0, 1) \
@@ -444,6 +448,8 @@ MachineType AtomicOpType(Operator const* op) {
V(I8x16GtU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16GeU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16RoundingAverageU, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(I8x16BitMask, Operator::kNoProperties, 1, 0, 1) \
V(S128Load, Operator::kNoProperties, 2, 0, 1) \
V(S128Store, Operator::kNoProperties, 3, 0, 1) \
V(S128Zero, Operator::kNoProperties, 0, 0, 1) \
@@ -614,56 +620,52 @@ struct MachineOperatorGlobalCache {
OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
-#define LOAD(Type) \
- struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
- Load##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct PoisonedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- PoisonedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kPoisonedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct UnalignedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- UnalignedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kUnalignedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct ProtectedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- ProtectedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kProtectedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2, 1, \
- 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Load##Type##Operator kLoad##Type; \
- PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
- UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
+#define LOAD(Type) \
+ struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>(IrOpcode::kLoad, \
+ Operator::kEliminatable, "Load", 2, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct PoisonedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ PoisonedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
+ "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct UnalignedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ UnalignedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kUnalignedLoad, Operator::kEliminatable, \
+ "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct ProtectedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ ProtectedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kProtectedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Load##Type##Operator kLoad##Type; \
+ PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
+ UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
- struct KIND##LoadTransform##TYPE##Operator final \
- : public Operator1<LoadTransformParameters> { \
- KIND##LoadTransform##TYPE##Operator() \
- : Operator1<LoadTransformParameters>( \
- IrOpcode::kLoadTransform, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- #KIND "LoadTransform", 2, 1, 1, 1, 1, 0, \
- LoadTransformParameters{LoadKind::k##KIND, \
- LoadTransformation::k##TYPE}) {} \
- }; \
+#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
+ struct KIND##LoadTransform##TYPE##Operator final \
+ : public Operator1<LoadTransformParameters> { \
+ KIND##LoadTransform##TYPE##Operator() \
+ : Operator1<LoadTransformParameters>( \
+ IrOpcode::kLoadTransform, Operator::kEliminatable, \
+ #KIND "LoadTransform", 2, 1, 1, 1, 1, 0, \
+ LoadTransformParameters{LoadKind::k##KIND, \
+ LoadTransformation::k##TYPE}) {} \
+ }; \
KIND##LoadTransform##TYPE##Operator k##KIND##LoadTransform##TYPE;
#define LOAD_TRANSFORM(TYPE) \
@@ -764,8 +766,7 @@ struct MachineOperatorGlobalCache {
: public Operator1<LoadRepresentation> { \
Word32AtomicLoad##Type##Operator() \
: Operator1<LoadRepresentation>( \
- IrOpcode::kWord32AtomicLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
"Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
@@ -777,8 +778,7 @@ struct MachineOperatorGlobalCache {
: public Operator1<LoadRepresentation> { \
Word64AtomicLoad##Type##Operator() \
: Operator1<LoadRepresentation>( \
- IrOpcode::kWord64AtomicLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
"Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index d3792be559..d4f9ffa0fd 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -629,6 +629,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4MaxU();
const Operator* I32x4GtU();
const Operator* I32x4GeU();
+ const Operator* I32x4Abs();
+ const Operator* I32x4BitMask();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLaneU(int32_t);
@@ -664,6 +666,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8GtU();
const Operator* I16x8GeU();
const Operator* I16x8RoundingAverageU();
+ const Operator* I16x8Abs();
+ const Operator* I16x8BitMask();
const Operator* I8x16Splat();
const Operator* I8x16ExtractLaneU(int32_t);
@@ -694,6 +698,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16GtU();
const Operator* I8x16GeU();
const Operator* I8x16RoundingAverageU();
+ const Operator* I8x16Abs();
+ const Operator* I8x16BitMask();
const Operator* S128Load();
const Operator* S128Store();
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
deleted file mode 100644
index 70e497ae60..0000000000
--- a/deps/v8/src/compiler/node-cache.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/node-cache.h"
-
-#include <cstring>
-
-#include "src/common/globals.h"
-#include "src/zone/zone-containers.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-enum { kInitialSize = 16u, kLinearProbe = 5u };
-
-} // namespace
-
-
-template <typename Key, typename Hash, typename Pred>
-struct NodeCache<Key, Hash, Pred>::Entry {
- Key key_;
- Node* value_;
-};
-
-
-template <typename Key, typename Hash, typename Pred>
-bool NodeCache<Key, Hash, Pred>::Resize(Zone* zone) {
- if (size_ >= max_) return false; // Don't grow past the maximum size.
-
- // Allocate a new block of entries 4x the size.
- Entry* old_entries = entries_;
- size_t old_size = size_ + kLinearProbe;
- size_ *= 4;
- size_t num_entries = size_ + kLinearProbe;
- entries_ = zone->NewArray<Entry>(num_entries);
- memset(static_cast<void*>(entries_), 0, sizeof(Entry) * num_entries);
-
- // Insert the old entries into the new block.
- for (size_t i = 0; i < old_size; ++i) {
- Entry* old = &old_entries[i];
- if (old->value_) {
- size_t hash = hash_(old->key_);
- size_t start = hash & (size_ - 1);
- size_t end = start + kLinearProbe;
- for (size_t j = start; j < end; ++j) {
- Entry* entry = &entries_[j];
- if (!entry->value_) {
- entry->key_ = old->key_;
- entry->value_ = old->value_;
- break;
- }
- }
- }
- }
- return true;
-}
-
-
-template <typename Key, typename Hash, typename Pred>
-Node** NodeCache<Key, Hash, Pred>::Find(Zone* zone, Key key) {
- size_t hash = hash_(key);
- if (!entries_) {
- // Allocate the initial entries and insert the first entry.
- size_t num_entries = kInitialSize + kLinearProbe;
- entries_ = zone->NewArray<Entry>(num_entries);
- size_ = kInitialSize;
- memset(static_cast<void*>(entries_), 0, sizeof(Entry) * num_entries);
- Entry* entry = &entries_[hash & (kInitialSize - 1)];
- entry->key_ = key;
- return &entry->value_;
- }
-
- for (;;) {
- // Search up to N entries after (linear probing).
- size_t start = hash & (size_ - 1);
- size_t end = start + kLinearProbe;
- for (size_t i = start; i < end; i++) {
- Entry* entry = &entries_[i];
- if (pred_(entry->key_, key)) return &entry->value_;
- if (!entry->value_) {
- entry->key_ = key;
- return &entry->value_;
- }
- }
-
- if (!Resize(zone)) break; // Don't grow past the maximum size.
- }
-
- // If resized to maximum and still didn't find space, overwrite an entry.
- Entry* entry = &entries_[hash & (size_ - 1)];
- entry->key_ = key;
- entry->value_ = nullptr;
- return &entry->value_;
-}
-
-
-template <typename Key, typename Hash, typename Pred>
-void NodeCache<Key, Hash, Pred>::GetCachedNodes(ZoneVector<Node*>* nodes) {
- if (entries_) {
- for (size_t i = 0; i < size_ + kLinearProbe; i++) {
- if (entries_[i].value_) nodes->push_back(entries_[i].value_);
- }
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Instantiations
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) NodeCache<int32_t>;
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) NodeCache<int64_t>;
-
-template class EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) NodeCache<RelocInt32Key>;
-template class EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) NodeCache<RelocInt64Key>;
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/node-cache.h b/deps/v8/src/compiler/node-cache.h
index c99eeafad8..935e5778e3 100644
--- a/deps/v8/src/compiler/node-cache.h
+++ b/deps/v8/src/compiler/node-cache.h
@@ -8,6 +8,7 @@
#include "src/base/export-template.h"
#include "src/base/functional.h"
#include "src/base/macros.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -30,33 +31,25 @@ template <typename Key, typename Hash = base::hash<Key>,
typename Pred = std::equal_to<Key> >
class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) NodeCache final {
public:
- explicit NodeCache(unsigned max = 256)
- : entries_(nullptr), size_(0), max_(max) {}
+ explicit NodeCache(Zone* zone) : map_(zone) {}
~NodeCache() = default;
// Search for node associated with {key} and return a pointer to a memory
// location in this cache that stores an entry for the key. If the location
// returned by this method contains a non-nullptr node, the caller can use
- // that
- // node. Otherwise it is the responsibility of the caller to fill the entry
- // with a new node.
- // Note that a previous cache entry may be overwritten if the cache becomes
- // too full or encounters too many hash collisions.
- Node** Find(Zone* zone, Key key);
+ // that node. Otherwise it is the responsibility of the caller to fill the
+ // entry with a new node.
+ Node** Find(Key key) { return &(map_[key]); }
// Appends all nodes from this cache to {nodes}.
- void GetCachedNodes(ZoneVector<Node*>* nodes);
+ void GetCachedNodes(ZoneVector<Node*>* nodes) {
+ for (const auto& entry : map_) {
+ if (entry.second) nodes->push_back(entry.second);
+ }
+ }
private:
- struct Entry;
-
- Entry* entries_; // lazily-allocated hash entries.
- size_t size_;
- size_t max_;
- Hash hash_;
- Pred pred_;
-
- bool Resize(Zone* zone);
+ ZoneUnorderedMap<Key, Node*, Hash, Pred> map_;
DISALLOW_COPY_AND_ASSIGN(NodeCache);
};
@@ -78,17 +71,6 @@ using IntPtrNodeCache = Int32NodeCache;
using IntPtrNodeCache = Int64NodeCache;
#endif
-// Explicit instantiation declarations.
-extern template class EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) NodeCache<int32_t>;
-extern template class EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) NodeCache<int64_t>;
-
-extern template class EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) NodeCache<RelocInt32Key>;
-extern template class EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) NodeCache<RelocInt64Key>;
-
} // namespace compiler
} // namespace internal
} // namespace v8
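The hand-rolled open-addressing cache, which could silently evict an entry after too many collisions, is replaced by a ZoneUnorderedMap; Find() no longer needs a Zone argument and entries are never overwritten. The caller contract is unchanged: a null slot means the caller must fill it. A stand-alone analogue using std::unordered_map:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    struct Node { int id; };

    // Analogue of the rewritten NodeCache: operator[] default-constructs a
    // nullptr slot on a miss, which is exactly the 'fill me' signal.
    template <typename Key>
    class SimpleNodeCache {
     public:
      Node** Find(Key key) { return &map_[key]; }
      void GetCachedNodes(std::vector<Node*>* nodes) {
        for (const auto& entry : map_) {
          if (entry.second) nodes->push_back(entry.second);
        }
      }
     private:
      std::unordered_map<Key, Node*> map_;
    };

    int main() {
      SimpleNodeCache<int32_t> cache;
      Node n{42};
      Node** slot = cache.Find(42);
      assert(*slot == nullptr);        // miss: caller creates and stores the node
      *slot = &n;
      assert(*cache.Find(42) == &n);   // hit: the cached node is returned
      return 0;
    }

Element pointers in an unordered map stay valid across rehashing, so the returned slot remains usable after later insertions — unlike the old scheme, where Resize() moved entries.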
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 82bc179519..8c4e5e76d5 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -53,8 +53,13 @@ template <typename T, IrOpcode::Value kOpcode>
struct ValueMatcher : public NodeMatcher {
using ValueType = T;
- explicit ValueMatcher(Node* node)
- : NodeMatcher(node), value_(), has_value_(opcode() == kOpcode) {
+ explicit ValueMatcher(Node* node) : NodeMatcher(node) {
+ static_assert(kOpcode != IrOpcode::kFoldConstant, "unsupported opcode");
+ if (node->opcode() == IrOpcode::kFoldConstant) {
+ node = node->InputAt(1);
+ }
+ DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
+ has_value_ = opcode() == kOpcode;
if (has_value_) {
value_ = OpParameter<T>(node->op());
}
@@ -110,6 +115,30 @@ inline ValueMatcher<uint64_t, IrOpcode::kInt64Constant>::ValueMatcher(
}
}
+template <>
+inline ValueMatcher<double, IrOpcode::kNumberConstant>::ValueMatcher(Node* node)
+ : NodeMatcher(node), value_(), has_value_(false) {
+ if (node->opcode() == IrOpcode::kNumberConstant) {
+ value_ = OpParameter<double>(node->op());
+ has_value_ = true;
+ } else if (node->opcode() == IrOpcode::kFoldConstant) {
+ node = node->InputAt(1);
+ DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
+ }
+}
+
+template <>
+inline ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>::ValueMatcher(
+ Node* node)
+ : NodeMatcher(node), value_(), has_value_(false) {
+ if (node->opcode() == IrOpcode::kHeapConstant) {
+ value_ = OpParameter<Handle<HeapObject>>(node->op());
+ has_value_ = true;
+ } else if (node->opcode() == IrOpcode::kFoldConstant) {
+ node = node->InputAt(1);
+ DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
+ }
+}
// A pattern matcher for integer constants.
template <typename T, IrOpcode::Value kOpcode>
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 7836c4425d..61af28841a 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -30,8 +30,6 @@
V(Return) \
V(TailCall) \
V(Terminate) \
- V(OsrNormalEntry) \
- V(OsrLoopEntry) \
V(Throw) \
V(End)
@@ -39,6 +37,7 @@
#define CONSTANT_OP_LIST(V) \
V(Int32Constant) \
V(Int64Constant) \
+ V(TaggedIndexConstant) \
V(Float32Constant) \
V(Float64Constant) \
V(ExternalConstant) \
@@ -49,31 +48,32 @@
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
-#define INNER_OP_LIST(V) \
- V(Select) \
- V(Phi) \
- V(EffectPhi) \
- V(InductionVariablePhi) \
- V(Checkpoint) \
- V(BeginRegion) \
- V(FinishRegion) \
- V(FrameState) \
- V(StateValues) \
- V(TypedStateValues) \
- V(ArgumentsElementsState) \
- V(ArgumentsLengthState) \
- V(ObjectState) \
- V(ObjectId) \
- V(TypedObjectState) \
- V(Call) \
- V(Parameter) \
- V(OsrValue) \
- V(LoopExit) \
- V(LoopExitValue) \
- V(LoopExitEffect) \
- V(Projection) \
- V(Retain) \
- V(MapGuard) \
+#define INNER_OP_LIST(V) \
+ V(Select) \
+ V(Phi) \
+ V(EffectPhi) \
+ V(InductionVariablePhi) \
+ V(Checkpoint) \
+ V(BeginRegion) \
+ V(FinishRegion) \
+ V(FrameState) \
+ V(StateValues) \
+ V(TypedStateValues) \
+ V(ArgumentsElementsState) \
+ V(ArgumentsLengthState) \
+ V(ObjectState) \
+ V(ObjectId) \
+ V(TypedObjectState) \
+ V(Call) \
+ V(Parameter) \
+ V(OsrValue) \
+ V(LoopExit) \
+ V(LoopExitValue) \
+ V(LoopExitEffect) \
+ V(Projection) \
+ V(Retain) \
+ V(MapGuard) \
+ V(FoldConstant) \
V(TypeGuard)
#define COMMON_OP_LIST(V) \
@@ -409,6 +409,7 @@
V(CheckSmi) \
V(CheckHeapObject) \
V(CheckFloat64Hole) \
+ V(CheckClosure) \
V(CheckNotTaggedHole) \
V(CheckEqualsInternalizedString) \
V(CheckEqualsSymbol) \
@@ -475,7 +476,8 @@
V(PoisonIndex) \
V(RuntimeAbort) \
V(AssertType) \
- V(DateNow)
+ V(DateNow) \
+ V(FastApiCall)
#define SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) \
V(SpeculativeBigIntAdd) \
@@ -837,6 +839,8 @@
V(I32x4LeU) \
V(I32x4GtU) \
V(I32x4GeU) \
+ V(I32x4Abs) \
+ V(I32x4BitMask) \
V(I16x8Splat) \
V(I16x8ExtractLaneU) \
V(I16x8ExtractLaneS) \
@@ -874,6 +878,8 @@
V(I16x8GtU) \
V(I16x8GeU) \
V(I16x8RoundingAverageU) \
+ V(I16x8Abs) \
+ V(I16x8BitMask) \
V(I8x16Splat) \
V(I8x16ExtractLaneU) \
V(I8x16ExtractLaneS) \
@@ -906,6 +912,8 @@
V(I8x16GtU) \
V(I8x16GeU) \
V(I8x16RoundingAverageU) \
+ V(I8x16Abs) \
+ V(I8x16BitMask) \
V(S128Load) \
V(S128Store) \
V(S128Zero) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 3745d10214..229c7864b2 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -20,21 +20,18 @@ namespace compiler {
OperationTyper::OperationTyper(JSHeapBroker* broker, Zone* zone)
: zone_(zone), cache_(TypeCache::Get()) {
Factory* factory = broker->isolate()->factory();
- infinity_ = Type::NewConstant(V8_INFINITY, zone);
- minus_infinity_ = Type::NewConstant(-V8_INFINITY, zone);
+ infinity_ = Type::Constant(V8_INFINITY, zone);
+ minus_infinity_ = Type::Constant(-V8_INFINITY, zone);
Type truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero.Maybe(Type::Integral32()));
singleton_empty_string_ =
- Type::HeapConstant(broker, factory->empty_string(), zone);
- singleton_NaN_string_ =
- Type::HeapConstant(broker, factory->NaN_string(), zone);
- singleton_zero_string_ =
- Type::HeapConstant(broker, factory->zero_string(), zone);
- singleton_false_ = Type::HeapConstant(broker, factory->false_value(), zone);
- singleton_true_ = Type::HeapConstant(broker, factory->true_value(), zone);
- singleton_the_hole_ =
- Type::HeapConstant(broker, factory->the_hole_value(), zone);
+ Type::Constant(broker, factory->empty_string(), zone);
+ singleton_NaN_string_ = Type::Constant(broker, factory->NaN_string(), zone);
+ singleton_zero_string_ = Type::Constant(broker, factory->zero_string(), zone);
+ singleton_false_ = Type::Constant(broker, factory->false_value(), zone);
+ singleton_true_ = Type::Constant(broker, factory->true_value(), zone);
+ singleton_the_hole_ = Type::Hole();
signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
@@ -1043,24 +1040,26 @@ Type OperationTyper::NumberMax(Type lhs, Type rhs) {
if (lhs.Maybe(Type::NaN()) || rhs.Maybe(Type::NaN())) {
type = Type::Union(type, Type::NaN(), zone());
}
- lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
- DCHECK(!lhs.IsNone());
- rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
- DCHECK(!rhs.IsNone());
- if (lhs.Is(cache_->kIntegerOrMinusZero) &&
- rhs.Is(cache_->kIntegerOrMinusZero)) {
- // TODO(turbofan): This could still be improved in ruling out -0 when
- // one of the inputs' min is 0.
- double max = std::max(lhs.Max(), rhs.Max());
- double min = std::max(lhs.Min(), rhs.Min());
+ if (lhs.Maybe(Type::MinusZero()) || rhs.Maybe(Type::MinusZero())) {
+ type = Type::Union(type, Type::MinusZero(), zone());
+ }
+
+ if (!lhs.Is(cache_->kIntegerOrMinusZeroOrNaN) ||
+ !rhs.Is(cache_->kIntegerOrMinusZeroOrNaN)) {
+ return Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
+ }
+
+ lhs = Type::Intersect(lhs, cache_->kInteger, zone());
+ rhs = Type::Intersect(rhs, cache_->kInteger, zone());
+
+ if (!lhs.IsNone() || !rhs.IsNone()) {
+ double min = std::max(lhs.IsNone() ? -V8_INFINITY : lhs.Min(),
+ rhs.IsNone() ? -V8_INFINITY : rhs.Min());
+ double max = std::max(lhs.IsNone() ? -V8_INFINITY : lhs.Max(),
+ rhs.IsNone() ? -V8_INFINITY : rhs.Max());
type = Type::Union(type, Type::Range(min, max, zone()), zone());
- if (min <= 0.0 && 0.0 <= max &&
- (lhs.Maybe(Type::MinusZero()) || rhs.Maybe(Type::MinusZero()))) {
- type = Type::Union(type, Type::MinusZero(), zone());
- }
- } else {
- type = Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
}
+
return type;
}
@@ -1075,22 +1074,26 @@ Type OperationTyper::NumberMin(Type lhs, Type rhs) {
if (lhs.Maybe(Type::NaN()) || rhs.Maybe(Type::NaN())) {
type = Type::Union(type, Type::NaN(), zone());
}
- lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
- DCHECK(!lhs.IsNone());
- rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
- DCHECK(!rhs.IsNone());
- if (lhs.Is(cache_->kIntegerOrMinusZero) &&
- rhs.Is(cache_->kIntegerOrMinusZero)) {
- double max = std::min(lhs.Max(), rhs.Max());
- double min = std::min(lhs.Min(), rhs.Min());
+ if (lhs.Maybe(Type::MinusZero()) || rhs.Maybe(Type::MinusZero())) {
+ type = Type::Union(type, Type::MinusZero(), zone());
+ }
+
+ if (!lhs.Is(cache_->kIntegerOrMinusZeroOrNaN) ||
+ !rhs.Is(cache_->kIntegerOrMinusZeroOrNaN)) {
+ return Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
+ }
+
+ lhs = Type::Intersect(lhs, cache_->kInteger, zone());
+ rhs = Type::Intersect(rhs, cache_->kInteger, zone());
+
+ if (!lhs.IsNone() || !rhs.IsNone()) {
+ double min = std::min(lhs.IsNone() ? +V8_INFINITY : lhs.Min(),
+ rhs.IsNone() ? +V8_INFINITY : rhs.Min());
+ double max = std::min(lhs.IsNone() ? +V8_INFINITY : lhs.Max(),
+ rhs.IsNone() ? +V8_INFINITY : rhs.Max());
type = Type::Union(type, Type::Range(min, max, zone()), zone());
- if (min <= 0.0 && 0.0 <= max &&
- (lhs.Maybe(Type::MinusZero()) || rhs.Maybe(Type::MinusZero()))) {
- type = Type::Union(type, Type::MinusZero(), zone());
- }
- } else {
- type = Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
}
+
return type;
}
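NumberMax and NumberMin now add NaN and MinusZero to the result type up front, tolerate operands whose integer projection is empty (previously ruled out by DCHECKs), and use the appropriate infinity as the identity for an empty range. A worked instance of the max rule:

    #include <algorithm>
    #include <cassert>
    #include <limits>

    // For integer ranges [l_min, l_max] and [r_min, r_max], NumberMax's
    // result range is [max(l_min, r_min), max(l_max, r_max)]; an empty side
    // contributes -infinity, i.e. it never raises either bound.
    int main() {
      const double kNegInf = -std::numeric_limits<double>::infinity();
      const double l_min = 0, l_max = 5, r_min = 3, r_max = 10;
      assert(std::max(l_min, r_min) == 3);    // lower bound of Max([0,5], [3,10])
      assert(std::max(l_max, r_max) == 10);   // upper bound
      assert(std::max(kNegInf, r_min) == 3);  // empty lhs: bounds come from rhs
      return 0;
    }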
@@ -1242,14 +1245,17 @@ Type OperationTyper::StrictEqual(Type lhs, Type rhs) {
(lhs.Max() < rhs.Min() || lhs.Min() > rhs.Max())) {
return singleton_false();
}
- if ((lhs.Is(Type::Hole()) || rhs.Is(Type::Hole())) && !lhs.Maybe(rhs)) {
- return singleton_false();
- }
- if (lhs.IsHeapConstant() && rhs.Is(lhs)) {
+ if (lhs.IsSingleton() && rhs.Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
+ DCHECK(lhs.Is(rhs));
+ DCHECK(lhs.Is(Type::NonInternal()) || lhs.Is(Type::Hole()));
return singleton_true();
}
+ if ((lhs.Is(Type::Unique()) || rhs.Is(Type::Unique())) && !lhs.Maybe(rhs)) {
+ // One of the inputs has a canonical representation but types don't overlap.
+ return singleton_false();
+ }
return Type::Boolean();
}
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index c393aa257b..23e844e164 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -12,6 +12,7 @@
#include "src/base/functional.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
+#include "src/objects/feedback-cell.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -47,9 +48,9 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
kNoThrow = 1 << 5, // Can never generate an exception.
kNoDeopt = 1 << 6, // Can never generate an eager deoptimization exit.
kFoldable = kNoRead | kNoWrite,
- kKontrol = kNoDeopt | kFoldable | kNoThrow,
kEliminatable = kNoDeopt | kNoWrite | kNoThrow,
- kPure = kNoDeopt | kNoRead | kNoWrite | kNoThrow | kIdempotent
+ kKontrol = kNoDeopt | kFoldable | kNoThrow,
+ kPure = kKontrol | kIdempotent
};
// List of all bits, for the visualizer.
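The reorder lets kPure be written in terms of kKontrol; the flag sets themselves are unchanged. A stand-alone check of that equivalence — bit positions for the flags not visible in the hunk are assumptions, chosen to match their relative order:

    #include <cassert>

    enum Flags {
      kIdempotent = 1 << 2,  // assumed position (not shown in the hunk)
      kNoRead = 1 << 3,      // assumed position
      kNoWrite = 1 << 4,     // assumed position
      kNoThrow = 1 << 5,     // from the hunk
      kNoDeopt = 1 << 6,     // from the hunk
      kFoldable = kNoRead | kNoWrite,
      kEliminatable = kNoDeopt | kNoWrite | kNoThrow,
      kKontrol = kNoDeopt | kFoldable | kNoThrow,
      kPure = kKontrol | kIdempotent
    };

    int main() {
      // The new spelling equals the old explicit one.
      assert(kPure == (kNoDeopt | kNoRead | kNoWrite | kNoThrow | kIdempotent));
      return 0;
    }

kEliminatable (kNoDeopt | kNoWrite | kNoThrow) is the same set the Load, LoadTransform, and atomic-load operators in machine-operator.cc switched to above.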
@@ -230,20 +231,10 @@ struct OpEqualTo<double> : public base::bit_equal_to<double> {};
template <>
struct OpHash<double> : public base::bit_hash<double> {};
-template <>
-struct OpEqualTo<Handle<HeapObject>> : public Handle<HeapObject>::equal_to {};
-template <>
-struct OpHash<Handle<HeapObject>> : public Handle<HeapObject>::hash {};
-
-template <>
-struct OpEqualTo<Handle<String>> : public Handle<String>::equal_to {};
-template <>
-struct OpHash<Handle<String>> : public Handle<String>::hash {};
-
-template <>
-struct OpEqualTo<Handle<ScopeInfo>> : public Handle<ScopeInfo>::equal_to {};
-template <>
-struct OpHash<Handle<ScopeInfo>> : public Handle<ScopeInfo>::hash {};
+template <class T>
+struct OpEqualTo<Handle<T>> : public Handle<T>::equal_to {};
+template <class T>
+struct OpHash<Handle<T>> : public Handle<T>::hash {};
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index e7285f0074..71cef6fd70 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -2621,6 +2621,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
jsgraph, nullptr, source_positions, &node_origins,
should_optimize_jumps ? &jump_opt : nullptr, options);
+ PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
+ RuntimeCallTimerScope timer_scope(isolate,
+ RuntimeCallCounterId::kOptimizeCode);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
@@ -2672,6 +2675,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
data.graph(), data.jsgraph(), data.schedule(),
data.source_positions(), data.node_origins(),
data.jump_optimization_info(), options);
+ PipelineJobScope second_scope(&second_data,
+ isolate->counters()->runtime_call_stats());
second_data.set_verify_graph(FLAG_verify_csa);
PipelineImpl second_pipeline(&second_data);
second_pipeline.SelectInstructionsAndAssemble(call_descriptor);
@@ -2765,7 +2770,8 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
static_cast<int>(code_generator->GetHandlerTableOffset()));
result.instr_buffer = instruction_buffer->ReleaseBuffer();
result.source_positions = code_generator->GetSourcePositionTable();
- result.protected_instructions = code_generator->GetProtectedInstructions();
+ result.protected_instructions_data =
+ code_generator->GetProtectedInstructionsData();
result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result.result_tier = wasm::ExecutionTier::kTurbofan;
@@ -2972,7 +2978,8 @@ void Pipeline::GenerateCodeForWasmFunction(
result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result->source_positions = code_generator->GetSourcePositionTable();
- result->protected_instructions = code_generator->GetProtectedInstructions();
+ result->protected_instructions_data =
+ code_generator->GetProtectedInstructionsData();
result->result_tier = wasm::ExecutionTier::kTurbofan;
if (data.info()->trace_turbo_json_enabled()) {
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 62f1ca739c..d0bac9bfc0 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -21,6 +21,7 @@ Reduction RedundancyElimination::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kCheckBigInt:
case IrOpcode::kCheckBounds:
+ case IrOpcode::kCheckClosure:
case IrOpcode::kCheckEqualsInternalizedString:
case IrOpcode::kCheckEqualsSymbol:
case IrOpcode::kCheckFloat64Hole:
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 2c0cb76708..45d0ec994c 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -889,12 +889,12 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32 ||
use_info.type_check() == TypeCheckKind::kArrayIndex) {
- bool indentify_zeros = use_info.truncation().IdentifiesZeroAndMinusZero();
+ bool identify_zeros = use_info.truncation().IdentifiesZeroAndMinusZero();
if (output_type.Is(Type::Signed32()) ||
- (indentify_zeros && output_type.Is(Type::Signed32OrMinusZero()))) {
+ (identify_zeros && output_type.Is(Type::Signed32OrMinusZero()))) {
return node;
} else if (output_type.Is(Type::Unsigned32()) ||
- (indentify_zeros &&
+ (identify_zeros &&
output_type.Is(Type::Unsigned32OrMinusZero()))) {
op = simplified()->CheckedUint32ToInt32(use_info.feedback());
} else {
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index ad9e1d6041..78fa1fbe9d 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -196,6 +196,9 @@ class UseInfo {
static UseInfo Float32() {
return UseInfo(MachineRepresentation::kFloat32, Truncation::Any());
}
+ static UseInfo Float64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Any());
+ }
static UseInfo TruncatingFloat64(
IdentifyZeros identify_zeros = kDistinguishZeros) {
return UseInfo(MachineRepresentation::kFloat64,
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 8ae19bc0c5..c6e352d90a 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -87,7 +87,6 @@ namespace compiler {
#define IGNORED_BYTECODE_LIST(V) \
V(IncBlockCounter) \
- V(StackCheck) \
V(ThrowSuperAlreadyCalledIfNotHole) \
V(ThrowSuperNotCalledIfHole)
@@ -2082,7 +2081,8 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
speculation_mode = feedback.AsCall().speculation_mode();
// Incorporate target feedback into hints copy to simplify processing.
base::Optional<HeapObjectRef> target = feedback.AsCall().target();
- if (target.has_value() && target->map().is_callable()) {
+ if (target.has_value() &&
+ (target->map().is_callable() || target->IsFeedbackCell())) {
callee = callee.Copy(zone());
// TODO(mvstanton): if the map isn't callable then we have an allocation
// site, and it may make sense to add the Array JSFunction constant.
@@ -2092,8 +2092,19 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
new_target->AddConstant(target->object(), zone(), broker());
callee.AddConstant(target->object(), zone(), broker());
} else {
- // Call; target is callee.
- callee.AddConstant(target->object(), zone(), broker());
+ // Call; target is feedback cell or callee.
+ if (target->IsFeedbackCell() &&
+ target->AsFeedbackCell().value().IsFeedbackVector()) {
+ FeedbackVectorRef vector =
+ target->AsFeedbackCell().value().AsFeedbackVector();
+ vector.Serialize();
+ VirtualClosure virtual_closure(
+ vector.shared_function_info().object(), vector.object(),
+ Hints());
+ callee.AddVirtualClosure(virtual_closure, zone(), broker());
+ } else {
+ callee.AddConstant(target->object(), zone(), broker());
+ }
}
}
}
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index b5e0410d50..db5a736855 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -4,8 +4,10 @@
#include "src/compiler/simd-scalar-lowering.h"
+#include "src/codegen/machine-type.h"
#include "src/compiler/diamond.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
@@ -111,6 +113,7 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4LeU) \
V(I32x4GtU) \
V(I32x4GeU) \
+ V(I32x4Abs) \
V(S128And) \
V(S128Or) \
V(S128Xor) \
@@ -183,7 +186,9 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8LtS) \
V(I16x8LeS) \
V(I16x8LtU) \
- V(I16x8LeU)
+ V(I16x8LeU) \
+ V(I16x8RoundingAverageU) \
+ V(I16x8Abs)
#define FOREACH_INT8X16_OPCODE(V) \
V(I8x16Splat) \
@@ -214,7 +219,9 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16LtU) \
V(I8x16LeU) \
V(S8x16Swizzle) \
- V(S8x16Shuffle)
+ V(S8x16Shuffle) \
+ V(I8x16RoundingAverageU) \
+ V(I8x16Abs)
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
switch (simdType) {
@@ -268,6 +275,31 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
replacements_[node->id()].type = SimdType::kInt8x16;
break;
}
+ case IrOpcode::kLoadTransform: {
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+ switch (params.transformation) {
+ case LoadTransformation::kS8x16LoadSplat:
+ replacements_[node->id()].type = SimdType::kInt8x16;
+ break;
+ case LoadTransformation::kS16x8LoadSplat:
+ replacements_[node->id()].type = SimdType::kInt16x8;
+ break;
+ case LoadTransformation::kS32x4LoadSplat:
+ replacements_[node->id()].type = SimdType::kInt32x4;
+ break;
+ case LoadTransformation::kI16x8Load8x8S:
+ case LoadTransformation::kI16x8Load8x8U:
+ replacements_[node->id()].type = SimdType::kInt16x8;
+ break;
+ case LoadTransformation::kI32x4Load16x4S:
+ case LoadTransformation::kI32x4Load16x4U:
+ replacements_[node->id()].type = SimdType::kInt32x4;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ break;
+ }
default: {
switch (output->opcode()) {
case IrOpcode::kF32x4SConvertI32x4:
@@ -426,6 +458,84 @@ void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
}
}
+void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+ MachineType load_rep = MachineType::None();
+ SimdType load_type = type;
+
+ // Load extends have a different machine type for loading.
+ switch (params.transformation) {
+ case LoadTransformation::kI16x8Load8x8S:
+ load_rep = MachineType::Int8();
+ load_type = SimdType::kInt8x16;
+ break;
+ case LoadTransformation::kI16x8Load8x8U:
+ load_rep = MachineType::Uint8();
+ load_type = SimdType::kInt8x16;
+ break;
+ case LoadTransformation::kI32x4Load16x4S:
+ load_rep = MachineType::Int16();
+ load_type = SimdType::kInt16x8;
+ break;
+ case LoadTransformation::kI32x4Load16x4U:
+ load_rep = MachineType::Uint16();
+ load_type = SimdType::kInt16x8;
+ break;
+ case LoadTransformation::kS8x16LoadSplat:
+ case LoadTransformation::kS16x8LoadSplat:
+ case LoadTransformation::kS32x4LoadSplat:
+ load_rep = MachineTypeFrom(type);
+ break;
+ default:
+ // Lowering for s64x2 is not implemented since lowering for 64x2
+ // operations doesn't work properly yet.
+ UNIMPLEMENTED();
+ }
+
+ DCHECK_NE(load_rep, MachineType::None());
+
+ const Operator* load_op;
+ switch (params.kind) {
+ case LoadKind::kNormal:
+ load_op = machine()->Load(load_rep);
+ break;
+ case LoadKind::kUnaligned:
+ load_op = machine()->UnalignedLoad(load_rep);
+ break;
+ case LoadKind::kProtected:
+ load_op = machine()->ProtectedLoad(load_rep);
+ break;
+ }
+
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ int num_lanes = NumLanes(type);
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
+ Node* effect_input = node->InputAt(2);
+ Node* control_input = node->InputAt(3);
+
+ if (type != load_type) {
+ // We load a smaller lane size, then extend to a larger lane size. So use
+ // the smaller lane size to calculate the index nodes for loads, but only
+ // actually load half of those lanes.
+ Node** indices = zone()->NewArray<Node*>(num_lanes * 2);
+ GetIndexNodes(index, indices, load_type);
+ for (int i = num_lanes - 1; i >= 0; --i) {
+ rep_nodes[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
+ control_input);
+ effect_input = rep_nodes[i];
+ }
+ } else {
+ // Load splat, load from the same index for every lane.
+ for (int i = num_lanes - 1; i >= 0; --i) {
+ rep_nodes[i] =
+ graph()->NewNode(load_op, base, index, effect_input, control_input);
+ effect_input = rep_nodes[i];
+ }
+ }
+ ReplaceNode(node, rep_nodes, num_lanes);
+}
+
void SimdScalarLowering::LowerStoreOp(Node* node) {
// For store operations, use the replacement type of the input instead of
// the one of the affected node.
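LowerLoadTransformOp handles two shapes: load-extends read num_lanes narrow values and widen each one, while load-splats read a single value and replicate it into every lane. A scalar sketch of both, taking kI16x8Load8x8S and kS32x4LoadSplat as the examples:

    #include <cstdint>

    // Load-extend (kI16x8Load8x8S): eight sign-extending Int8 loads; each
    // lane is held sign-extended in a 32-bit node, as in the lowered graph.
    void Load8x8S(const int8_t* base, int32_t lanes[8]) {
      for (int i = 0; i < 8; ++i) {
        lanes[i] = static_cast<int32_t>(base[i]);
      }
    }

    // Load-splat (kS32x4LoadSplat): one load, reused for all four lanes.
    void Load32x4Splat(const int32_t* base, int32_t lanes[4]) {
      int32_t value = *base;
      for (int i = 0; i < 4; ++i) lanes[i] = value;
    }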
@@ -973,6 +1083,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerLoadOp(node, rep_type);
break;
}
+ case IrOpcode::kLoadTransform: {
+ LowerLoadTransformOp(node, rep_type);
+ break;
+ }
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore:
case IrOpcode::kProtectedStore: {
@@ -1127,6 +1241,30 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kI32x4Abs:
+ case IrOpcode::kI16x8Abs:
+ case IrOpcode::kI8x16Abs: {
+ // From https://stackoverflow.com/a/14194764
+ // abs(x) = (x XOR y) - y
+ Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ // It's fine to shift by 31 even for i8x16 since each node is
+ // effectively expanded to 32 bits.
+ Node* y = graph()->NewNode(machine()->Word32Sar(), rep[i],
+ mcgraph_->Int32Constant(31));
+ rep_node[i] = graph()->NewNode(
+ machine()->Int32Sub(),
+ graph()->NewNode(machine()->Word32Xor(), rep[i], y), y);
+ // This case only sees the Abs opcodes, so test against those when
+ // fixing up the upper bits of the narrow lanes.
+ if (node->opcode() == IrOpcode::kI16x8Abs) {
+ rep_node[i] = FixUpperBits(rep_node[i], kShift16);
+ } else if (node->opcode() == IrOpcode::kI8x16Abs) {
+ rep_node[i] = FixUpperBits(rep_node[i], kShift8);
+ }
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+ break;
+ }
case IrOpcode::kS128Zero: {
DCHECK_EQ(0, node->InputCount());
Node* rep_node[kNumLanes32];
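The Abs lowering above relies on the branchless identity abs(x) = (x ^ y) - y with y = x >> 31 (arithmetic shift): y is 0 for non-negative x, leaving x untouched, and -1 (all ones) for negative x, turning the pair into (~x) + 1, i.e. two's-complement negation. A scalar sketch:

    #include <cstdint>

    // Branchless abs, per https://stackoverflow.com/a/14194764. As with any
    // two's-complement abs, BranchlessAbs(INT32_MIN) wraps to INT32_MIN.
    int32_t BranchlessAbs(int32_t x) {
      int32_t y = x >> 31;  // 0 if x >= 0, -1 (all ones) if x < 0
      return (x ^ y) - y;   // x if y == 0; (~x) + 1 == -x if y == -1
    }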
@@ -1506,6 +1644,26 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kI8x16RoundingAverageU:
+ case IrOpcode::kI16x8RoundingAverageU: {
+ DCHECK_EQ(2, node->InputCount());
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+ int num_lanes = NumLanes(rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ // rounding_average(left, right) = (left + right + 1) >> 1
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* left_plus_right_plus_one = graph()->NewNode(
+ machine()->Int32Add(),
+ graph()->NewNode(machine()->Int32Add(), rep_left[i], rep_right[i]),
+ mcgraph_->Int32Constant(1));
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shr(), left_plus_right_plus_one,
+ mcgraph_->Int32Constant(1));
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+ break;
+ }
default: { DefaultLowering(node); }
}
}
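The rounding average above computes (a + b + 1) >> 1, which rounds ties up. Because every lane is expanded to 32 bits before the adds, the intermediate a + b + 1 cannot overflow for 8- or 16-bit unsigned inputs. A scalar sketch:

    #include <cstdint>

    // Per-lane unsigned rounding average, as lowered above. Widening to
    // 32 bits keeps a + b + 1 <= 0x1FFFF, so no overflow is possible.
    uint32_t RoundingAverageU16(uint16_t a, uint16_t b) {
      return (static_cast<uint32_t>(a) + b + 1) >> 1;
    }
    // Example: RoundingAverageU16(1, 2) == 2 -- the half rounds up.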
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 76723fcc77..d91e6285f4 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -85,6 +85,7 @@ class SimdScalarLowering {
void SetLoweredType(Node* node, Node* output);
void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
void LowerLoadOp(Node* node, SimdType type);
+ void LowerLoadTransformOp(Node* node, SimdType type);
void LowerStoreOp(Node* node);
void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
bool not_horizontal = true);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 8997a5a831..aa222af96c 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -6,8 +6,11 @@
#include <limits>
+#include "include/v8-fast-api-calls.h"
#include "src/base/bits.h"
+#include "src/base/small-vector.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
@@ -1643,7 +1646,8 @@ class RepresentationSelector {
}
void VisitCheckBounds(Node* node, SimplifiedLowering* lowering) {
- CheckParameters const& p = CheckParametersOf(node->op());
+ CheckBoundsParameters const& p = CheckBoundsParametersOf(node->op());
+ FeedbackSource const& feedback = p.check_parameters().feedback();
Type const index_type = TypeOf(node->InputAt(0));
Type const length_type = TypeOf(node->InputAt(1));
if (length_type.Is(Type::Unsigned31())) {
@@ -1654,8 +1658,7 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) {
- CheckBoundsParameters::Mode mode =
- CheckBoundsParameters::kDeoptOnOutOfBounds;
+ CheckBoundsParameters::Mode mode = p.mode();
if (lowering->poisoning_level_ ==
PoisoningMitigationLevel::kDontPoison &&
(index_type.IsNone() || length_type.IsNone() ||
@@ -1666,32 +1669,126 @@ class RepresentationSelector {
mode = CheckBoundsParameters::kAbortOnOutOfBounds;
}
NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(p.feedback(), mode));
+ node, simplified()->CheckedUint32Bounds(feedback, mode));
}
} else {
- VisitBinop(node, UseInfo::CheckedTaggedAsArrayIndex(p.feedback()),
+ VisitBinop(node, UseInfo::CheckedTaggedAsArrayIndex(feedback),
UseInfo::Word(), MachineType::PointerRepresentation());
if (lower()) {
if (jsgraph_->machine()->Is64()) {
NodeProperties::ChangeOp(
- node, simplified()->CheckedUint64Bounds(p.feedback()));
+ node, simplified()->CheckedUint64Bounds(feedback, p.mode()));
} else {
NodeProperties::ChangeOp(
- node,
- simplified()->CheckedUint32Bounds(
- p.feedback(), CheckBoundsParameters::kDeoptOnOutOfBounds));
+ node, simplified()->CheckedUint32Bounds(feedback, p.mode()));
}
}
}
} else {
- DCHECK(length_type.Is(type_cache_->kPositiveSafeInteger));
+ CHECK(length_type.Is(type_cache_->kPositiveSafeInteger));
VisitBinop(node,
- UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, p.feedback()),
+ UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback),
UseInfo::Word64(), MachineRepresentation::kWord64);
if (lower()) {
NodeProperties::ChangeOp(
- node, simplified()->CheckedUint64Bounds(p.feedback()));
+ node, simplified()->CheckedUint64Bounds(feedback, p.mode()));
+ }
+ }
+ }
+
+ static MachineType MachineTypeFor(CTypeInfo::Type type) {
+ switch (type) {
+ case CTypeInfo::Type::kVoid:
+ return MachineType::Int32();
+ case CTypeInfo::Type::kBool:
+ return MachineType::Bool();
+ case CTypeInfo::Type::kInt32:
+ return MachineType::Int32();
+ case CTypeInfo::Type::kUint32:
+ return MachineType::Uint32();
+ case CTypeInfo::Type::kInt64:
+ return MachineType::Int64();
+ case CTypeInfo::Type::kUint64:
+ return MachineType::Uint64();
+ case CTypeInfo::Type::kFloat32:
+ return MachineType::Float32();
+ case CTypeInfo::Type::kFloat64:
+ return MachineType::Float64();
+ case CTypeInfo::Type::kUnwrappedApiObject:
+ return MachineType::Pointer();
+ }
+ }
+
+ UseInfo UseInfoForFastApiCallArgument(CTypeInfo::Type type,
+ FeedbackSource const& feedback) {
+ switch (type) {
+ case CTypeInfo::Type::kVoid:
+ UNREACHABLE();
+ case CTypeInfo::Type::kBool:
+ return UseInfo::Bool();
+ case CTypeInfo::Type::kInt32:
+ case CTypeInfo::Type::kUint32:
+ case CTypeInfo::Type::kFloat32:
+ return UseInfo::CheckedNumberAsWord32(feedback);
+ case CTypeInfo::Type::kInt64:
+ return UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback);
+ case CTypeInfo::Type::kFloat64:
+ return UseInfo::CheckedNumberAsFloat64(kIdentifyZeros, feedback);
+ // UseInfo::Word64 does not propagate any TypeCheckKind, so it relies
+ // on the implicit assumption that the Word64 representation only holds
+ // Numbers, which is no longer true with BigInts. For now, BigInts are
+ // handled very conservatively to make sure they don't fall into that
+ // pit, but future changes may break this invariant.
+ case CTypeInfo::Type::kUint64:
+ return UseInfo::Word64();
+ case CTypeInfo::Type::kUnwrappedApiObject:
+ return UseInfo::Word();
+ }
+ }
+
+ static constexpr int kInitialArgumentsCount = 10;
+
+ void VisitFastApiCall(Node* node) {
+ FastApiCallParameters const& params = FastApiCallParametersOf(node->op());
+ const CFunctionInfo* c_signature = params.signature();
+ int c_arg_count = c_signature->ArgumentCount();
+ int value_input_count = node->op()->ValueInputCount();
+ // function, ... C args
+ CHECK_EQ(c_arg_count + 1, value_input_count);
+
+ base::SmallVector<UseInfo, kInitialArgumentsCount> arg_use_info(
+ c_arg_count);
+ ProcessInput(node, 0, UseInfo::Word());
+ // Propagate representation information from TypeInfo.
+ for (int i = 0; i < c_arg_count; i++) {
+ arg_use_info[i] = UseInfoForFastApiCallArgument(
+ c_signature->ArgumentInfo(i).GetType(), params.feedback());
+ ProcessInput(node, i + 1, arg_use_info[i]);
+ }
+
+ MachineType return_type =
+ MachineTypeFor(c_signature->ReturnInfo().GetType());
+ SetOutput(node, return_type.representation());
+
+ if (lower()) {
+ MachineSignature::Builder builder(graph()->zone(), 1, c_arg_count);
+ builder.AddReturn(return_type);
+ for (int i = 0; i < c_arg_count; ++i) {
+ MachineType machine_type =
+ MachineTypeFor(c_signature->ArgumentInfo(i).GetType());
+ // Node inputs are offset by one relative to this loop (input 0 is the
+ // function), so arg_use_info[i] corresponds to node input i + 1.
+ DCHECK_EQ(arg_use_info[i].representation(),
+ machine_type.representation());
+ builder.AddParam(machine_type);
}
+
+ CallDescriptor* call_descriptor = Linkage::GetSimplifiedCDescriptor(
+ graph()->zone(), builder.Build(), CallDescriptor::kNoFlags);
+
+ call_descriptor->SetCFunctionInfo(c_signature);
+
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
}
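VisitFastApiCall mirrors the C signature onto a MachineSignature: one return type plus one parameter per C argument, each mapped through MachineTypeFor. For a hypothetical fast-call target int32_t Add(int32_t, int32_t), the lowering builds the equivalent of the following (a sketch reusing only calls from this hunk; Add is illustrative):

    // Hypothetical target: int32_t Add(int32_t a, int32_t b);
    MachineSignature::Builder builder(graph()->zone(), 1, 2);
    builder.AddReturn(MachineType::Int32());  // CTypeInfo::Type::kInt32
    builder.AddParam(MachineType::Int32());   // argument 0
    builder.AddParam(MachineType::Int32());   // argument 1
    CallDescriptor* call_descriptor = Linkage::GetSimplifiedCDescriptor(
        graph()->zone(), builder.Build(), CallDescriptor::kNoFlags);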
@@ -3428,6 +3525,12 @@ class RepresentationSelector {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
return;
}
+ case IrOpcode::kCheckClosure: {
+ VisitUnop(node,
+ UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kConvertTaggedHoleToUndefined: {
if (InputIs(node, Type::NumberOrOddball()) &&
truncation.IsUsedAsWord32()) {
@@ -3510,6 +3613,7 @@ class RepresentationSelector {
return VisitObjectState(node);
case IrOpcode::kObjectId:
return SetOutput(node, MachineRepresentation::kTaggedPointer);
+
case IrOpcode::kTypeGuard: {
// We just get rid of the sigma here, choosing the best representation
// for the sigma's type.
@@ -3530,6 +3634,10 @@ class RepresentationSelector {
return;
}
+ case IrOpcode::kFoldConstant:
+ VisitInputs(node);
+ return SetOutput(node, MachineRepresentation::kTaggedPointer);
+
case IrOpcode::kFinishRegion:
VisitInputs(node);
// Assume the output is tagged pointer.
@@ -3557,6 +3665,11 @@ class RepresentationSelector {
return;
}
+ case IrOpcode::kFastApiCall: {
+ VisitFastApiCall(node);
+ return;
+ }
+
// Operators with all inputs tagged and no or tagged output have uniform
// handling.
case IrOpcode::kEnd:
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 41547fd132..2b1e0ab99e 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -4,11 +4,13 @@
#include "src/compiler/simplified-operator.h"
+#include "include/v8-fast-api-calls.h"
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
#include "src/handles/handles-inl.h"
+#include "src/objects/feedback-cell.h"
#include "src/objects/map.h"
#include "src/objects/name.h"
#include "src/objects/objects-inl.h"
@@ -820,7 +822,6 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckedUint32Mod, 2, 1)
#define CHECKED_WITH_FEEDBACK_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
V(CheckNumber, 1, 1) \
V(CheckSmi, 1, 1) \
V(CheckString, 1, 1) \
@@ -834,11 +835,13 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckedTaggedToTaggedSigned, 1, 1) \
V(CheckedUint32ToInt32, 1, 1) \
V(CheckedUint32ToTaggedSigned, 1, 1) \
- V(CheckedUint64Bounds, 2, 1) \
V(CheckedUint64ToInt32, 1, 1) \
V(CheckedUint64ToTaggedSigned, 1, 1)
-#define CHECKED_BOUNDS_OP_LIST(V) V(CheckedUint32Bounds)
+#define CHECKED_BOUNDS_OP_LIST(V) \
+ V(CheckBounds) \
+ V(CheckedUint32Bounds) \
+ V(CheckedUint64Bounds)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, value_input_count, control_input_count) \
@@ -851,16 +854,14 @@ struct SimplifiedOperatorGlobalCache final {
PURE_OP_LIST(PURE)
#undef PURE
-#define EFFECT_DEPENDENT(Name, properties, value_input_count, \
- control_input_count) \
- struct Name##Operator final : public Operator { \
- Name##Operator() \
- : Operator(IrOpcode::k##Name, \
- Operator::kNoDeopt | Operator::kNoWrite | \
- Operator::kNoThrow | properties, \
- #Name, value_input_count, 1, control_input_count, 1, 1, \
- 0) {} \
- }; \
+#define EFFECT_DEPENDENT(Name, properties, value_input_count, \
+ control_input_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, Operator::kEliminatable | properties, \
+ #Name, value_input_count, 1, control_input_count, 1, 1, \
+ 0) {} \
+ }; \
Name##Operator k##Name;
EFFECT_DEPENDENT_OP_LIST(EFFECT_DEPENDENT)
#undef EFFECT_DEPENDENT
@@ -1126,10 +1127,9 @@ struct SimplifiedOperatorGlobalCache final {
LoadStackArgumentOperator()
: Operator( // --
IrOpcode::kLoadStackArgument, // opcode
- Operator::kNoDeopt | Operator::kNoThrow |
- Operator::kNoWrite, // flags
- "LoadStackArgument", // name
- 2, 1, 1, 1, 1, 0) {} // counts
+ Operator::kEliminatable, // flags
+ "LoadStackArgument", // name
+ 2, 1, 1, 1, 1, 0) {} // counts
};
LoadStackArgumentOperator kLoadStackArgument;
@@ -1256,6 +1256,16 @@ const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
"AssertType", 1, 0, 0, 1, 0, 0, type);
}
+const Operator* SimplifiedOperatorBuilder::FastApiCall(
+ const CFunctionInfo* signature, FeedbackSource const& feedback) {
+ // function, c args
+ int value_input_count = signature->ArgumentCount() + 1;
+ return new (zone()) Operator1<FastApiCallParameters>(
+ IrOpcode::kFastApiCall, Operator::kNoThrow, "FastApiCall",
+ value_input_count, 1, 1, 1, 1, 0,
+ FastApiCallParameters(signature, feedback));
+}
+
const Operator* SimplifiedOperatorBuilder::CheckIf(
DeoptimizeReason reason, const FeedbackSource& feedback) {
if (!feedback.IsValid()) {
@@ -1476,6 +1486,21 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
1, 1, 1, 0, hint);
}
+const Operator* SimplifiedOperatorBuilder::CheckClosure(
+ const Handle<FeedbackCell>& feedback_cell) {
+ return new (zone()) Operator1<Handle<FeedbackCell>>( // --
+ IrOpcode::kCheckClosure, // opcode
+ Operator::kNoThrow | Operator::kNoWrite, // flags
+ "CheckClosure", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ feedback_cell); // parameter
+}
+
+Handle<FeedbackCell> FeedbackCellOf(const Operator* op) {
+ DCHECK(IrOpcode::kCheckClosure == op->opcode());
+ return OpParameter<Handle<FeedbackCell>>(op);
+}
+
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
NumberOperationHint hint, const FeedbackSource& feedback) {
if (!feedback.IsValid()) {
@@ -1589,7 +1614,9 @@ std::ostream& operator<<(std::ostream& os, CheckParameters const& p) {
}
CheckParameters const& CheckParametersOf(Operator const* op) {
- if (op->opcode() == IrOpcode::kCheckedUint32Bounds) {
+ if (op->opcode() == IrOpcode::kCheckBounds ||
+ op->opcode() == IrOpcode::kCheckedUint32Bounds ||
+ op->opcode() == IrOpcode::kCheckedUint64Bounds) {
return OpParameter<CheckBoundsParameters>(op).check_parameters();
}
#define MAKE_OR(name, arg2, arg3) op->opcode() == IrOpcode::k##name ||
@@ -1622,7 +1649,9 @@ std::ostream& operator<<(std::ostream& os, CheckBoundsParameters const& p) {
}
CheckBoundsParameters const& CheckBoundsParametersOf(Operator const* op) {
- CHECK_EQ(op->opcode(), IrOpcode::kCheckedUint32Bounds);
+ DCHECK(op->opcode() == IrOpcode::kCheckBounds ||
+ op->opcode() == IrOpcode::kCheckedUint32Bounds ||
+ op->opcode() == IrOpcode::kCheckedUint64Bounds);
return OpParameter<CheckBoundsParameters>(op);
}
@@ -1679,12 +1708,30 @@ int NewArgumentsElementsMappedCountOf(const Operator* op) {
return OpParameter<int>(op);
}
+FastApiCallParameters const& FastApiCallParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kFastApiCall, op->opcode());
+ return OpParameter<FastApiCallParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, FastApiCallParameters const& p) {
+ return os << p.signature() << ", " << p.feedback();
+}
+
+size_t hash_value(FastApiCallParameters const& p) {
+ return base::hash_combine(p.signature(),
+ FeedbackSource::Hash()(p.feedback()));
+}
+
+bool operator==(FastApiCallParameters const& lhs,
+ FastApiCallParameters const& rhs) {
+ return lhs.signature() == rhs.signature() && lhs.feedback() == rhs.feedback();
+}
+
const Operator* SimplifiedOperatorBuilder::Allocate(Type type,
AllocationType allocation) {
return new (zone()) Operator1<AllocateParameters>(
- IrOpcode::kAllocate,
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, "Allocate",
- 1, 1, 1, 1, 1, 0, AllocateParameters(type, allocation));
+ IrOpcode::kAllocate, Operator::kEliminatable, "Allocate", 1, 1, 1, 1, 1,
+ 0, AllocateParameters(type, allocation));
}
const Operator* SimplifiedOperatorBuilder::AllocateRaw(
@@ -1696,10 +1743,8 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw(
allocation == AllocationType::kYoung &&
!FLAG_young_generation_large_objects));
return new (zone()) Operator1<AllocateParameters>(
- IrOpcode::kAllocateRaw,
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
- "AllocateRaw", 1, 1, 1, 1, 1, 1,
- AllocateParameters(type, allocation, allow_large_objects));
+ IrOpcode::kAllocateRaw, Operator::kEliminatable, "AllocateRaw", 1, 1, 1,
+ 1, 1, 1, AllocateParameters(type, allocation, allow_large_objects));
}
#define SPECULATIVE_NUMBER_BINOP(Name) \
@@ -1747,10 +1792,8 @@ ACCESS_OP_LIST(ACCESS)
#undef ACCESS
const Operator* SimplifiedOperatorBuilder::LoadMessage() {
- return new (zone())
- Operator(IrOpcode::kLoadMessage,
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
- "LoadMessage", 1, 1, 1, 1, 1, 0);
+ return new (zone()) Operator(IrOpcode::kLoadMessage, Operator::kEliminatable,
+ "LoadMessage", 1, 1, 1, 1, 1, 0);
}
const Operator* SimplifiedOperatorBuilder::StoreMessage() {
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 4fc9a4be78..251bb43678 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -106,7 +106,9 @@ struct FieldAccess {
write_barrier_kind(write_barrier_kind),
load_sensitivity(load_sensitivity),
const_field_info(const_field_info),
- is_store_in_literal(is_store_in_literal) {}
+ is_store_in_literal(is_store_in_literal) {
+ DCHECK_GE(offset, 0);
+ }
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -300,6 +302,9 @@ bool operator==(CheckFloat64HoleParameters const&,
bool operator!=(CheckFloat64HoleParameters const&,
CheckFloat64HoleParameters const&);
+// Parameter for CheckClosure node.
+Handle<FeedbackCell> FeedbackCellOf(const Operator* op);
+
enum class CheckTaggedInputMode : uint8_t {
kNumber,
kNumberOrOddball,
@@ -580,6 +585,30 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) V8_WARN_UNUSED_RESULT;
int NewArgumentsElementsMappedCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+class FastApiCallParameters {
+ public:
+ explicit FastApiCallParameters(const CFunctionInfo* signature,
+ FeedbackSource const& feedback)
+ : signature_(signature), feedback_(feedback) {}
+
+ const CFunctionInfo* signature() const { return signature_; }
+ FeedbackSource const& feedback() const { return feedback_; }
+
+ private:
+ const CFunctionInfo* signature_;
+ const FeedbackSource feedback_;
+};
+
+FastApiCallParameters const& FastApiCallParametersOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ FastApiCallParameters const&);
+
+size_t hash_value(FastApiCallParameters const&);
+
+bool operator==(FastApiCallParameters const&, FastApiCallParameters const&);
+
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
// indexing into objects and arrays, etc.
@@ -754,7 +783,15 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* MapGuard(ZoneHandleSet<Map> maps);
- const Operator* CheckBounds(const FeedbackSource& feedback);
+ const Operator* CheckBounds(const FeedbackSource& feedback,
+ CheckBoundsParameters::Mode mode =
+ CheckBoundsParameters::kDeoptOnOutOfBounds);
+ const Operator* CheckedUint32Bounds(const FeedbackSource& feedback,
+ CheckBoundsParameters::Mode mode);
+ const Operator* CheckedUint64Bounds(const FeedbackSource& feedback,
+ CheckBoundsParameters::Mode mode);
+
+ const Operator* CheckClosure(const Handle<FeedbackCell>& feedback_cell);
const Operator* CheckEqualsInternalizedString();
const Operator* CheckEqualsSymbol();
const Operator* CheckFloat64Hole(CheckFloat64HoleMode, FeedbackSource const&);
@@ -799,11 +836,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const FeedbackSource& feedback);
const Operator* CheckedUint32Div();
const Operator* CheckedUint32Mod();
- const Operator* CheckedUint32Bounds(const FeedbackSource& feedback,
- CheckBoundsParameters::Mode mode);
const Operator* CheckedUint32ToInt32(const FeedbackSource& feedback);
const Operator* CheckedUint32ToTaggedSigned(const FeedbackSource& feedback);
- const Operator* CheckedUint64Bounds(const FeedbackSource& feedback);
const Operator* CheckedUint64ToInt32(const FeedbackSource& feedback);
const Operator* CheckedUint64ToTaggedSigned(const FeedbackSource& feedback);
@@ -920,6 +954,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* DateNow();
+ // Stores the signature and feedback of a fast C call
+ const Operator* FastApiCall(const CFunctionInfo* signature,
+ FeedbackSource const& feedback);
+
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 2bb5a0a4b5..b0fc8a9b71 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -4,6 +4,7 @@
#include "src/compiler/state-values-utils.h"
+#include "src/compiler/common-operator.h"
#include "src/utils/bit-vector.h"
namespace v8 {
@@ -240,9 +241,9 @@ void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
auto itend = access.end();
for (i = 0; it != itend; ++it, ++i) {
if (liveness == nullptr || liveness->Contains(liveness_offset + i)) {
- DCHECK_EQ((*it).node, values[i]);
+ DCHECK_EQ(it.node(), values[i]);
} else {
- DCHECK_NULL((*it).node);
+ DCHECK_NULL(it.node());
}
}
DCHECK_EQ(static_cast<size_t>(i), count);
@@ -329,13 +330,20 @@ void StateValuesAccess::iterator::Pop() {
current_depth_--;
}
-bool StateValuesAccess::iterator::done() const { return current_depth_ < 0; }
-
void StateValuesAccess::iterator::Advance() {
Top()->Advance();
EnsureValid();
}
+size_t StateValuesAccess::iterator::AdvanceTillNotEmpty() {
+ size_t count = 0;
+ while (!done() && Top()->IsEmpty()) {
+ count += Top()->AdvanceToNextRealOrEnd();
+ EnsureValid();
+ }
+ return count;
+}
+
void StateValuesAccess::iterator::EnsureValid() {
while (true) {
SparseInputMask::InputIterator* top = Top();
@@ -376,21 +384,18 @@ Node* StateValuesAccess::iterator::node() { return Top()->Get(nullptr); }
MachineType StateValuesAccess::iterator::type() {
Node* parent = Top()->parent();
+ DCHECK(!Top()->IsEmpty());
if (parent->opcode() == IrOpcode::kStateValues) {
return MachineType::AnyTagged();
} else {
DCHECK_EQ(IrOpcode::kTypedStateValues, parent->opcode());
- if (Top()->IsEmpty()) {
- return MachineType::None();
- } else {
- ZoneVector<MachineType> const* types = MachineTypesOf(parent->op());
- return (*types)[Top()->real_index()];
- }
+ ZoneVector<MachineType> const* types = MachineTypesOf(parent->op());
+ return (*types)[Top()->real_index()];
}
}
-bool StateValuesAccess::iterator::operator!=(iterator const& other) {
+bool StateValuesAccess::iterator::operator!=(iterator const& other) const {
// We only allow comparison with end().
CHECK(other.done());
return !done();
@@ -406,8 +411,7 @@ StateValuesAccess::TypedNode StateValuesAccess::iterator::operator*() {
return TypedNode(node(), type());
}
-
-size_t StateValuesAccess::size() {
+size_t StateValuesAccess::size() const {
size_t count = 0;
SparseInputMask mask = SparseInputMaskOf(node_->op());
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 0ff5d218f1..7913f1df04 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_STATE_VALUES_UTILS_H_
#include <array>
+
#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
@@ -91,20 +92,23 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
class V8_EXPORT_PRIVATE iterator {
public:
- // Bare minimum of operators needed for range iteration.
- bool operator!=(iterator const& other);
+ bool operator!=(iterator const& other) const;
iterator& operator++();
TypedNode operator*();
+ Node* node();
+ bool done() const { return current_depth_ < 0; }
+
+ // Returns the number of empty nodes that were skipped over.
+ size_t AdvanceTillNotEmpty();
+
private:
friend class StateValuesAccess;
iterator() : current_depth_(-1) {}
explicit iterator(Node* node);
- Node* node();
MachineType type();
- bool done() const;
void Advance();
void EnsureValid();
@@ -119,9 +123,9 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
explicit StateValuesAccess(Node* node) : node_(node) {}
- size_t size();
- iterator begin() { return iterator(node_); }
- iterator end() { return iterator(); }
+ size_t size() const;
+ iterator begin() const { return iterator(node_); }
+ iterator end() const { return iterator(); }
private:
Node* node_;
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 3329154dbd..b71ea8455d 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -116,7 +116,7 @@ class V8_EXPORT_PRIVATE TypeCache final {
Type const kJSArrayBufferViewByteOffsetType = kJSArrayBufferByteLengthType;
// The JSTypedArray::length property always contains an untagged number in
- // the range [0, kMaxSmiValue].
+ // the range [0, JSTypedArray::kMaxLength].
Type const kJSTypedArrayLengthType =
CreateRange(0.0, JSTypedArray::kMaxLength);
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 0c81670f0b..50e2a640f6 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -26,9 +26,9 @@ TypedOptimization::TypedOptimization(Editor* editor,
jsgraph_(jsgraph),
broker_(broker),
true_type_(
- Type::HeapConstant(broker, factory()->true_value(), graph()->zone())),
- false_type_(Type::HeapConstant(broker, factory()->false_value(),
- graph()->zone())),
+ Type::Constant(broker, factory()->true_value(), graph()->zone())),
+ false_type_(
+ Type::Constant(broker, factory()->false_value(), graph()->zone())),
type_cache_(TypeCache::Get()) {}
TypedOptimization::~TypedOptimization() = default;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index eacec94271..6d53531f1c 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -68,150 +68,75 @@ class Typer::Visitor : public Reducer {
Reduction Reduce(Node* node) override {
if (node->op()->ValueOutputCount() == 0) return NoChange();
- switch (node->opcode()) {
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- return UpdateType(node, TypeBinaryOp(node, x##Typer));
- JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- return UpdateType(node, Type##x(node));
- DECLARE_CASE(Start)
- DECLARE_CASE(IfException)
- // VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
- COMMON_OP_LIST(DECLARE_CASE)
- SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
- SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
- JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
- JS_OBJECT_OP_LIST(DECLARE_CASE)
- JS_CONTEXT_OP_LIST(DECLARE_CASE)
- JS_OTHER_OP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- return UpdateType(node, TypeBinaryOp(node, x));
- SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
- SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE)
- SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
- SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- return UpdateType(node, TypeUnaryOp(node, x));
- SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
- SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE)
- SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
- SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-#define DECLARE_CASE(x) case IrOpcode::k##x:
- DECLARE_CASE(Loop)
- DECLARE_CASE(Branch)
- DECLARE_CASE(IfTrue)
- DECLARE_CASE(IfFalse)
- DECLARE_CASE(IfSuccess)
- DECLARE_CASE(Switch)
- DECLARE_CASE(IfValue)
- DECLARE_CASE(IfDefault)
- DECLARE_CASE(Merge)
- DECLARE_CASE(Deoptimize)
- DECLARE_CASE(DeoptimizeIf)
- DECLARE_CASE(DeoptimizeUnless)
- DECLARE_CASE(TrapIf)
- DECLARE_CASE(TrapUnless)
- DECLARE_CASE(Return)
- DECLARE_CASE(TailCall)
- DECLARE_CASE(Terminate)
- DECLARE_CASE(OsrNormalEntry)
- DECLARE_CASE(OsrLoopEntry)
- DECLARE_CASE(Throw)
- DECLARE_CASE(End)
- SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
- SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
- MACHINE_SIMD_OP_LIST(DECLARE_CASE)
- MACHINE_OP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
- break;
- }
- return NoChange();
+ return UpdateType(node, TypeNode(node));
}
Type TypeNode(Node* node) {
switch (node->opcode()) {
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: return TypeBinaryOp(node, x##Typer);
- JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-#define DECLARE_CASE(x) case IrOpcode::k##x: return Type##x(node);
- DECLARE_CASE(Start)
- DECLARE_CASE(IfException)
- // VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
- COMMON_OP_LIST(DECLARE_CASE)
- SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
- SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
- JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
- JS_OBJECT_OP_LIST(DECLARE_CASE)
- JS_CONTEXT_OP_LIST(DECLARE_CASE)
- JS_OTHER_OP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- return TypeBinaryOp(node, x);
- SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
- SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE)
- SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
- SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- return TypeUnaryOp(node, x);
- SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
- SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE)
- SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
- SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-#define DECLARE_CASE(x) case IrOpcode::k##x:
- DECLARE_CASE(Loop)
- DECLARE_CASE(Branch)
- DECLARE_CASE(IfTrue)
- DECLARE_CASE(IfFalse)
- DECLARE_CASE(IfSuccess)
- DECLARE_CASE(Switch)
- DECLARE_CASE(IfValue)
- DECLARE_CASE(IfDefault)
- DECLARE_CASE(Merge)
- DECLARE_CASE(Deoptimize)
- DECLARE_CASE(DeoptimizeIf)
- DECLARE_CASE(DeoptimizeUnless)
- DECLARE_CASE(TrapIf)
- DECLARE_CASE(TrapUnless)
- DECLARE_CASE(Return)
- DECLARE_CASE(TailCall)
- DECLARE_CASE(Terminate)
- DECLARE_CASE(OsrNormalEntry)
- DECLARE_CASE(OsrLoopEntry)
- DECLARE_CASE(Throw)
- DECLARE_CASE(End)
- SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
- SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
- MACHINE_SIMD_OP_LIST(DECLARE_CASE)
- MACHINE_OP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
- break;
+#define DECLARE_UNARY_CASE(x) \
+ case IrOpcode::k##x: \
+ return Type##x(Operand(node, 0));
+ JS_SIMPLE_UNOP_LIST(DECLARE_UNARY_CASE)
+ SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_UNARY_CASE)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_UNARY_CASE)
+ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_UNARY_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_UNARY_CASE)
+#undef DECLARE_UNARY_CASE
+#define DECLARE_BINARY_CASE(x) \
+ case IrOpcode::k##x: \
+ return Type##x(Operand(node, 0), Operand(node, 1));
+ JS_SIMPLE_BINOP_LIST(DECLARE_BINARY_CASE)
+ SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_BINARY_CASE)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_BINARY_CASE)
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_BINARY_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_BINARY_CASE)
+#undef DECLARE_BINARY_CASE
+#define DECLARE_OTHER_CASE(x) \
+ case IrOpcode::k##x: \
+ return Type##x(node);
+ DECLARE_OTHER_CASE(Start)
+ DECLARE_OTHER_CASE(IfException)
+ COMMON_OP_LIST(DECLARE_OTHER_CASE)
+ SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_OTHER_CASE)
+ SIMPLIFIED_OTHER_OP_LIST(DECLARE_OTHER_CASE)
+ JS_OBJECT_OP_LIST(DECLARE_OTHER_CASE)
+ JS_CONTEXT_OP_LIST(DECLARE_OTHER_CASE)
+ JS_OTHER_OP_LIST(DECLARE_OTHER_CASE)
+#undef DECLARE_OTHER_CASE
+#define DECLARE_IMPOSSIBLE_CASE(x) case IrOpcode::k##x:
+ DECLARE_IMPOSSIBLE_CASE(Loop)
+ DECLARE_IMPOSSIBLE_CASE(Branch)
+ DECLARE_IMPOSSIBLE_CASE(IfTrue)
+ DECLARE_IMPOSSIBLE_CASE(IfFalse)
+ DECLARE_IMPOSSIBLE_CASE(IfSuccess)
+ DECLARE_IMPOSSIBLE_CASE(Switch)
+ DECLARE_IMPOSSIBLE_CASE(IfValue)
+ DECLARE_IMPOSSIBLE_CASE(IfDefault)
+ DECLARE_IMPOSSIBLE_CASE(Merge)
+ DECLARE_IMPOSSIBLE_CASE(Deoptimize)
+ DECLARE_IMPOSSIBLE_CASE(DeoptimizeIf)
+ DECLARE_IMPOSSIBLE_CASE(DeoptimizeUnless)
+ DECLARE_IMPOSSIBLE_CASE(TrapIf)
+ DECLARE_IMPOSSIBLE_CASE(TrapUnless)
+ DECLARE_IMPOSSIBLE_CASE(Return)
+ DECLARE_IMPOSSIBLE_CASE(TailCall)
+ DECLARE_IMPOSSIBLE_CASE(Terminate)
+ DECLARE_IMPOSSIBLE_CASE(Throw)
+ DECLARE_IMPOSSIBLE_CASE(End)
+ SIMPLIFIED_CHANGE_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
+ SIMPLIFIED_CHECKED_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
+ MACHINE_SIMD_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
+ MACHINE_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
+#undef DECLARE_IMPOSSIBLE_CASE
+ UNREACHABLE();
}
- UNREACHABLE();
}
Type TypeConstant(Handle<Object> value);
+ bool InductionVariablePhiTypeIsPrefixedPoint(
+ InductionVariable* induction_var);
+
private:
Typer* typer_;
LoopVariableOptimizer* induction_vars_;
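The switch above is now table-driven through typed helpers: unary operators dispatch on the operand's type, binary operators on both operand types. For instance, DECLARE_UNARY_CASE(JSBitwiseNot) and DECLARE_BINARY_CASE(JSAdd) expand to the equivalent of:

    case IrOpcode::kJSBitwiseNot:
      return TypeJSBitwiseNot(Operand(node, 0));
    case IrOpcode::kJSAdd:
      return TypeJSAdd(Operand(node, 0), Operand(node, 1));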
@@ -225,7 +150,12 @@ class Typer::Visitor : public Reducer {
COMMON_OP_LIST(DECLARE_METHOD)
SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_OTHER_OP_LIST(DECLARE_METHOD)
- JS_OP_LIST(DECLARE_METHOD)
+ JS_OBJECT_OP_LIST(DECLARE_METHOD)
+ JS_CONTEXT_OP_LIST(DECLARE_METHOD)
+ JS_OTHER_OP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+#define DECLARE_METHOD(x) inline Type Type##x(Type input);
+ JS_SIMPLE_UNOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
Type TypeOrNone(Node* node) {
@@ -251,8 +181,10 @@ class Typer::Visitor : public Reducer {
using UnaryTyperFun = Type (*)(Type, Typer* t);
using BinaryTyperFun = Type (*)(Type, Type, Typer* t);
- Type TypeUnaryOp(Node* node, UnaryTyperFun);
- Type TypeBinaryOp(Node* node, BinaryTyperFun);
+ inline Type TypeUnaryOp(Node* node, UnaryTyperFun);
+ inline Type TypeBinaryOp(Node* node, BinaryTyperFun);
+ inline Type TypeUnaryOp(Type input, UnaryTyperFun);
+ inline Type TypeBinaryOp(Type left, Type right, BinaryTyperFun);
static Type BinaryNumberOpTyper(Type lhs, Type rhs, Typer* t,
BinaryTyperFun f);
@@ -300,7 +232,28 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-
+#define DECLARE_METHOD(Name) \
+ inline Type Type##Name(Type left, Type right) { \
+ return TypeBinaryOp(left, right, Name##Typer); \
+ }
+ JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+#define DECLARE_METHOD(Name) \
+ inline Type Type##Name(Type left, Type right) { \
+ return TypeBinaryOp(left, right, Name); \
+ }
+ SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+#define DECLARE_METHOD(Name) \
+ inline Type Type##Name(Type input) { return TypeUnaryOp(input, Name); }
+ SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
static Type ObjectIsArrayBufferView(Type, Typer*);
static Type ObjectIsBigInt(Type, Typer*);
static Type ObjectIsCallable(Type, Typer*);
@@ -431,6 +384,14 @@ void Typer::Run(const NodeVector& roots,
graph_reducer.ReduceGraph();
if (induction_vars != nullptr) {
+ // Validate the types computed by TypeInductionVariablePhi.
+ for (auto entry : induction_vars->induction_variables()) {
+ InductionVariable* induction_var = entry.second;
+ if (induction_var->phi()->opcode() == IrOpcode::kInductionVariablePhi) {
+ CHECK(visitor.InductionVariablePhiTypeIsPrefixedPoint(induction_var));
+ }
+ }
+
induction_vars->ChangeToPhisAndInsertGuards();
}
}
@@ -461,12 +422,20 @@ void Typer::Decorator::Decorate(Node* node) {
Type Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
Type input = Operand(node, 0);
+ return TypeUnaryOp(input, f);
+}
+
+Type Typer::Visitor::TypeUnaryOp(Type input, UnaryTyperFun f) {
return input.IsNone() ? Type::None() : f(input, typer_);
}
Type Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
Type left = Operand(node, 0);
Type right = Operand(node, 1);
+ return TypeBinaryOp(left, right, f);
+}
+
+Type Typer::Visitor::TypeBinaryOp(Type left, Type right, BinaryTyperFun f) {
return left.IsNone() || right.IsNone() ? Type::None()
: f(left, right, typer_);
}
@@ -561,13 +530,12 @@ Type Typer::Visitor::ToBoolean(Type type, Typer* t) {
Type Typer::Visitor::ToInteger(Type type, Typer* t) {
// ES6 section 7.1.4 ToInteger ( argument )
type = ToNumber(type, t);
- if (type.Is(t->cache_->kIntegerOrMinusZero)) return type;
+ if (type.Is(t->cache_->kInteger)) return type;
if (type.Is(t->cache_->kIntegerOrMinusZeroOrNaN)) {
- return Type::Union(
- Type::Intersect(type, t->cache_->kIntegerOrMinusZero, t->zone()),
- t->cache_->kSingletonZero, t->zone());
+ return Type::Union(Type::Intersect(type, t->cache_->kInteger, t->zone()),
+ t->cache_->kSingletonZero, t->zone());
}
- return t->cache_->kIntegerOrMinusZero;
+ return t->cache_->kInteger;
}
@@ -579,10 +547,10 @@ Type Typer::Visitor::ToLength(Type type, Typer* t) {
double min = type.Min();
double max = type.Max();
if (max <= 0.0) {
- return Type::NewConstant(0, t->zone());
+ return Type::Constant(0, t->zone());
}
if (min >= kMaxSafeInteger) {
- return Type::NewConstant(kMaxSafeInteger, t->zone());
+ return Type::Constant(kMaxSafeInteger, t->zone());
}
if (min <= 0.0) min = 0.0;
if (max >= kMaxSafeInteger) max = kMaxSafeInteger;
@@ -802,6 +770,8 @@ Type Typer::Visitor::TypeInt32Constant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeInt64Constant(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeTaggedIndexConstant(Node* node) { UNREACHABLE(); }
+
Type Typer::Visitor::TypeRelocatableInt32Constant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeRelocatableInt64Constant(Node* node) { UNREACHABLE(); }
@@ -812,7 +782,7 @@ Type Typer::Visitor::TypeFloat64Constant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeNumberConstant(Node* node) {
double number = OpParameter<double>(node->op());
- return Type::NewConstant(number, zone());
+ return Type::Constant(number, zone());
}
Type Typer::Visitor::TypeHeapConstant(Node* node) {
@@ -850,11 +820,13 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
Type initial_type = Operand(node, 0);
Type increment_type = Operand(node, 2);
- // Fallback to normal phi typing in a variety of cases: when the induction
- // variable is not initially of type Integer (we want to work with ranges
- // below), when the increment is zero (in that case, phi typing will generally
- // yield a better type), and when the induction variable can become NaN
- // (through addition/subtraction of opposing infinities).
+ // Fallback to normal phi typing in a variety of cases:
+ // - when the induction variable is not initially of type Integer, because we
+ // want to work with ranges in the algorithm below.
+ // - when the increment is zero, because in that case normal phi typing will
+ // generally yield a more precise type.
+ // - when the induction variable can become NaN (through addition/subtraction
+ // of opposing infinities), because the code below can't handle that case.
if (initial_type.IsNone() ||
increment_type.Is(typer_->cache_->kSingletonZero) ||
!initial_type.Is(typer_->cache_->kInteger) ||
@@ -873,9 +845,8 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
return type;
}
- // Now process the bounds.
auto res = induction_vars_->induction_variables().find(node->id());
- DCHECK(res != induction_vars_->induction_variables().end());
+ DCHECK_NE(res, induction_vars_->induction_variables().end());
InductionVariable* induction_var = res->second;
InductionVariable::ArithmeticType arithmetic_type = induction_var->Type();
@@ -888,13 +859,13 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
increment_min = increment_type.Min();
increment_max = increment_type.Max();
} else {
- DCHECK_EQ(InductionVariable::ArithmeticType::kSubtraction, arithmetic_type);
+ DCHECK_EQ(arithmetic_type, InductionVariable::ArithmeticType::kSubtraction);
increment_min = -increment_type.Max();
increment_max = -increment_type.Min();
}
if (increment_min >= 0) {
- // increasing sequence
+ // Increasing sequence.
min = initial_type.Min();
for (auto bound : induction_var->upper_bounds()) {
Type bound_type = TypeOrNone(bound.bound);
@@ -914,7 +885,7 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
// The upper bound must be at least the initial value's upper bound.
max = std::max(max, initial_type.Max());
} else if (increment_max <= 0) {
- // decreasing sequence
+ // Decreasing sequence.
max = initial_type.Max();
for (auto bound : induction_var->lower_bounds()) {
Type bound_type = TypeOrNone(bound.bound);
@@ -935,9 +906,12 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
min = std::min(min, initial_type.Min());
} else {
// If the increment can be both positive and negative, the variable can go
- // arbitrarily far.
- return typer_->cache_->kInteger;
+ // arbitrarily far. Use the maximal range in that case. Note that this may
+ // be less precise than what ordinary typing would produce.
+ min = -V8_INFINITY;
+ max = +V8_INFINITY;
}
+
if (FLAG_trace_turbo_loop) {
StdoutStream{} << std::setprecision(10) << "Loop ("
<< NodeProperties::GetControlInput(node)->id()
@@ -949,9 +923,68 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
<< " for phi " << node->id() << ": (" << min << ", " << max
<< ")\n";
}
+
return Type::Range(min, max, typer_->zone());
}
+bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
+ InductionVariable* induction_var) {
+ Node* node = induction_var->phi();
+ DCHECK_EQ(node->opcode(), IrOpcode::kInductionVariablePhi);
+ Type type = NodeProperties::GetType(node);
+ Type initial_type = Operand(node, 0);
+ Node* arith = node->InputAt(1);
+ Type increment_type = Operand(node, 2);
+
+ // Intersect {type} with useful bounds.
+ for (auto bound : induction_var->upper_bounds()) {
+ Type bound_type = TypeOrNone(bound.bound);
+ if (!bound_type.Is(typer_->cache_->kInteger)) continue;
+ if (!bound_type.IsNone()) {
+ bound_type = Type::Range(
+ -V8_INFINITY,
+ bound_type.Max() - (bound.kind == InductionVariable::kStrict),
+ zone());
+ }
+ type = Type::Intersect(type, bound_type, typer_->zone());
+ }
+ for (auto bound : induction_var->lower_bounds()) {
+ Type bound_type = TypeOrNone(bound.bound);
+ if (!bound_type.Is(typer_->cache_->kInteger)) continue;
+ if (!bound_type.IsNone()) {
+ bound_type = Type::Range(
+ bound_type.Min() + (bound.kind == InductionVariable::kStrict),
+ +V8_INFINITY, typer_->zone());
+ }
+ type = Type::Intersect(type, bound_type, typer_->zone());
+ }
+
+ // Apply ordinary typing to the "increment" operation.
+ // clang-format off
+ switch (arith->opcode()) {
+#define CASE(x) \
+ case IrOpcode::k##x: \
+ type = Type##x(type, increment_type); \
+ break;
+ CASE(JSAdd)
+ CASE(JSSubtract)
+ CASE(NumberAdd)
+ CASE(NumberSubtract)
+ CASE(SpeculativeNumberAdd)
+ CASE(SpeculativeNumberSubtract)
+ CASE(SpeculativeSafeIntegerAdd)
+ CASE(SpeculativeSafeIntegerSubtract)
+#undef CASE
+ default:
+ UNREACHABLE();
+ }
+ // clang-format on
+
+ type = Type::Union(initial_type, type, typer_->zone());
+
+ return type.Is(NodeProperties::GetType(node));
+}
+
Type Typer::Visitor::TypeEffectPhi(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeLoopExit(Node* node) { UNREACHABLE(); }
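InductionVariablePhiTypeIsPrefixedPoint validates that the phi's computed type is a pre-fixpoint of one loop iteration: after narrowing the type by the recorded upper and lower bounds and re-typing the increment arithmetic, unioning the result with the initial type must stay inside the original type. Informally, the check asserts

    Union(initial_type, TypeArith(Intersect(type, bounds), increment_type))
        .Is(type)

where TypeArith is the ordinary typing rule for the phi's add or subtract input.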
@@ -1005,6 +1038,8 @@ Type Typer::Visitor::TypeTypedObjectState(Node* node) {
Type Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
+Type Typer::Visitor::TypeFastApiCall(Node* node) { return Type::Any(); }
+
Type Typer::Visitor::TypeProjection(Node* node) {
Type const type = Operand(node, 0);
if (type.Is(Type::None())) return Type::None();
@@ -1022,6 +1057,8 @@ Type Typer::Visitor::TypeTypeGuard(Node* node) {
return typer_->operation_typer()->TypeTypeGuard(node->op(), type);
}
+Type Typer::Visitor::TypeFoldConstant(Node* node) { return Operand(node, 0); }
+
Type Typer::Visitor::TypeDead(Node* node) { return Type::None(); }
Type Typer::Visitor::TypeDeadValue(Node* node) { return Type::None(); }
@@ -1042,9 +1079,10 @@ Type Typer::Visitor::JSEqualTyper(Type lhs, Type rhs, Typer* t) {
(lhs.Max() < rhs.Min() || lhs.Min() > rhs.Max())) {
return t->singleton_false_;
}
- if (lhs.IsHeapConstant() && rhs.Is(lhs)) {
+ if (lhs.IsSingleton() && rhs.Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
+ DCHECK(lhs.Is(rhs));
return t->singleton_true_;
}
return Type::Boolean();
@@ -1190,61 +1228,33 @@ Type Typer::Visitor::JSExponentiateTyper(Type lhs, Type rhs, Typer* t) {
// JS unary operators.
-Type Typer::Visitor::TypeJSBitwiseNot(Node* node) {
- return TypeUnaryOp(node, BitwiseNot);
-}
-
-Type Typer::Visitor::TypeJSDecrement(Node* node) {
- return TypeUnaryOp(node, Decrement);
-}
-
-Type Typer::Visitor::TypeJSIncrement(Node* node) {
- return TypeUnaryOp(node, Increment);
-}
-
-Type Typer::Visitor::TypeJSNegate(Node* node) {
- return TypeUnaryOp(node, Negate);
-}
+#define DEFINE_METHOD(Name) \
+ Type Typer::Visitor::TypeJS##Name(Type input) { \
+ return TypeUnaryOp(input, Name); \
+ }
+DEFINE_METHOD(BitwiseNot)
+DEFINE_METHOD(Decrement)
+DEFINE_METHOD(Increment)
+DEFINE_METHOD(Negate)
+DEFINE_METHOD(ToLength)
+DEFINE_METHOD(ToName)
+DEFINE_METHOD(ToNumber)
+DEFINE_METHOD(ToNumberConvertBigInt)
+DEFINE_METHOD(ToNumeric)
+DEFINE_METHOD(ToObject)
+DEFINE_METHOD(ToString)
+#undef DEFINE_METHOD
Type Typer::Visitor::TypeTypeOf(Node* node) {
return Type::InternalizedString();
}
-
// JS conversion operators.
Type Typer::Visitor::TypeToBoolean(Node* node) {
return TypeUnaryOp(node, ToBoolean);
}
-Type Typer::Visitor::TypeJSToLength(Node* node) {
- return TypeUnaryOp(node, ToLength);
-}
-
-Type Typer::Visitor::TypeJSToName(Node* node) {
- return TypeUnaryOp(node, ToName);
-}
-
-Type Typer::Visitor::TypeJSToNumber(Node* node) {
- return TypeUnaryOp(node, ToNumber);
-}
-
-Type Typer::Visitor::TypeJSToNumberConvertBigInt(Node* node) {
- return TypeUnaryOp(node, ToNumberConvertBigInt);
-}
-
-Type Typer::Visitor::TypeJSToNumeric(Node* node) {
- return TypeUnaryOp(node, ToNumeric);
-}
-
-Type Typer::Visitor::TypeJSToObject(Node* node) {
- return TypeUnaryOp(node, ToObject);
-}
-
-Type Typer::Visitor::TypeJSToString(Node* node) {
- return TypeUnaryOp(node, ToString);
-}
-
// JS object operators.
Type Typer::Visitor::TypeJSCreate(Node* node) { return Type::Object(); }
@@ -1352,7 +1362,7 @@ Type Typer::Visitor::TypeJSLoadGlobal(Node* node) {
return Type::NonInternal();
}
-Type Typer::Visitor::TypeJSParseInt(Node* node) { return Type::Number(); }
+Type Typer::Visitor::TypeJSParseInt(Type input) { return Type::Number(); }
Type Typer::Visitor::TypeJSRegExpTest(Node* node) { return Type::Boolean(); }
@@ -2167,6 +2177,8 @@ Type Typer::Visitor::TypeCheckNotTaggedHole(Node* node) {
return type;
}
+Type Typer::Visitor::TypeCheckClosure(Node* node) { return Type::Function(); }
+
Type Typer::Visitor::TypeConvertReceiver(Node* node) {
Type arg = Operand(node, 0);
return typer_->operation_typer_.ConvertReceiver(arg);
@@ -2382,7 +2394,7 @@ Type Typer::Visitor::TypeAssertType(Node* node) { UNREACHABLE(); }
// Heap constants.
Type Typer::Visitor::TypeConstant(Handle<Object> value) {
- return Type::NewConstant(typer_->broker(), value, zone());
+ return Type::Constant(typer_->broker(), value, zone());
}
Type Typer::Visitor::TypeJSGetIterator(Node* node) { return Type::Any(); }
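The induction-variable check above amounts to a one-step stability test: clamp the phi's recorded type to the loop bound, push it through the increment operation's ordinary typing, re-include the initial value, and require the result to stay inside the recorded type. A minimal sketch of that shape, with a hypothetical TypeOfIncrement helper standing in for the CASE dispatch:

    // Sketch only: true when `recorded` is stable under one loop iteration.
    bool IsStableInductionType(Type initial, Type bound, Type increment,
                               Type recorded, Zone* zone) {
      Type type = Type::Intersect(recorded, bound, zone);  // clamp to the bound
      type = TypeOfIncrement(type, increment);  // e.g. NumberAdd/Subtract typing
      type = Type::Union(initial, type, zone);  // re-include the initial value
      return type.Is(recorded);                 // stable => recorded type sound
    }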
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 40adc60699..08e6e8023d 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -239,8 +239,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- case JS_FINALIZATION_GROUP_TYPE:
- case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
+ case JS_FINALIZATION_REGISTRY_TYPE:
+ case JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
@@ -322,6 +322,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PREPARSE_DATA_TYPE:
case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
+ case COVERAGE_INFO_TYPE:
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
@@ -368,7 +369,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
#define MAKE_TORQUE_CLASS_TYPE(V) case V:
- TORQUE_INTERNAL_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
+ TORQUE_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
@@ -563,7 +564,7 @@ bool Type::SlowIs(Type that) const {
}
if (that.IsRange()) {
- return (this->IsRange() && Contains(that.AsRange(), this->AsRange()));
+ return this->IsRange() && Contains(that.AsRange(), this->AsRange());
}
if (this->IsRange()) return false;
@@ -840,7 +841,7 @@ Type Type::NormalizeRangeAndBitset(Type range, bitset* bits, Zone* zone) {
return Type::Range(range_min, range_max, zone);
}
-Type Type::NewConstant(double value, Zone* zone) {
+Type Type::Constant(double value, Zone* zone) {
if (RangeType::IsInteger(value)) {
return Range(value, value, zone);
} else if (IsMinusZero(value)) {
@@ -853,14 +854,13 @@ Type Type::NewConstant(double value, Zone* zone) {
return OtherNumberConstant(value, zone);
}
-Type Type::NewConstant(JSHeapBroker* broker, Handle<i::Object> value,
- Zone* zone) {
+Type Type::Constant(JSHeapBroker* broker, Handle<i::Object> value, Zone* zone) {
ObjectRef ref(broker, value);
if (ref.IsSmi()) {
- return NewConstant(static_cast<double>(ref.AsSmi()), zone);
+ return Constant(static_cast<double>(ref.AsSmi()), zone);
}
if (ref.IsHeapNumber()) {
- return NewConstant(ref.AsHeapNumber().value(), zone);
+ return Constant(ref.AsHeapNumber().value(), zone);
}
if (ref.IsString() && !ref.IsInternalizedString()) {
return Type::String();
@@ -1093,15 +1093,12 @@ Type Type::OtherNumberConstant(double value, Zone* zone) {
}
// static
-Type Type::HeapConstant(JSHeapBroker* broker, Handle<i::Object> value,
- Zone* zone) {
- return FromTypeBase(
- HeapConstantType::New(HeapObjectRef(broker, value), zone));
-}
-
-// static
Type Type::HeapConstant(const HeapObjectRef& value, Zone* zone) {
- return HeapConstantType::New(value, zone);
+ DCHECK(!value.IsHeapNumber());
+ DCHECK_IMPLIES(value.IsString(), value.IsInternalizedString());
+ BitsetType::bitset bitset = BitsetType::Lub(value.GetHeapObjectType());
+ if (Type(bitset).IsSingleton()) return Type(bitset);
+ return HeapConstantType::New(value, bitset, zone);
}
// static
@@ -1114,11 +1111,6 @@ Type Type::Range(RangeType::Limits lims, Zone* zone) {
return FromTypeBase(RangeType::New(lims, zone));
}
-// static
-Type Type::Union(int length, Zone* zone) {
- return FromTypeBase(UnionType::New(length, zone));
-}
-
const HeapConstantType* Type::AsHeapConstant() const {
DCHECK(IsKind(TypeBase::kHeapConstant));
return static_cast<const HeapConstantType*>(ToTypeBase());
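The renamed Type::Constant(double) factory shown above folds the old NewConstant behavior into a single entry point: integral values become degenerate ranges, minus zero gets its singleton bitset (the NaN case is elided between the hunks shown, but presumably maps to Type::NaN() the same way), and everything else becomes an OtherNumberConstant. An illustration of the resulting mapping, based only on the dispatch visible above:

    // Illustrative only; mirrors the branches in Type::Constant(double).
    Type t1 = Type::Constant(42.0, zone);  // Range(42, 42): an integral value
    Type t2 = Type::Constant(-0.0, zone);  // MinusZero: singleton bitset
    Type t3 = Type::Constant(0.5, zone);   // OtherNumberConstant(0.5)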
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 0dc1aa77b0..fb1fa37d9d 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -191,7 +191,7 @@ namespace compiler {
V(SymbolOrReceiver, kSymbol | kReceiver) \
V(StringOrReceiver, kString | kReceiver) \
V(Unique, kBoolean | kUniqueName | kNull | \
- kUndefined | kReceiver) \
+ kUndefined | kHole | kReceiver) \
V(Internal, kHole | kExternalPointer | kOtherInternal) \
V(NonInternal, kPrimitive | kReceiver) \
V(NonBigInt, kNonBigIntPrimitive | kReceiver) \
@@ -369,26 +369,15 @@ class V8_EXPORT_PRIVATE Type {
static Type SignedSmall() { return NewBitset(BitsetType::SignedSmall()); }
static Type UnsignedSmall() { return NewBitset(BitsetType::UnsignedSmall()); }
- static Type OtherNumberConstant(double value, Zone* zone);
- static Type HeapConstant(JSHeapBroker* broker, Handle<i::Object> value,
- Zone* zone);
- static Type HeapConstant(const HeapObjectRef& value, Zone* zone);
+ static Type Constant(JSHeapBroker* broker, Handle<i::Object> value,
+ Zone* zone);
+ static Type Constant(double value, Zone* zone);
static Type Range(double min, double max, Zone* zone);
- static Type Range(RangeType::Limits lims, Zone* zone);
static Type Tuple(Type first, Type second, Type third, Zone* zone);
- static Type Union(int length, Zone* zone);
-
- // NewConstant is a factory that returns Constant, Range or Number.
- static Type NewConstant(JSHeapBroker* broker, Handle<i::Object> value,
- Zone* zone);
- static Type NewConstant(double value, Zone* zone);
static Type Union(Type type1, Type type2, Zone* zone);
static Type Intersect(Type type1, Type type2, Zone* zone);
- static Type For(HeapObjectType const& type) {
- return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(type)));
- }
static Type For(MapRef const& type) {
return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(type)));
}
@@ -412,6 +401,13 @@ class V8_EXPORT_PRIVATE Type {
}
bool IsTuple() const { return IsKind(TypeBase::kTuple); }
+ bool IsSingleton() const {
+ if (IsNone()) return false;
+ return Is(Type::Null()) || Is(Type::Undefined()) || Is(Type::MinusZero()) ||
+ Is(Type::NaN()) || Is(Type::Hole()) || IsHeapConstant() ||
+ (Is(Type::PlainNumber()) && Min() == Max());
+ }
+
const HeapConstantType* AsHeapConstant() const;
const OtherNumberConstantType* AsOtherNumberConstant() const;
const RangeType* AsRange() const;
@@ -459,6 +455,7 @@ class V8_EXPORT_PRIVATE Type {
friend size_t hash_value(Type type);
explicit Type(bitset bits) : payload_(bits | 1u) {}
+
Type(TypeBase* type_base) // NOLINT(runtime/explicit)
: payload_(reinterpret_cast<uintptr_t>(type_base)) {}
@@ -491,6 +488,10 @@ class V8_EXPORT_PRIVATE Type {
static Type NewBitset(bitset bits) { return Type(bits); }
+ static Type Range(RangeType::Limits lims, Zone* zone);
+ static Type OtherNumberConstant(double value, Zone* zone);
+ static Type HeapConstant(const HeapObjectRef& value, Zone* zone);
+
static bool Overlap(const RangeType* lhs, const RangeType* rhs);
static bool Contains(const RangeType* lhs, const RangeType* rhs);
@@ -553,10 +554,8 @@ class V8_EXPORT_PRIVATE HeapConstantType : public NON_EXPORTED_BASE(TypeBase) {
friend class Type;
friend class BitsetType;
- static HeapConstantType* New(const HeapObjectRef& heap_ref, Zone* zone) {
- DCHECK(!heap_ref.IsHeapNumber());
- DCHECK_IMPLIES(heap_ref.IsString(), heap_ref.IsInternalizedString());
- BitsetType::bitset bitset = BitsetType::Lub(heap_ref.GetHeapObjectType());
+ static HeapConstantType* New(const HeapObjectRef& heap_ref,
+ BitsetType::bitset bitset, Zone* zone) {
return new (zone->New(sizeof(HeapConstantType)))
HeapConstantType(bitset, heap_ref);
}
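Together with the constructor shuffling above (Range(Limits), OtherNumberConstant, and HeapConstant become private; Type::Constant is the public factory), the new IsSingleton() predicate gives the pipeline a uniform "exactly one inhabitant" test. A rough illustration of what it accepts, per the definition in this hunk:

    // Singletons per IsSingleton() above (illustrative, not exhaustive):
    //   Type::Null(), Type::Undefined(), Type::MinusZero(), Type::NaN(),
    //   Type::Hole(), any HeapConstant, any PlainNumber with Min() == Max().
    // Type::None() is excluded: it has zero inhabitants, not one.
    bool DenotesSameValue(Type lhs, Type rhs) {
      return lhs.IsSingleton() && rhs.Is(lhs);  // cf. JSEqualTyper earlier
    }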
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 4384c1c066..1be87c9463 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -21,6 +21,7 @@
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/state-values-utils.h"
#include "src/compiler/type-cache.h"
#include "src/utils/bit-vector.h"
#include "src/utils/ostreams.h"
@@ -150,8 +151,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// If this node has any effect outputs, make sure that it is
// consumed as an effect input somewhere else.
- // TODO(mvstanton): support this kind of verification for WASM
- // compiles, too.
+ // TODO(mvstanton): support this kind of verification for Wasm compiles, too.
if (code_type != kWasm && node->op()->EffectOutputCount() > 0) {
int effect_edges = 0;
for (Edge edge : node->use_edges()) {
@@ -387,14 +387,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
CheckNotTyped(node);
break;
- case IrOpcode::kOsrNormalEntry:
- case IrOpcode::kOsrLoopEntry:
- // Osr entries take one control and effect.
- CHECK_EQ(1, control_count);
- CHECK_EQ(1, effect_count);
- CHECK_EQ(2, input_count);
- CheckNotTyped(node);
- break;
// Common operators
// ----------------
@@ -413,6 +405,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
case IrOpcode::kInt32Constant: // TODO(turbofan): rename Word32Constant?
case IrOpcode::kInt64Constant: // TODO(turbofan): rename Word64Constant?
+ case IrOpcode::kTaggedIndexConstant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
case IrOpcode::kRelocatableInt32Constant:
@@ -550,11 +543,34 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
NodeProperties::GetValueInput(node, i)->opcode() ==
IrOpcode::kTypedStateValues);
}
- // The accumulator (InputAt(2)) cannot be kStateValues, but it can be
- // kTypedStateValues (to signal the type). Once AST graph builder
- // is removed, we should check this here. Until then, AST graph
- // builder can generate expression stack as InputAt(2), which can
- // still be kStateValues.
+
+ // Check that the parameter and local inputs match the declared counts, and
+ // that the stack (accumulator) input is empty for all but
+ // kInterpretedFunction frames, where it should have size one.
+ {
+ const FrameStateInfo& state_info = FrameStateInfoOf(node->op());
+ const FrameStateFunctionInfo* func_info = state_info.function_info();
+ CHECK_EQ(func_info->parameter_count(),
+ StateValuesAccess(node->InputAt(kFrameStateParametersInput))
+ .size());
+ CHECK_EQ(
+ func_info->local_count(),
+ StateValuesAccess(node->InputAt(kFrameStateLocalsInput)).size());
+
+ Node* accumulator = node->InputAt(kFrameStateStackInput);
+ if (func_info->type() == FrameStateType::kInterpretedFunction) {
+ // The accumulator (InputAt(2)) cannot be kStateValues.
+ // It can be kTypedStateValues (to signal the type) and it can have
+ // other Node types including that of the optimized_out HeapConstant.
+ CHECK_NE(accumulator->opcode(), IrOpcode::kStateValues);
+ if (accumulator->opcode() == IrOpcode::kTypedStateValues) {
+ CHECK_EQ(1, StateValuesAccess(accumulator).size());
+ }
+ } else {
+ CHECK(accumulator->opcode() == IrOpcode::kTypedStateValues ||
+ accumulator->opcode() == IrOpcode::kStateValues);
+ CHECK_EQ(0, StateValuesAccess(accumulator).size());
+ }
+ }
break;
}
case IrOpcode::kObjectId:
@@ -1394,6 +1410,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Unsigned32());
CheckTypeIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kCheckClosure:
+ // Any -> Function
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Function());
+ break;
case IrOpcode::kCheckHeapObject:
CheckValueInputIs(node, 0, Type::Any());
break;
@@ -1505,7 +1526,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
- // CheckTypeIs(node, FieldAccessOf(node->op()).type));
+ // CheckTypeIs(node, FieldAccessOf(node->op()).type);
break;
case IrOpcode::kLoadElement:
case IrOpcode::kLoadStackArgument:
@@ -1515,7 +1536,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, ElementAccessOf(node->op()).type));
break;
case IrOpcode::kLoadFromObject:
- // TODO(gsps): Can we check some types here?
+ CheckValueInputIs(node, 0, Type::Receiver());
break;
case IrOpcode::kLoadTypedElement:
break;
@@ -1567,6 +1588,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kTypeGuard:
CheckTypeIs(node, TypeGuardTypeOf(node->op()));
break;
+ case IrOpcode::kFoldConstant:
+ if (typing == TYPED) {
+ Type type = NodeProperties::GetType(node);
+ CHECK(type.IsSingleton());
+ CHECK(type.Equals(NodeProperties::GetType(node->InputAt(0))));
+ CHECK(type.Equals(NodeProperties::GetType(node->InputAt(1))));
+ }
+ break;
case IrOpcode::kDateNow:
CHECK_EQ(0, value_count);
CheckTypeIs(node, Type::Number());
@@ -1575,6 +1604,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::BigInt());
break;
+ case IrOpcode::kFastApiCall:
+ CHECK_GE(value_count, 1);
+ CheckValueInputIs(node, 0, Type::ExternalPointer());
+ break;
// Machine operators
// -----------------------
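The kFoldConstant case added above ties the new IsSingleton predicate into the verifier: a FoldConstant node is only well-typed when its own type and both input types collapse to the same single value. A compact restatement of the invariant as a standalone sketch:

    // Sketch of the invariant enforced for IrOpcode::kFoldConstant above.
    void CheckFoldConstantInvariant(Node* node) {
      Type type = NodeProperties::GetType(node);
      CHECK(type.IsSingleton());  // exactly one semantic value
      CHECK(type.Equals(NodeProperties::GetType(node->InputAt(0))));
      CHECK(type.Equals(NodeProperties::GetType(node->InputAt(1))));
    }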
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index db1731f388..9373a2b4b9 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -62,13 +62,6 @@ namespace compiler {
namespace {
-// TODO(titzer): pull WASM_64 up to a common header.
-#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
-#define WASM_64 1
-#else
-#define WASM_64 0
-#endif
-
#define FATAL_UNSUPPORTED_OPCODE(opcode) \
FATAL("Unsupported opcode 0x%x:%s", (opcode), \
wasm::WasmOpcodes::OpcodeName(opcode));
@@ -85,28 +78,22 @@ MachineType assert_size(int expected_size, MachineType type) {
#define WASM_INSTANCE_OBJECT_OFFSET(name) \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
-#define LOAD_RAW(base_pointer, byte_offset, type) \
- SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), base_pointer, \
- mcgraph()->Int32Constant(byte_offset), effect(), \
- control()))
-
-#define LOAD_RAW_NODE_OFFSET(base_pointer, node_offset, type) \
- SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), base_pointer, \
- node_offset, effect(), control()))
+#define LOAD_INSTANCE_FIELD(name, type) \
+ gasm_->Load(assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
+ instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name))
-#define LOAD_INSTANCE_FIELD(name, type) \
- LOAD_RAW(instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name), \
- assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type))
+#define LOAD_FULL_POINTER(base_pointer, byte_offset) \
+ gasm_->Load(MachineType::Pointer(), base_pointer, byte_offset)
#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
- LOAD_RAW(base_pointer, byte_offset, MachineType::TaggedPointer())
+ gasm_->Load(MachineType::TaggedPointer(), base_pointer, byte_offset)
#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
- LOAD_RAW(base_pointer, byte_offset, MachineType::AnyTagged())
+ gasm_->Load(MachineType::AnyTagged(), base_pointer, byte_offset)
#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type) \
- LOAD_RAW(array_node, \
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), type)
+ gasm_->Load(type, array_node, \
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
@@ -117,15 +104,11 @@ MachineType assert_size(int expected_size, MachineType type) {
#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
-#define STORE_RAW(base, offset, val, rep, barrier) \
- SetEffect(graph()->NewNode( \
- mcgraph()->machine()->Store(StoreRepresentation(rep, barrier)), base, \
- mcgraph()->Int32Constant(offset), val, effect(), control()))
+#define STORE_RAW(base, offset, val, rep, barrier) \
+ STORE_RAW_NODE_OFFSET(base, gasm_->Int32Constant(offset), val, rep, barrier)
-#define STORE_RAW_NODE_OFFSET(base, node_offset, val, rep, barrier) \
- SetEffect(graph()->NewNode( \
- mcgraph()->machine()->Store(StoreRepresentation(rep, barrier)), base, \
- node_offset, val, effect(), control()))
+#define STORE_RAW_NODE_OFFSET(base, node_offset, val, rep, barrier) \
+ gasm_->Store(StoreRepresentation(rep, barrier), base, node_offset, val)
// This can be used to store tagged Smi values only.
#define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
@@ -148,14 +131,14 @@ void MergeControlToEnd(MachineGraph* mcgraph, Node* node) {
}
}
-bool ContainsSimd(wasm::FunctionSig* sig) {
+bool ContainsSimd(const wasm::FunctionSig* sig) {
for (auto type : sig->all()) {
if (type == wasm::kWasmS128) return true;
}
return false;
}
-bool ContainsInt64(wasm::FunctionSig* sig) {
+bool ContainsInt64(const wasm::FunctionSig* sig) {
for (auto type : sig->all()) {
if (type == wasm::kWasmI64) return true;
}
@@ -171,7 +154,7 @@ class WasmGraphAssembler : public GraphAssembler {
WasmGraphBuilder::WasmGraphBuilder(
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
- wasm::FunctionSig* sig,
+ const wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table)
: gasm_(std::make_unique<WasmGraphAssembler>(mcgraph, zone)),
zone_(zone),
@@ -262,9 +245,8 @@ Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count,
Node** vals_and_control) {
DCHECK(IrOpcode::IsMergeOpcode(vals_and_control[count]->opcode()));
return graph()->NewNode(
- mcgraph()->common()->Phi(wasm::ValueTypes::MachineRepresentationFor(type),
- count),
- count + 1, vals_and_control);
+ mcgraph()->common()->Phi(type.machine_representation(), count), count + 1,
+ vals_and_control);
}
Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
@@ -275,7 +257,7 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
Node* WasmGraphBuilder::RefNull() {
Node* isolate_root = BuildLoadIsolateRoot();
- return LOAD_TAGGED_POINTER(
+ return LOAD_FULL_POINTER(
isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue));
}
@@ -1148,26 +1130,26 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
Node* result;
Node* value = node;
MachineOperatorBuilder* m = mcgraph()->machine();
- int valueSizeInBytes = wasm::ValueTypes::ElementSizeInBytes(wasmtype);
+ int valueSizeInBytes = wasmtype.element_size_bytes();
int valueSizeInBits = 8 * valueSizeInBytes;
bool isFloat = false;
- switch (wasmtype) {
- case wasm::kWasmF64:
+ switch (wasmtype.kind()) {
+ case wasm::ValueType::kF64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
isFloat = true;
V8_FALLTHROUGH;
- case wasm::kWasmI64:
+ case wasm::ValueType::kI64:
result = mcgraph()->Int64Constant(0);
break;
- case wasm::kWasmF32:
+ case wasm::ValueType::kF32:
value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
isFloat = true;
V8_FALLTHROUGH;
- case wasm::kWasmI32:
+ case wasm::ValueType::kI32:
result = mcgraph()->Int32Constant(0);
break;
- case wasm::kWasmS128:
+ case wasm::ValueType::kS128:
DCHECK(ReverseBytesSupported(m, valueSizeInBytes));
break;
default:
@@ -1182,7 +1164,7 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
// In case we store lower part of WasmI64 expression, we can truncate
// upper 32bits
value = graph()->NewNode(m->TruncateInt64ToInt32(), value);
- valueSizeInBytes = wasm::ValueTypes::ElementSizeInBytes(wasm::kWasmI32);
+ valueSizeInBytes = wasm::kWasmI32.element_size_bytes();
valueSizeInBits = 8 * valueSizeInBytes;
if (mem_rep == MachineRepresentation::kWord16) {
value =
@@ -1256,11 +1238,11 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
}
if (isFloat) {
- switch (wasmtype) {
- case wasm::kWasmF64:
+ switch (wasmtype.kind()) {
+ case wasm::ValueType::kF64:
result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
break;
- case wasm::kWasmF32:
+ case wasm::ValueType::kF32:
result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
break;
default:
@@ -1431,31 +1413,24 @@ Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
}
Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
-#if WASM_64
- Node* result = Unop(
- wasm::kExprF64ReinterpretI64,
- Binop(wasm::kExprI64Ior,
- Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, left),
- mcgraph()->Int64Constant(0x7FFFFFFFFFFFFFFF)),
- Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, right),
- mcgraph()->Int64Constant(0x8000000000000000))));
+ if (mcgraph()->machine()->Is64()) {
+ return gasm_->BitcastInt64ToFloat64(gasm_->Word64Or(
+ gasm_->Word64And(gasm_->BitcastFloat64ToInt64(left),
+ gasm_->Int64Constant(0x7FFFFFFFFFFFFFFF)),
+ gasm_->Word64And(gasm_->BitcastFloat64ToInt64(right),
+ gasm_->Int64Constant(0x8000000000000000))));
+ }
- return result;
-#else
- MachineOperatorBuilder* m = mcgraph()->machine();
+ DCHECK(mcgraph()->machine()->Is32());
- Node* high_word_left = graph()->NewNode(m->Float64ExtractHighWord32(), left);
- Node* high_word_right =
- graph()->NewNode(m->Float64ExtractHighWord32(), right);
+ Node* high_word_left = gasm_->Float64ExtractHighWord32(left);
+ Node* high_word_right = gasm_->Float64ExtractHighWord32(right);
- Node* new_high_word = Binop(wasm::kExprI32Ior,
- Binop(wasm::kExprI32And, high_word_left,
- mcgraph()->Int32Constant(0x7FFFFFFF)),
- Binop(wasm::kExprI32And, high_word_right,
- mcgraph()->Int32Constant(0x80000000)));
+ Node* new_high_word = gasm_->Word32Or(
+ gasm_->Word32And(high_word_left, gasm_->Int32Constant(0x7FFFFFFF)),
+ gasm_->Word32And(high_word_right, gasm_->Int32Constant(0x80000000)));
- return graph()->NewNode(m->Float64InsertHighWord32(), left, new_high_word);
-#endif
+ return gasm_->Float64InsertHighWord32(left, new_high_word);
}
namespace {
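BuildF64CopySign above drops the WASM_64 macro in favor of a runtime mcgraph()->machine()->Is64() check, but the bit manipulation is unchanged: keep the magnitude bits of the left operand and the sign bit of the right one. A standalone scalar sketch of the same trick:

    #include <cstdint>
    #include <cstring>

    // Mirrors the Word64And/Word64Or graph built above for 64-bit targets.
    double F64CopySign(double left, double right) {
      uint64_t l, r;
      std::memcpy(&l, &left, sizeof l);
      std::memcpy(&r, &right, sizeof r);
      uint64_t bits = (l & 0x7FFFFFFFFFFFFFFFull)   // magnitude of left
                    | (r & 0x8000000000000000ull);  // sign of right
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }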
@@ -1763,14 +1738,7 @@ Node* WasmGraphBuilder::BuildI32AsmjsUConvertF64(Node* input) {
Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
MachineRepresentation input_type) {
- Node* stack_slot_param =
- graph()->NewNode(mcgraph()->machine()->StackSlot(input_type));
-
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(input_type, kNoWriteBarrier));
- SetEffect(graph()->NewNode(store_op, stack_slot_param,
- mcgraph()->Int32Constant(0), input, effect(),
- control()));
+ Node* stack_slot_param = StoreArgsInStackSlot({{input_type, input}});
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
@@ -1885,27 +1853,17 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
// parameters in this buffer's slots, pass a pointer to the buffer to the C
// function, and after calling the C function we collect the return value from
// the buffer.
-
- const int type_size = ElementSizeInBytes(type.representation());
- const int stack_slot_bytes = (input1 == nullptr ? 1 : 2) * type_size;
- Node* stack_slot =
- graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_bytes));
-
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(type.representation(), kNoWriteBarrier));
- SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- input0, effect(), control()));
-
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
-
- if (input1 != nullptr) {
- SetEffect(graph()->NewNode(store_op, stack_slot,
- mcgraph()->Int32Constant(type_size), input1,
- effect(), control()));
+ Node* stack_slot;
+ if (input1) {
+ stack_slot = StoreArgsInStackSlot(
+ {{type.representation(), input0}, {type.representation(), input1}});
+ } else {
+ stack_slot = StoreArgsInStackSlot({{type.representation(), input0}});
}
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
+ Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
BuildCCall(&sig, function, stack_slot);
return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type),
@@ -2066,17 +2024,17 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
MachineOperatorBuilder* m = mcgraph()->machine();
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value = values[i];
- switch (sig->GetParam(i)) {
- case wasm::kWasmF32:
+ switch (sig->GetParam(i).kind()) {
+ case wasm::ValueType::kF32:
value = graph()->NewNode(m->BitcastFloat32ToInt32(), value);
V8_FALLTHROUGH;
- case wasm::kWasmI32:
+ case wasm::ValueType::kI32:
BuildEncodeException32BitValue(values_array, &index, value);
break;
- case wasm::kWasmF64:
+ case wasm::ValueType::kF64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), value);
V8_FALLTHROUGH;
- case wasm::kWasmI64: {
+ case wasm::ValueType::kI64: {
Node* upper32 = graph()->NewNode(
m->TruncateInt64ToInt32(),
Binop(wasm::kExprI64ShrU, value, Int64Constant(32)));
@@ -2085,7 +2043,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
BuildEncodeException32BitValue(values_array, &index, lower32);
break;
}
- case wasm::kWasmS128:
+ case wasm::ValueType::kS128:
BuildEncodeException32BitValue(
values_array, &index,
graph()->NewNode(m->I32x4ExtractLane(0), value));
@@ -2099,14 +2057,15 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
values_array, &index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
- case wasm::kWasmAnyRef:
- case wasm::kWasmFuncRef:
- case wasm::kWasmNullRef:
- case wasm::kWasmExnRef:
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kFuncRef:
+ case wasm::ValueType::kNullRef:
+ case wasm::ValueType::kExnRef:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
- default:
+ case wasm::ValueType::kStmt:
+ case wasm::ValueType::kBottom:
UNREACHABLE();
}
}
@@ -2165,7 +2124,6 @@ Node* WasmGraphBuilder::BuildDecodeException64BitValue(Node* values_array,
}
Node* WasmGraphBuilder::Rethrow(Node* except_obj) {
- needs_stack_check_ = true;
// TODO(v8:8091): Currently the message of the original exception is not being
// preserved when rethrown to the console. The pending message will need to be
// saved when caught and restored here while being rethrown.
@@ -2176,9 +2134,7 @@ Node* WasmGraphBuilder::Rethrow(Node* except_obj) {
Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmRethrow, RelocInfo::WASM_STUB_CALL);
- return SetEffectControl(
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
- except_obj, effect(), control()));
+ return gasm_->Call(call_descriptor, call_target, except_obj);
}
Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
@@ -2194,8 +2150,10 @@ Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
return tag;
}
-Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
- needs_stack_check_ = true;
+Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj,
+ wasm::WasmCodePosition position) {
+ TrapIfTrue(wasm::kTrapBrOnExnNullRef, gasm_->WordEqual(RefNull(), except_obj),
+ position);
return BuildCallToRuntime(Runtime::kWasmExceptionGetTag, &except_obj, 1);
}
@@ -2209,24 +2167,24 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
DCHECK_EQ(sig->parameter_count(), values.size());
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value;
- switch (sig->GetParam(i)) {
- case wasm::kWasmI32:
+ switch (sig->GetParam(i).kind()) {
+ case wasm::ValueType::kI32:
value = BuildDecodeException32BitValue(values_array, &index);
break;
- case wasm::kWasmI64:
+ case wasm::ValueType::kI64:
value = BuildDecodeException64BitValue(values_array, &index);
break;
- case wasm::kWasmF32: {
+ case wasm::ValueType::kF32: {
value = Unop(wasm::kExprF32ReinterpretI32,
BuildDecodeException32BitValue(values_array, &index));
break;
}
- case wasm::kWasmF64: {
+ case wasm::ValueType::kF64: {
value = Unop(wasm::kExprF64ReinterpretI64,
BuildDecodeException64BitValue(values_array, &index));
break;
}
- case wasm::kWasmS128:
+ case wasm::ValueType::kS128:
value = graph()->NewNode(
mcgraph()->machine()->I32x4Splat(),
BuildDecodeException32BitValue(values_array, &index));
@@ -2240,14 +2198,15 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
mcgraph()->machine()->I32x4ReplaceLane(3), value,
BuildDecodeException32BitValue(values_array, &index));
break;
- case wasm::kWasmAnyRef:
- case wasm::kWasmFuncRef:
- case wasm::kWasmNullRef:
- case wasm::kWasmExnRef:
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kFuncRef:
+ case wasm::ValueType::kNullRef:
+ case wasm::ValueType::kExnRef:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
- default:
+ case wasm::ValueType::kStmt:
+ case wasm::ValueType::kBottom:
UNREACHABLE();
}
values[i] = value;
@@ -2551,15 +2510,8 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
wasm::TrapReason trap_zero,
wasm::WasmCodePosition position) {
Node* stack_slot =
- graph()->NewNode(mcgraph()->machine()->StackSlot(2 * sizeof(double)));
-
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(MachineRepresentation::kWord64, kNoWriteBarrier));
- SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- left, effect(), control()));
- SetEffect(graph()->NewNode(store_op, stack_slot,
- mcgraph()->Int32Constant(sizeof(double)), right,
- effect(), control()));
+ StoreArgsInStackSlot({{MachineRepresentation::kWord64, left},
+ {MachineRepresentation::kWord64, right}});
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
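BuildDiv64Call and BuildBitCountingCall above now funnel their arguments through StoreArgsInStackSlot instead of hand-building StackSlot and Store nodes. The helper's definition is outside the hunks shown; reconstructed from the removed code and the call sites, its shape is presumably something like the following sketch (not the actual implementation):

    Node* WasmGraphBuilder::StoreArgsInStackSlot(
        std::initializer_list<std::pair<MachineRepresentation, Node*>> args) {
      int slot_size = 0;
      for (auto arg : args) slot_size += ElementSizeInBytes(arg.first);
      Node* slot = graph()->NewNode(mcgraph()->machine()->StackSlot(slot_size));
      int offset = 0;
      for (auto arg : args) {
        const Operator* store = mcgraph()->machine()->Store(
            StoreRepresentation(arg.first, kNoWriteBarrier));
        SetEffect(graph()->NewNode(store, slot, mcgraph()->Int32Constant(offset),
                                   arg.second, effect(), control()));
        offset += ElementSizeInBytes(arg.first);
      }
      return slot;
    }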
@@ -2588,7 +2540,7 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
return SetEffect(graph()->NewNode(op, arraysize(call_args), call_args));
}
-Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig,
+Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node, const Operator* op) {
@@ -2623,7 +2575,7 @@ Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig,
return call;
}
-Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig,
+Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
Node* instance_node,
@@ -2650,7 +2602,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig,
return call;
}
-Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig,
+Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node,
@@ -2665,7 +2617,7 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig,
return call;
}
-Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig,
+Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
int func_index,
@@ -2696,7 +2648,7 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig,
}
}
-Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig,
+Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
Node* func_index,
@@ -2712,9 +2664,9 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig,
Node* func_index_times_tagged_size = graph()->NewNode(
mcgraph()->machine()->IntMul(), Uint32ToUintptr(func_index),
mcgraph()->Int32Constant(kTaggedSize));
- Node* ref_node = LOAD_RAW_NODE_OFFSET(imported_instances_data,
- func_index_times_tagged_size,
- MachineType::TaggedPointer());
+ Node* ref_node =
+ gasm_->Load(MachineType::TaggedPointer(), imported_instances_data,
+ func_index_times_tagged_size);
// Load the target from the imported_targets array at the offset of
// {func_index}.
@@ -2750,7 +2702,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Vector<Node*> args,
Vector<Node*> rets,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
- wasm::FunctionSig* sig = env_->module->functions[index].sig;
+ const wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (env_ && index < env_->module->num_imported_functions) {
// Call to an imported function.
@@ -2793,25 +2745,21 @@ void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
LOAD_INSTANCE_FIELD(IndirectFunctionTables, MachineType::TaggedPointer());
Node* ift_table = LOAD_FIXED_ARRAY_SLOT_ANY(ift_tables, table_index);
- *ift_size = LOAD_RAW(
- ift_table,
- wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset),
- MachineType::Int32());
+ *ift_size = gasm_->Load(
+ MachineType::Int32(), ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset));
- *ift_sig_ids = LOAD_RAW(
- ift_table,
- wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset),
- MachineType::Pointer());
+ *ift_sig_ids = gasm_->Load(
+ MachineType::Pointer(), ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset));
- *ift_targets = LOAD_RAW(
- ift_table,
- wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset),
- MachineType::Pointer());
+ *ift_targets = gasm_->Load(
+ MachineType::Pointer(), ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset));
- *ift_instances = LOAD_RAW(
- ift_table,
- wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset),
- MachineType::TaggedPointer());
+ *ift_instances = gasm_->Load(
+ MachineType::TaggedPointer(), ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset));
}
Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
@@ -2831,7 +2779,7 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets,
&ift_instances);
- wasm::FunctionSig* sig = env_->module->signatures[sig_index];
+ const wasm::FunctionSig* sig = env_->module->signatures[sig_index];
MachineOperatorBuilder* machine = mcgraph()->machine();
Node* key = args[0];
@@ -2875,10 +2823,10 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
int32_scaled_key);
}
- Node* target_instance = LOAD_RAW(
+ Node* target_instance = gasm_->Load(
+ MachineType::TaggedPointer(),
graph()->NewNode(machine->IntAdd(), ift_instances, tagged_scaled_key),
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0),
- MachineType::TaggedPointer());
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0));
Node* intptr_scaled_key;
if (kSystemPointerSize == kTaggedSize) {
@@ -2910,7 +2858,7 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* WasmGraphBuilder::ReturnCall(uint32_t index, Vector<Node*> args,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
- wasm::FunctionSig* sig = env_->module->functions[index].sig;
+ const wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (env_ && index < env_->module->num_imported_functions) {
// Return Call to an imported function.
@@ -2991,6 +2939,7 @@ Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
}
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
+ // With pointer compression, only the lower 32 bits are used.
if (COMPRESS_POINTERS_BOOL) {
return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
BuildSmiShiftBitsConstant32());
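The comment added to BuildChangeInt32ToSmi notes that with pointer compression only the lower 32 bits carry the Smi payload, so a 32-bit shift suffices; without compression the value is widened and shifted into the upper half of the word. A scalar sketch of the two tagging schemes, assuming V8's usual Smi layouts:

    #include <cstdint>

    // 64-bit, no pointer compression: 32-bit payload in the upper word.
    uint64_t TagSmiFull(int32_t value) {
      return static_cast<uint64_t>(static_cast<int64_t>(value)) << 32;
    }

    // Pointer compression: 31-bit payload, one-bit shift, low 32 bits only.
    uint32_t TagSmiCompressed(int32_t value) {
      return static_cast<uint32_t>(value) << 1;
    }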
@@ -3299,7 +3248,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
DCHECK_EQ(1, fun->result_size);
auto centry_id =
Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
- Node* centry_stub = LOAD_TAGGED_POINTER(
+ Node* centry_stub = LOAD_FULL_POINTER(
isolate_root, IsolateData::builtin_slot_offset(centry_id));
// TODO(titzer): allow an arbitrary number of runtime arguments.
// At the moment we only allow 5 parameters. If more parameters are needed,
@@ -3334,39 +3283,36 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (wasm::ValueTypes::IsReferenceType(global.type)) {
+ if (global.type.IsReferenceType()) {
if (global.mutability && global.imported) {
Node* base = nullptr;
Node* offset = nullptr;
GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
- return LOAD_RAW_NODE_OFFSET(base, offset, MachineType::AnyTagged());
+ return gasm_->Load(MachineType::AnyTagged(), base, offset);
}
Node* globals_buffer =
LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
return LOAD_FIXED_ARRAY_SLOT_ANY(globals_buffer, global.offset);
}
- MachineType mem_type =
- wasm::ValueTypes::MachineTypeFor(env_->module->globals[index].type);
+ MachineType mem_type = global.type.machine_type();
if (mem_type.representation() == MachineRepresentation::kSimd128) {
has_simd_ = true;
}
Node* base = nullptr;
Node* offset = nullptr;
- GetGlobalBaseAndOffset(mem_type, env_->module->globals[index], &base,
- &offset);
+ GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
Node* result = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(mem_type), base, offset, effect(), control()));
#if defined(V8_TARGET_BIG_ENDIAN)
- result = BuildChangeEndiannessLoad(result, mem_type,
- env_->module->globals[index].type);
+ result = BuildChangeEndiannessLoad(result, mem_type, global.type);
#endif
return result;
}
Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (wasm::ValueTypes::IsReferenceType(global.type)) {
+ if (global.type.IsReferenceType()) {
if (global.mutability && global.imported) {
Node* base = nullptr;
Node* offset = nullptr;
@@ -3377,24 +3323,20 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
}
Node* globals_buffer =
LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
- return STORE_FIXED_ARRAY_SLOT_ANY(globals_buffer,
- env_->module->globals[index].offset, val);
+ return STORE_FIXED_ARRAY_SLOT_ANY(globals_buffer, global.offset, val);
}
- MachineType mem_type =
- wasm::ValueTypes::MachineTypeFor(env_->module->globals[index].type);
+ MachineType mem_type = global.type.machine_type();
if (mem_type.representation() == MachineRepresentation::kSimd128) {
has_simd_ = true;
}
Node* base = nullptr;
Node* offset = nullptr;
- GetGlobalBaseAndOffset(mem_type, env_->module->globals[index], &base,
- &offset);
+ GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
const Operator* op = mcgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
#if defined(V8_TARGET_BIG_ENDIAN)
- val = BuildChangeEndiannessStore(val, mem_type.representation(),
- env_->module->globals[index].type);
+ val = BuildChangeEndiannessStore(val, mem_type.representation(), global.type);
#endif
return SetEffect(
graph()->NewNode(op, base, offset, val, effect(), control()));
@@ -3409,10 +3351,9 @@ void WasmGraphBuilder::BoundsCheckTable(uint32_t table_index, Node* entry_index,
int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd -
WasmTableObject::kCurrentLengthOffset + 1;
- Node* length_smi = LOAD_RAW(
- table,
- wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset),
- assert_size(length_field_size, MachineType::TaggedSigned()));
+ Node* length_smi = gasm_->Load(
+ assert_size(length_field_size, MachineType::TaggedSigned()), table,
+ wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset));
Node* length = BuildChangeSmiToInt32(length_smi);
// Bounds check against the table size.
@@ -3423,9 +3364,9 @@ void WasmGraphBuilder::BoundsCheckTable(uint32_t table_index, Node* entry_index,
if (base_node) {
int storage_field_size = WasmTableObject::kEntriesOffsetEnd -
WasmTableObject::kEntriesOffset + 1;
- *base_node = LOAD_RAW(
- table, wasm::ObjectAccess::ToTagged(WasmTableObject::kEntriesOffset),
- assert_size(storage_field_size, MachineType::TaggedPointer()));
+ *base_node = gasm_->Load(
+ assert_size(storage_field_size, MachineType::TaggedPointer()), table,
+ wasm::ObjectAccess::ToTagged(WasmTableObject::kEntriesOffset));
}
}
@@ -3457,7 +3398,7 @@ Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
Node* base = nullptr;
Node* offset = nullptr;
GetTableBaseAndOffset(table_index, index, position, &base, &offset);
- return LOAD_RAW_NODE_OFFSET(base, offset, MachineType::AnyTagged());
+ return gasm_->Load(MachineType::AnyTagged(), base, offset);
}
// We access funcref tables through runtime calls.
WasmTableGetDescriptor interface_descriptor;
@@ -3648,15 +3589,15 @@ Node* WasmGraphBuilder::BoundsCheckMemRange(Node** start, Node** size,
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
wasm::ValueType type) {
- int alignment = offset % (wasm::ValueTypes::ElementSizeInBytes(type));
- MachineType mach_type = wasm::ValueTypes::MachineTypeFor(type);
+ int alignment = offset % type.element_size_bytes();
+ MachineType mach_type = type.machine_type();
if (COMPRESS_POINTERS_BOOL && mach_type.IsTagged()) {
// We are loading a tagged value from an off-heap location, so we need to
// load it as a full word; otherwise we will not be able to decompress it.
mach_type = MachineType::Pointer();
}
if (alignment == 0 || mcgraph()->machine()->UnalignedLoadSupported(
- wasm::ValueTypes::MachineRepresentationFor(type))) {
+ type.machine_representation())) {
return mcgraph()->machine()->Load(mach_type);
}
return mcgraph()->machine()->UnalignedLoad(mach_type);
@@ -3664,8 +3605,8 @@ const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
const Operator* WasmGraphBuilder::GetSafeStoreOperator(int offset,
wasm::ValueType type) {
- int alignment = offset % (wasm::ValueTypes::ElementSizeInBytes(type));
- MachineRepresentation rep = wasm::ValueTypes::MachineRepresentationFor(type);
+ int alignment = offset % type.element_size_bytes();
+ MachineRepresentation rep = type.machine_representation();
if (COMPRESS_POINTERS_BOOL && IsAnyTagged(rep)) {
// We are storing a tagged value to an off-heap location, so we need to
// store it as a full word; otherwise we will not be able to decompress it.
@@ -3760,7 +3701,47 @@ LoadKind GetLoadKind(MachineGraph* mcgraph, MachineType memtype,
}
} // namespace
-Node* WasmGraphBuilder::LoadTransform(MachineType memtype,
+// The S390 simulator does not execute BE code, hence we also need to check
+// whether we are running on an LE simulator.
+// TODO(miladfar): Remove SIM once V8_TARGET_BIG_ENDIAN includes the Sim.
+#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
+Node* WasmGraphBuilder::LoadTransformBigEndian(
+ MachineType memtype, wasm::LoadTransformationKind transform, Node* value) {
+ Node* result;
+ LoadTransformation transformation = GetLoadTransformation(memtype, transform);
+
+ switch (transformation) {
+ case LoadTransformation::kS8x16LoadSplat: {
+ result = graph()->NewNode(mcgraph()->machine()->I8x16Splat(), value);
+ break;
+ }
+ case LoadTransformation::kI16x8Load8x8S:
+ case LoadTransformation::kI16x8Load8x8U:
+ case LoadTransformation::kS16x8LoadSplat: {
+ result = graph()->NewNode(mcgraph()->machine()->I16x8Splat(), value);
+ break;
+ }
+ case LoadTransformation::kI32x4Load16x4S:
+ case LoadTransformation::kI32x4Load16x4U:
+ case LoadTransformation::kS32x4LoadSplat: {
+ result = graph()->NewNode(mcgraph()->machine()->I32x4Splat(), value);
+ break;
+ }
+ case LoadTransformation::kI64x2Load32x2S:
+ case LoadTransformation::kI64x2Load32x2U:
+ case LoadTransformation::kS64x2LoadSplat: {
+ result = graph()->NewNode(mcgraph()->machine()->I64x2Splat(), value);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ return result;
+}
+#endif
+
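LoadTransformBigEndian above splits each load-transform into a plain (endian-corrected) load followed by the matching splat or extend operation. A scalar model of the 32-bit splat case it handles:

    #include <array>
    #include <cstdint>

    // Model of kS32x4LoadSplat: one scalar load, replicated into all four
    // 32-bit lanes (the I32x4Splat node built above).
    std::array<uint32_t, 4> S32x4LoadSplat(const uint32_t* mem) {
      uint32_t value = *mem;
      return {value, value, value, value};
    }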
+Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform,
Node* index, uint32_t offset,
uint32_t alignment,
@@ -3769,24 +3750,31 @@ Node* WasmGraphBuilder::LoadTransform(MachineType memtype,
has_simd_ = true;
}
+ Node* load;
+
+#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
+ // LoadTransform cannot be executed efficiently on BE machines as a single
+ // operation, since the loaded bytes need to be reversed first; therefore we
+ // split it into separate "load" and "operation" nodes.
+ load = LoadMem(type, memtype, index, offset, alignment, position);
+ load = LoadTransformBigEndian(memtype, transform, load);
+ USE(GetLoadKind);
+#else
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
- index = BoundsCheckMem(wasm::ValueTypes::MemSize(memtype), index, offset,
- position, kCanOmitBoundsCheck);
+ index = BoundsCheckMem(memtype.MemSize(), index, offset, position,
+ kCanOmitBoundsCheck);
LoadTransformation transformation = GetLoadTransformation(memtype, transform);
LoadKind load_kind = GetLoadKind(mcgraph(), memtype, use_trap_handler());
- Node* load = SetEffect(graph()->NewNode(
+ load = SetEffect(graph()->NewNode(
mcgraph()->machine()->LoadTransform(load_kind, transformation),
MemBuffer(offset), index, effect(), control()));
if (load_kind == LoadKind::kProtected) {
SetSourcePosition(load, position);
}
-
-#if defined(V8_TARGET_BIG_ENDIAN)
- load = BuildChangeEndiannessLoad(load, memtype, wasm::ValueType::kWasmS128);
#endif
if (FLAG_trace_wasm_memory) {
@@ -3808,8 +3796,8 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
- index = BoundsCheckMem(wasm::ValueTypes::MemSize(memtype), index, offset,
- position, kCanOmitBoundsCheck);
+ index = BoundsCheckMem(memtype.MemSize(), index, offset, position,
+ kCanOmitBoundsCheck);
if (memtype.representation() == MachineRepresentation::kWord8 ||
mcgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
@@ -4010,14 +3998,15 @@ Graph* WasmGraphBuilder::graph() { return mcgraph()->graph(); }
namespace {
Signature<MachineRepresentation>* CreateMachineSignature(
- Zone* zone, wasm::FunctionSig* sig, WasmGraphBuilder::CallOrigin origin) {
+ Zone* zone, const wasm::FunctionSig* sig,
+ WasmGraphBuilder::CallOrigin origin) {
Signature<MachineRepresentation>::Builder builder(zone, sig->return_count(),
sig->parameter_count());
for (auto ret : sig->returns()) {
if (origin == WasmGraphBuilder::kCalledFromJS) {
builder.AddReturn(MachineRepresentation::kTagged);
} else {
- builder.AddReturn(wasm::ValueTypes::MachineRepresentationFor(ret));
+ builder.AddReturn(ret.machine_representation());
}
}
@@ -4028,13 +4017,65 @@ Signature<MachineRepresentation>* CreateMachineSignature(
// provided by JavaScript, and not two 32-bit parameters.
builder.AddParam(MachineRepresentation::kTagged);
} else {
- builder.AddParam(wasm::ValueTypes::MachineRepresentationFor(param));
+ builder.AddParam(param.machine_representation());
}
}
return builder.Build();
}
+
+template <typename BuiltinDescriptor>
+CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
+ StubCallMode stub_mode) {
+ BuiltinDescriptor interface_descriptor;
+ return Linkage::GetStubCallDescriptor(
+ builder->mcgraph()->zone(), // zone
+ interface_descriptor, // descriptor
+ interface_descriptor.GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ stub_mode); // stub call mode
+}
+
} // namespace
+void WasmGraphBuilder::AddInt64LoweringReplacement(
+ CallDescriptor* original, CallDescriptor* replacement) {
+ if (!lowering_special_case_) {
+ lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
+ }
+ lowering_special_case_->replacements.insert({original, replacement});
+}
+
+CallDescriptor* WasmGraphBuilder::GetI32AtomicWaitCallDescriptor() {
+ if (i32_atomic_wait_descriptor_) return i32_atomic_wait_descriptor_;
+
+ i32_atomic_wait_descriptor_ =
+ GetBuiltinCallDescriptor<WasmI32AtomicWait64Descriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
+
+ AddInt64LoweringReplacement(
+ i32_atomic_wait_descriptor_,
+ GetBuiltinCallDescriptor<WasmI32AtomicWait32Descriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub));
+
+ return i32_atomic_wait_descriptor_;
+}
+
+CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
+ if (i64_atomic_wait_descriptor_) return i64_atomic_wait_descriptor_;
+
+ i64_atomic_wait_descriptor_ =
+ GetBuiltinCallDescriptor<WasmI64AtomicWait64Descriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
+
+ AddInt64LoweringReplacement(
+ i64_atomic_wait_descriptor_,
+ GetBuiltinCallDescriptor<WasmI64AtomicWait32Descriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub));
+
+ return i64_atomic_wait_descriptor_;
+}
+
void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
@@ -4330,6 +4371,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI32x4GeU:
return graph()->NewNode(mcgraph()->machine()->I32x4GeU(), inputs[0],
inputs[1]);
+ case wasm::kExprI32x4Abs:
+ return graph()->NewNode(mcgraph()->machine()->I32x4Abs(), inputs[0]);
+ case wasm::kExprI32x4BitMask:
+ return graph()->NewNode(mcgraph()->machine()->I32x4BitMask(), inputs[0]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(mcgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -4430,6 +4475,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8RoundingAverageU:
return graph()->NewNode(mcgraph()->machine()->I16x8RoundingAverageU(),
inputs[0], inputs[1]);
+ case wasm::kExprI16x8Abs:
+ return graph()->NewNode(mcgraph()->machine()->I16x8Abs(), inputs[0]);
+ case wasm::kExprI16x8BitMask:
+ return graph()->NewNode(mcgraph()->machine()->I16x8BitMask(), inputs[0]);
case wasm::kExprI8x16Splat:
return graph()->NewNode(mcgraph()->machine()->I8x16Splat(), inputs[0]);
case wasm::kExprI8x16Neg:
@@ -4515,6 +4564,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI8x16RoundingAverageU:
return graph()->NewNode(mcgraph()->machine()->I8x16RoundingAverageU(),
inputs[0], inputs[1]);
+ case wasm::kExprI8x16Abs:
+ return graph()->NewNode(mcgraph()->machine()->I8x16Abs(), inputs[0]);
+ case wasm::kExprI8x16BitMask:
+ return graph()->NewNode(mcgraph()->machine()->I8x16BitMask(), inputs[0]);
case wasm::kExprS128And:
return graph()->NewNode(mcgraph()->machine()->S128And(), inputs[0],
inputs[1]);
@@ -4692,9 +4745,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
switch (opcode) {
#define BUILD_ATOMIC_BINOP(Name, Operation, Type, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment( \
- wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
- position); \
+ Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
+ inputs[0], offset, position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], effect(), control()); \
@@ -4705,9 +4757,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#define BUILD_ATOMIC_CMP_EXCHG(Name, Type, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment( \
- wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
- position); \
+ Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
+ inputs[0], offset, position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##AtomicCompareExchange( \
MachineType::Type()), \
@@ -4717,24 +4768,22 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
ATOMIC_CMP_EXCHG_LIST(BUILD_ATOMIC_CMP_EXCHG)
#undef BUILD_ATOMIC_CMP_EXCHG
-#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment( \
- wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
- position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), index, effect(), control()); \
- break; \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
+ case wasm::kExpr##Name: { \
+ Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
+ inputs[0], offset, position); \
+ node = graph()->NewNode( \
+ mcgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
+ MemBuffer(offset), index, effect(), control()); \
+ break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment( \
- wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
- position); \
+ Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
+ inputs[0], offset, position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##AtomicStore(MachineRepresentation::Rep), \
MemBuffer(offset), index, inputs[1], effect(), control()); \
@@ -4743,9 +4792,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
case wasm::kExprAtomicNotify: {
- Node* index = CheckBoundsAndAlignment(
- wasm::ValueTypes::MemSize(MachineType::Uint32()), inputs[0], offset,
- position);
+ Node* index = CheckBoundsAndAlignment(MachineType::Uint32().MemSize(),
+ inputs[0], offset, position);
// Now that we've bounds-checked, compute the effective address.
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Uint32Constant(offset), index);
@@ -4764,64 +4812,44 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
}
case wasm::kExprI32AtomicWait: {
- Node* index = CheckBoundsAndAlignment(
- wasm::ValueTypes::MemSize(MachineType::Uint32()), inputs[0], offset,
- position);
+ Node* index = CheckBoundsAndAlignment(MachineType::Uint32().MemSize(),
+ inputs[0], offset, position);
// Now that we've bounds-checked, compute the effective address.
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Uint32Constant(offset), index);
- Node* timeout;
- if (mcgraph()->machine()->Is32()) {
- timeout = BuildF64SConvertI64(inputs[2]);
- } else {
- timeout = graph()->NewNode(mcgraph()->machine()->RoundInt64ToFloat64(),
- inputs[2]);
- }
- WasmI32AtomicWaitDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallWasmRuntimeStub);
+
+ auto call_descriptor = GetI32AtomicWaitCallDescriptor();
+
+ intptr_t target = mcgraph()->machine()->Is64()
+ ? wasm::WasmCode::kWasmI32AtomicWait64
+ : wasm::WasmCode::kWasmI32AtomicWait32;
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmI32AtomicWait, RelocInfo::WASM_STUB_CALL);
+ target, RelocInfo::WASM_STUB_CALL);
+
node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, address, inputs[1], timeout,
+ call_target, address, inputs[1], inputs[2],
effect(), control());
break;
}
case wasm::kExprI64AtomicWait: {
- Node* index = CheckBoundsAndAlignment(
- wasm::ValueTypes::MemSize(MachineType::Uint64()), inputs[0], offset,
- position);
+ Node* index = CheckBoundsAndAlignment(MachineType::Uint64().MemSize(),
+ inputs[0], offset, position);
// Now that we've bounds-checked, compute the effective address.
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Uint32Constant(offset), index);
- Node* timeout;
- if (mcgraph()->machine()->Is32()) {
- timeout = BuildF64SConvertI64(inputs[2]);
- } else {
- timeout = graph()->NewNode(mcgraph()->machine()->RoundInt64ToFloat64(),
- inputs[2]);
- }
- Node* expected_value_low = graph()->NewNode(
- mcgraph()->machine()->TruncateInt64ToInt32(), inputs[1]);
- Node* tmp = graph()->NewNode(mcgraph()->machine()->Word64Shr(), inputs[1],
- Int64Constant(32));
- Node* expected_value_high =
- graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), tmp);
- WasmI64AtomicWaitDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallWasmRuntimeStub);
+
+ CallDescriptor* call_descriptor = GetI64AtomicWaitCallDescriptor();
+
+ intptr_t target = mcgraph()->machine()->Is64()
+ ? wasm::WasmCode::kWasmI64AtomicWait64
+ : wasm::WasmCode::kWasmI64AtomicWait32;
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmI64AtomicWait, RelocInfo::WASM_STUB_CALL);
+ target, RelocInfo::WASM_STUB_CALL);
+
node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, address, expected_value_high,
- expected_value_low, timeout, effect(), control());
+ call_target, address, inputs[1], inputs[2],
+ effect(), control());
break;
}
@@ -4848,50 +4876,21 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
// validation.
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
- Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
- TrapIfTrue(wasm::kTrapMemOutOfBounds, dst_fail, position);
-
- Node* seg_index = Uint32Constant(data_segment_index);
- auto m = mcgraph()->machine();
-
- {
- // Load segment size from WasmInstanceObject::data_segment_sizes.
- Node* seg_size_array =
- LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer());
- STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2);
- Node* scaled_index = Uint32ToUintptr(
- graph()->NewNode(m->Word32Shl(), seg_index, Int32Constant(2)));
- Node* seg_size = SetEffect(graph()->NewNode(m->Load(MachineType::Uint32()),
- seg_size_array, scaled_index,
- effect(), control()));
-
- // Bounds check the src index against the segment size.
- Node* src_fail = BoundsCheckRange(src, &size, seg_size, position);
- TrapIfTrue(wasm::kTrapMemOutOfBounds, src_fail, position);
- }
-
- {
- // Load segment's base pointer from WasmInstanceObject::data_segment_starts.
- Node* seg_start_array =
- LOAD_INSTANCE_FIELD(DataSegmentStarts, MachineType::Pointer());
- STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <=
- kMaxUInt32 / kSystemPointerSize);
- Node* scaled_index = Uint32ToUintptr(graph()->NewNode(
- m->Word32Shl(), seg_index, Int32Constant(kSystemPointerSizeLog2)));
- Node* seg_start = SetEffect(
- graph()->NewNode(m->Load(MachineType::Pointer()), seg_start_array,
- scaled_index, effect(), control()));
+ Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
+ ExternalReference::wasm_memory_init()));
- // Convert src index to pointer.
- src = graph()->NewNode(m->IntAdd(), seg_start, Uint32ToUintptr(src));
- }
+ Node* stack_slot = StoreArgsInStackSlot(
+ {{MachineType::PointerRepresentation(), instance_node_.get()},
+ {MachineRepresentation::kWord32, dst},
+ {MachineRepresentation::kWord32, src},
+ {MachineRepresentation::kWord32,
+ gasm_->Uint32Constant(data_segment_index)},
+ {MachineRepresentation::kWord32, size}});
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
- ExternalReference::wasm_memory_copy()));
- MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Uint32()};
- MachineSignature sig(0, 3, sig_types);
- return SetEffect(BuildCCall(&sig, function, dst, src, size));
+ MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
+ MachineSignature sig(1, 1, sig_types);
+ Node* call = SetEffect(BuildCCall(&sig, function, stack_slot));
+ return TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
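// The C call now has the shape int32_t(Address): one pointer to the packed
// stack slot goes in, one success flag comes out. A sketch of the assumed
// wrapper on the C side (name and unpacking logic hypothetical, mirroring
// the offsets written by StoreArgsInStackSlot):
//   int32_t wasm_memory_init(Address args) {
//     // read instance, dst, src, seg_index, size back at the same offsets,
//     // bounds-check both ranges, copy from the data segment, and
//     // return 1 on success or 0 on an out-of-bounds access.
//   }
// TrapIfFalse then turns a zero result into kTrapMemOutOfBounds.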
Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
@@ -4909,51 +4908,77 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
mcgraph()->Int32Constant(0), effect(), control()));
}
+Node* WasmGraphBuilder::StoreArgsInStackSlot(
+ std::initializer_list<std::pair<MachineRepresentation, Node*>> args) {
+ int slot_size = 0;
+ for (auto arg : args) {
+ slot_size += ElementSizeInBytes(arg.first);
+ }
+ DCHECK_LT(0, slot_size);
+ Node* stack_slot =
+ graph()->NewNode(mcgraph()->machine()->StackSlot(slot_size));
+
+ int offset = 0;
+ for (auto arg : args) {
+ MachineRepresentation type = arg.first;
+ Node* value = arg.second;
+ gasm_->Store(StoreRepresentation(type, kNoWriteBarrier), stack_slot,
+ mcgraph()->Int32Constant(offset), value);
+ offset += ElementSizeInBytes(type);
+ }
+ return stack_slot;
+}
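// For illustration, the layout this helper produces for the MemoryCopy call
// below, assuming a 64-bit target (PointerRepresentation == kWord64):
//   {instance, dst, src, size} -> offsets 0, 8, 12, 16; slot_size == 20.
// Arguments are packed densely in declaration order, so the C side must read
// them back with exactly the same representations and in the same order.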
+
Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
- Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
- TrapIfTrue(wasm::kTrapMemOutOfBounds, dst_fail, position);
- Node* src_fail = BoundsCheckMemRange(&src, &size, position);
- TrapIfTrue(wasm::kTrapMemOutOfBounds, src_fail, position);
-
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_copy()));
- MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Uint32()};
- MachineSignature sig(0, 3, sig_types);
- return SetEffect(BuildCCall(&sig, function, dst, src, size));
+
+ Node* stack_slot = StoreArgsInStackSlot(
+ {{MachineType::PointerRepresentation(), instance_node_.get()},
+ {MachineRepresentation::kWord32, dst},
+ {MachineRepresentation::kWord32, src},
+ {MachineRepresentation::kWord32, size}});
+
+ MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
+ MachineSignature sig(1, 1, sig_types);
+ Node* call = SetEffect(BuildCCall(&sig, function, stack_slot));
+ return TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
wasm::WasmCodePosition position) {
- Node* fail = BoundsCheckMemRange(&dst, &size, position);
- TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
-
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_fill()));
- MachineType sig_types[] = {MachineType::Pointer(), MachineType::Uint32(),
- MachineType::Uint32()};
- MachineSignature sig(0, 3, sig_types);
- return SetEffect(BuildCCall(&sig, function, dst, value, size));
+
+ Node* stack_slot = StoreArgsInStackSlot(
+ {{MachineType::PointerRepresentation(), instance_node_.get()},
+ {MachineRepresentation::kWord32, dst},
+ {MachineRepresentation::kWord32, value},
+ {MachineRepresentation::kWord32, size}});
+
+ MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
+ MachineSignature sig(1, 1, sig_types);
+ Node* call = SetEffect(BuildCCall(&sig, function, stack_slot));
+ return TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
Node* WasmGraphBuilder::TableInit(uint32_t table_index,
uint32_t elem_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
- DCHECK_LT(table_index, env_->module->tables.size());
- // The elem segment index must be in bounds since it is required by
- // validation.
- DCHECK_LT(elem_segment_index, env_->module->elem_segments.size());
+ auto call_descriptor = GetBuiltinCallDescriptor<WasmTableInitDescriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
- Node* args[] = {
+ intptr_t target = wasm::WasmCode::kWasmTableInit;
+ Node* call_target =
+ mcgraph()->RelocatableIntPtrConstant(target, RelocInfo::WASM_STUB_CALL);
+
+ return gasm_->Call(
+ call_descriptor, call_target, dst, src, size,
graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
- graph()->NewNode(mcgraph()->common()->NumberConstant(elem_segment_index)),
- BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size),
- BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size),
- BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)};
- return SetEffect(
- BuildCallToRuntime(Runtime::kWasmTableInit, args, arraysize(args)));
+ graph()->NewNode(
+ mcgraph()->common()->NumberConstant(elem_segment_index)));
}
Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
@@ -4976,16 +5001,17 @@ Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
uint32_t table_src_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
- Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)),
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)),
- BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size),
- BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size),
- BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)};
- Node* result =
- BuildCallToRuntime(Runtime::kWasmTableCopy, args, arraysize(args));
+ auto call_descriptor = GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
- return result;
+ intptr_t target = wasm::WasmCode::kWasmTableCopy;
+ Node* call_target =
+ mcgraph()->RelocatableIntPtrConstant(target, RelocInfo::WASM_STUB_CALL);
+
+ return gasm_->Call(
+ call_descriptor, call_target, dst, src, size,
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)),
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)));
}
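// TableInit and TableCopy now share one shape (a sketch of the pattern):
//   Node* target = mcgraph()->RelocatableIntPtrConstant(
//       wasm::WasmCode::kWasmTableCopy, RelocInfo::WASM_STUB_CALL);
//   gasm_->Call(call_descriptor, target, dst, src, size, /* indices... */);
// replacing BuildCallToRuntime plus per-argument Smi saturation with a
// direct runtime-stub call on untagged i32 operands.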
Node* WasmGraphBuilder::TableGrow(uint32_t table_index, Node* value,
@@ -5004,10 +5030,9 @@ Node* WasmGraphBuilder::TableSize(uint32_t table_index) {
int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd -
WasmTableObject::kCurrentLengthOffset + 1;
- Node* length_smi = LOAD_RAW(
- table,
- wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset),
- assert_size(length_field_size, MachineType::TaggedSigned()));
+ Node* length_smi = gasm_->Load(
+ assert_size(length_field_size, MachineType::TaggedSigned()), table,
+ wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset));
return BuildChangeSmiToInt32(length_smi);
}
@@ -5053,86 +5078,39 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
}
namespace {
+
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
WasmWrapperGraphBuilder(Zone* zone, MachineGraph* mcgraph,
- wasm::FunctionSig* sig,
+ const wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt,
StubCallMode stub_mode, wasm::WasmFeatures features)
: WasmGraphBuilder(nullptr, zone, mcgraph, sig, spt),
stub_mode_(stub_mode),
enabled_features_(features) {}
- CallDescriptor* GetI32PairToBigIntCallDescriptor() {
- I32PairToBigIntDescriptor interface_descriptor;
-
- return Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- stub_mode_); // stub call mode
- }
-
CallDescriptor* GetI64ToBigIntCallDescriptor() {
- if (!lowering_special_case_) {
- lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
- }
+ if (i64_to_bigint_descriptor_) return i64_to_bigint_descriptor_;
- if (lowering_special_case_->i64_to_bigint_call_descriptor) {
- return lowering_special_case_->i64_to_bigint_call_descriptor;
- }
+ i64_to_bigint_descriptor_ =
+ GetBuiltinCallDescriptor<I64ToBigIntDescriptor>(this, stub_mode_);
- I64ToBigIntDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- stub_mode_); // stub call mode
-
- lowering_special_case_->i64_to_bigint_call_descriptor = call_descriptor;
- lowering_special_case_->i32_pair_to_bigint_call_descriptor =
- GetI32PairToBigIntCallDescriptor();
- return call_descriptor;
- }
-
- CallDescriptor* GetBigIntToI32PairCallDescriptor() {
- BigIntToI32PairDescriptor interface_descriptor;
-
- return Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- stub_mode_); // stub call mode
+ AddInt64LoweringReplacement(
+ i64_to_bigint_descriptor_,
+ GetBuiltinCallDescriptor<I32PairToBigIntDescriptor>(this, stub_mode_));
+ return i64_to_bigint_descriptor_;
}
CallDescriptor* GetBigIntToI64CallDescriptor() {
- if (!lowering_special_case_) {
- lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
- }
-
- if (lowering_special_case_->bigint_to_i64_call_descriptor) {
- return lowering_special_case_->bigint_to_i64_call_descriptor;
- }
+ if (bigint_to_i64_descriptor_) return bigint_to_i64_descriptor_;
- BigIntToI64Descriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- stub_mode_); // stub call mode
+ bigint_to_i64_descriptor_ =
+ GetBuiltinCallDescriptor<BigIntToI64Descriptor>(this, stub_mode_);
- lowering_special_case_->bigint_to_i64_call_descriptor = call_descriptor;
- lowering_special_case_->bigint_to_i32_pair_call_descriptor =
- GetBigIntToI32PairCallDescriptor();
- return call_descriptor;
+ AddInt64LoweringReplacement(
+ bigint_to_i64_descriptor_,
+ GetBuiltinCallDescriptor<BigIntToI32PairDescriptor>(this, stub_mode_));
+ return bigint_to_i64_descriptor_;
}
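  // Both getters above share one memoization shape; reduced to a skeleton
  // (a sketch, with X standing in for I64ToBigInt or BigIntToI64):
  //   CallDescriptor* GetXCallDescriptor() {
  //     if (x_descriptor_) return x_descriptor_;  // built at most once
  //     x_descriptor_ =
  //         GetBuiltinCallDescriptor<XDescriptor>(this, stub_mode_);
  //     AddInt64LoweringReplacement(  // 32-bit-pair variant, used when the
  //         x_descriptor_,            // Int64 lowering pass rewrites the call
  //         GetBuiltinCallDescriptor<XPairDescriptor>(this, stub_mode_));
  //     return x_descriptor_;
  //   }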
Node* GetBuiltinPointerTarget(Builtins::Name builtin_id) {
@@ -5148,7 +5126,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
: GetBuiltinPointerTarget(builtin_id);
}
- Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) {
+ Node* BuildAllocateHeapNumberWithValue(Node* value) {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
Node* target = GetTargetForBuiltinCall(wasm::WasmCode::kAllocateHeapNumber,
@@ -5160,12 +5138,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
allocate_heap_number_operator_.set(common->Call(call_descriptor));
}
Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
- target, effect(), control);
+ target, effect(), control());
SetEffect(
graph()->NewNode(machine->Store(StoreRepresentation(
MachineRepresentation::kFloat64, kNoWriteBarrier)),
heap_number, BuildHeapNumberValueIndexConstant(),
- value, heap_number, control));
+ value, heap_number, control()));
return heap_number;
}
@@ -5202,8 +5180,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(IsolateRoot)),
graph()->start(), graph()->start());
undefined_value_node_ = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::TaggedPointer()),
- isolate_root,
+ mcgraph()->machine()->Load(MachineType::Pointer()), isolate_root,
mcgraph()->Int32Constant(
IsolateData::root_slot_offset(RootIndex::kUndefinedValue)),
isolate_root, graph()->start());
@@ -5212,129 +5189,96 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildChangeInt32ToTagged(Node* value) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
- CommonOperatorBuilder* common = mcgraph()->common();
-
if (SmiValuesAre32Bits()) {
return BuildChangeInt32ToSmi(value);
}
DCHECK(SmiValuesAre31Bits());
- Node* old_effect = effect();
- Node* old_control = control();
- Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value,
- graph()->start());
+ auto allocate_heap_number = gasm_->MakeDeferredLabel();
+ auto done = gasm_->MakeLabel(MachineRepresentation::kTagged);
- Node* ovf = graph()->NewNode(common->Projection(1), add, graph()->start());
- Node* branch =
- graph()->NewNode(common->Branch(BranchHint::kFalse), ovf, old_control);
+ // The smi value is {2 * value}. If that overflows, we need to allocate a
+ // heap number.
+ Node* add = gasm_->Int32AddWithOverflow(value, value);
+ Node* ovf = gasm_->Projection(1, add);
+ gasm_->GotoIf(ovf, &allocate_heap_number);
- Node* if_true = graph()->NewNode(common->IfTrue(), branch);
- Node* vtrue = BuildAllocateHeapNumberWithValue(
- graph()->NewNode(machine->ChangeInt32ToFloat64(), value), if_true);
- Node* etrue = effect();
+      // If it didn't overflow, the result is {2 * value} as a pointer-sized value.
+ Node* smi_tagged = BuildChangeInt32ToIntPtr(gasm_->Projection(0, add));
+ gasm_->Goto(&done, smi_tagged);
- Node* if_false = graph()->NewNode(common->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(common->Projection(0), add, if_false);
- vfalse = BuildChangeInt32ToIntPtr(vfalse);
+ gasm_->Bind(&allocate_heap_number);
+ Node* heap_number =
+ BuildAllocateHeapNumberWithValue(gasm_->ChangeInt32ToFloat64(value));
+ gasm_->Goto(&done, heap_number);
- Node* merge =
- SetControl(graph()->NewNode(common->Merge(2), if_true, if_false));
- SetEffect(graph()->NewNode(common->EffectPhi(2), etrue, old_effect, merge));
- return graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, merge);
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
}
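  // Worked example of the Smi tagging above (31-bit Smis): the tagged value
  // is {value + value}, i.e. {value << 1} with tag bit 0 in the LSB.
  //   value = 5     -> 5 + 5 = 10, no overflow   -> Smi (untagged value 5)
  //   value = 2^30  -> 2^31 overflows int32      -> allocate a HeapNumber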
Node* BuildChangeFloat64ToTagged(Node* value) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
- CommonOperatorBuilder* common = mcgraph()->common();
-
// Check several conditions:
// i32?
// ├─ true: zero?
- // │ ├─ true: negative?
- // │ │ ├─ true: box
- // │ │ └─ false: potentially Smi
+ // │ ├─ true: positive?
+ // │ │ ├─ true: potentially Smi
+ // │ │ └─ false: box (-0)
// │ └─ false: potentially Smi
- // └─ false: box
+ // └─ false: box (non-int)
// For potential Smi values, depending on whether Smis are 31 or 32 bit, we
// still need to check whether the value fits in a Smi.
- Node* old_effect = effect();
- Node* old_control = control();
- Node* value32 = graph()->NewNode(machine->RoundFloat64ToInt32(), value);
- Node* check_i32 = graph()->NewNode(
- machine->Float64Equal(), value,
- graph()->NewNode(machine->ChangeInt32ToFloat64(), value32));
- Node* branch_i32 =
- graph()->NewNode(common->Branch(), check_i32, old_control);
+ auto box_value = gasm_->MakeDeferredLabel();
+ auto potentially_smi = gasm_->MakeLabel();
+ auto done = gasm_->MakeLabel(MachineRepresentation::kTagged);
- Node* if_i32 = graph()->NewNode(common->IfTrue(), branch_i32);
- Node* if_not_i32 = graph()->NewNode(common->IfFalse(), branch_i32);
+ Node* value32 = gasm_->RoundFloat64ToInt32(value);
+ Node* check_i32 =
+ gasm_->Float64Equal(value, gasm_->ChangeInt32ToFloat64(value32));
+ gasm_->GotoIfNot(check_i32, &box_value);
// We only need to check for -0 if the {value} can potentially contain -0.
- Node* check_zero = graph()->NewNode(machine->Word32Equal(), value32,
- mcgraph()->Int32Constant(0));
- Node* branch_zero = graph()->NewNode(common->Branch(BranchHint::kFalse),
- check_zero, if_i32);
-
- Node* if_zero = graph()->NewNode(common->IfTrue(), branch_zero);
- Node* if_not_zero = graph()->NewNode(common->IfFalse(), branch_zero);
-
- // In case of 0, we need to check the high bits for the IEEE -0 pattern.
- Node* check_negative = graph()->NewNode(
- machine->Int32LessThan(),
- graph()->NewNode(machine->Float64ExtractHighWord32(), value),
- mcgraph()->Int32Constant(0));
- Node* branch_negative = graph()->NewNode(common->Branch(BranchHint::kFalse),
- check_negative, if_zero);
-
- Node* if_negative = graph()->NewNode(common->IfTrue(), branch_negative);
- Node* if_not_negative =
- graph()->NewNode(common->IfFalse(), branch_negative);
-
- // We need to create a box for negative 0.
- Node* if_smi =
- graph()->NewNode(common->Merge(2), if_not_zero, if_not_negative);
- Node* if_box = graph()->NewNode(common->Merge(2), if_not_i32, if_negative);
+ Node* check_zero = gasm_->Word32Equal(value32, gasm_->Int32Constant(0));
+ gasm_->GotoIfNot(check_zero, &potentially_smi);
+
+ // In case of 0, we need to check the MSB (sign bit).
+ Node* check_positive = gasm_->Word32Equal(
+ gasm_->Float64ExtractHighWord32(value), gasm_->Int32Constant(0));
+ gasm_->Branch(check_positive, &potentially_smi, &box_value);
+
+ gasm_->Bind(&potentially_smi);
    // On 64-bit machines we can just wrap the 32-bit integer in a smi; on
    // 32-bit machines we need to deal with potential overflow and fall back
    // to boxing.
- Node* vsmi;
if (SmiValuesAre32Bits()) {
- vsmi = BuildChangeInt32ToSmi(value32);
+ gasm_->Goto(&done, BuildChangeInt32ToSmi(value32));
} else {
DCHECK(SmiValuesAre31Bits());
- Node* smi_tag = graph()->NewNode(machine->Int32AddWithOverflow(), value32,
- value32, if_smi);
+ // The smi value is {2 * value}. If that overflows, we need to box.
+ Node* smi_tag = gasm_->Int32AddWithOverflow(value32, value32);
- Node* check_ovf =
- graph()->NewNode(common->Projection(1), smi_tag, if_smi);
- Node* branch_ovf = graph()->NewNode(common->Branch(BranchHint::kFalse),
- check_ovf, if_smi);
+ Node* check_ovf = gasm_->Projection(1, smi_tag);
+ gasm_->GotoIf(check_ovf, &box_value);
- Node* if_ovf = graph()->NewNode(common->IfTrue(), branch_ovf);
- if_box = graph()->NewNode(common->Merge(2), if_ovf, if_box);
-
- if_smi = graph()->NewNode(common->IfFalse(), branch_ovf);
- vsmi = graph()->NewNode(common->Projection(0), smi_tag, if_smi);
- vsmi = BuildChangeInt32ToIntPtr(vsmi);
+ Node* smi_value = gasm_->Projection(0, smi_tag);
+ // With pointer compression, only the lower 32 bits are used.
+ if (!COMPRESS_POINTERS_BOOL) {
+ smi_value = BuildChangeInt32ToIntPtr(smi_value);
+ }
+ gasm_->Goto(&done, smi_value);
}
// Allocate the box for the {value}.
- Node* vbox = BuildAllocateHeapNumberWithValue(value, if_box);
- Node* ebox = effect();
+ gasm_->Bind(&box_value);
+ gasm_->Goto(&done, BuildAllocateHeapNumberWithValue(value));
- Node* merge =
- SetControl(graph()->NewNode(common->Merge(2), if_smi, if_box));
- SetEffect(graph()->NewNode(common->EffectPhi(2), old_effect, ebox, merge));
- return graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
- vsmi, vbox, merge);
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
}
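  // Worked examples for the checks above (a sketch, not exhaustive):
  //   1.0  -> converts to 1 and back exactly     -> potentially Smi path
  //   -0.0 -> converts to 0, but the high word
  //           carries the sign bit               -> box as HeapNumber
  //   0.5  -> converts to 0, and 0.0 != 0.5      -> box as HeapNumber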
int AddArgumentNodes(Vector<Node*> args, int pos, int param_count,
- wasm::FunctionSig* sig) {
+ const wasm::FunctionSig* sig) {
// Convert wasm numbers to JS values.
for (int i = 0; i < param_count; ++i) {
Node* param =
@@ -5368,27 +5312,28 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* ToJS(Node* node, wasm::ValueType type) {
- switch (type) {
- case wasm::kWasmI32:
+ switch (type.kind()) {
+ case wasm::ValueType::kI32:
return BuildChangeInt32ToTagged(node);
- case wasm::kWasmS128:
+ case wasm::ValueType::kS128:
UNREACHABLE();
- case wasm::kWasmI64: {
+ case wasm::ValueType::kI64: {
DCHECK(enabled_features_.has_bigint());
return BuildChangeInt64ToBigInt(node);
}
- case wasm::kWasmF32:
+ case wasm::ValueType::kF32:
node = graph()->NewNode(mcgraph()->machine()->ChangeFloat32ToFloat64(),
node);
return BuildChangeFloat64ToTagged(node);
- case wasm::kWasmF64:
+ case wasm::ValueType::kF64:
return BuildChangeFloat64ToTagged(node);
- case wasm::kWasmAnyRef:
- case wasm::kWasmFuncRef:
- case wasm::kWasmNullRef:
- case wasm::kWasmExnRef:
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kFuncRef:
+ case wasm::ValueType::kNullRef:
+ case wasm::ValueType::kExnRef:
return node;
- default:
+ case wasm::ValueType::kStmt:
+ case wasm::ValueType::kBottom:
UNREACHABLE();
}
}
@@ -5436,23 +5381,21 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildTestSmi(Node* value) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
- return graph()->NewNode(machine->Word32Equal(),
- graph()->NewNode(machine->Word32And(),
- BuildTruncateIntPtrToInt32(value),
- Int32Constant(kSmiTagMask)),
- Int32Constant(0));
+ return gasm_->Word32Equal(
+ gasm_->Word32And(BuildTruncateIntPtrToInt32(value),
+ gasm_->Int32Constant(kSmiTagMask)),
+ gasm_->Int32Constant(0));
}
Node* BuildFloat64ToWasm(Node* value, wasm::ValueType type) {
- switch (type) {
- case wasm::kWasmI32:
+ switch (type.kind()) {
+ case wasm::ValueType::kI32:
return graph()->NewNode(mcgraph()->machine()->TruncateFloat64ToWord32(),
value);
- case wasm::kWasmF32:
+ case wasm::ValueType::kF32:
return graph()->NewNode(
mcgraph()->machine()->TruncateFloat64ToFloat32(), value);
- case wasm::kWasmF64:
+ case wasm::ValueType::kF64:
return value;
default:
UNREACHABLE();
@@ -5460,12 +5403,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildSmiToWasm(Node* smi, wasm::ValueType type) {
- switch (type) {
- case wasm::kWasmI32:
+ switch (type.kind()) {
+ case wasm::ValueType::kI32:
return BuildChangeSmiToInt32(smi);
- case wasm::kWasmF32:
+ case wasm::ValueType::kF32:
return BuildChangeSmiToFloat32(smi);
- case wasm::kWasmF64:
+ case wasm::ValueType::kF64:
return BuildChangeSmiToFloat64(smi);
default:
UNREACHABLE();
@@ -5473,12 +5416,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* FromJS(Node* input, Node* js_context, wasm::ValueType type) {
- switch (type) {
- case wasm::kWasmAnyRef:
- case wasm::kWasmExnRef:
+ switch (type.kind()) {
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kExnRef:
return input;
- case wasm::kWasmNullRef: {
+ case wasm::ValueType::kNullRef: {
Node* check = graph()->NewNode(mcgraph()->machine()->WordEqual(), input,
RefNull());
@@ -5497,7 +5440,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return input;
}
- case wasm::kWasmFuncRef: {
+ case wasm::ValueType::kFuncRef: {
Node* check =
BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
Runtime::kWasmIsValidFuncRefValue, js_context, &input, 1)));
@@ -5517,7 +5460,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return input;
}
- case wasm::kWasmI64:
+ case wasm::ValueType::kI64:
// i64 values can only come from BigInt.
DCHECK(enabled_features_.has_bigint());
return BuildChangeBigIntToInt64(input, js_context);
@@ -5534,25 +5477,26 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Build a graph implementing this diagram:
// input smi?
- // ├─ true: ───────────────────────┬─ smi-to-wasm ──────┬─ result
+ // ├─ true: ──<fast path>──────────┬─ smi-to-wasm ──────┬─ result
// └─ false: ToNumber -> smi? │ │
// ├─ true: ─┘ │
// └─ false: load -> f64-to-wasm ─┘
auto smi_to_wasm = gasm_->MakeLabel(MachineRepresentation::kTaggedSigned);
- auto done =
- gasm_->MakeLabel(wasm::ValueTypes::MachineRepresentationFor(type));
+ auto call_to_number =
+ gasm_->MakeDeferredLabel(MachineRepresentation::kTaggedPointer);
+ auto done = gasm_->MakeLabel(type.machine_representation());
- // If the input is Smi, directly convert to the wasm type.
- gasm_->GotoIf(BuildTestSmi(input), &smi_to_wasm, input);
+ // Branch to smi conversion or the ToNumber call.
+ gasm_->Branch(BuildTestSmi(input), &smi_to_wasm, &call_to_number, input);
- // Otherwise, call ToNumber which returns a Smi or HeapNumber.
+ // Build the ToNumber path.
+ gasm_->Bind(&call_to_number);
auto to_number_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), TypeConversionDescriptor{}, 0,
CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
Node* to_number_target =
GetTargetForBuiltinCall(wasm::WasmCode::kToNumber, Builtins::kToNumber);
-
Node* number =
gasm_->Call(to_number_descriptor, to_number_target, input, js_context);
SetSourcePosition(number, 1);
@@ -5567,7 +5511,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildFloat64ToWasm(heap_number_value, type);
gasm_->Goto(&done, converted_heap_number_value);
- // Now implement the smi to wasm conversion.
+ // Implement the smi to wasm conversion.
gasm_->Bind(&smi_to_wasm);
Node* smi_to_wasm_result = BuildSmiToWasm(smi_to_wasm.PhiAt(0), type);
gasm_->Goto(&done, smi_to_wasm_result);
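    // Example flows through the diagram above (a sketch):
    //   input = Smi(5)          -> fast path -> smi-to-wasm -> result
    //   input = String("3.5")   -> ToNumber -> HeapNumber(3.5)
    //                           -> load f64 -> f64-to-wasm -> result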
@@ -5582,8 +5526,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* isolate_root = BuildLoadIsolateRoot();
Node* thread_in_wasm_flag_address =
- LOAD_RAW(isolate_root, Isolate::thread_in_wasm_flag_address_offset(),
- MachineType::Pointer());
+ gasm_->Load(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
if (FLAG_debug_code) {
Node* flag_value = SetEffect(
@@ -5619,35 +5563,32 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildLoadFunctionDataFromExportedFunction(Node* closure) {
- Node* shared = LOAD_RAW(
- closure,
- wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction(),
- MachineType::AnyTagged());
- return LOAD_RAW(shared,
- SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag,
- MachineType::AnyTagged());
+ Node* shared = gasm_->Load(
+ MachineType::AnyTagged(), closure,
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction());
+ return gasm_->Load(
+ MachineType::AnyTagged(), shared,
+ SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag);
}
Node* BuildLoadInstanceFromExportedFunctionData(Node* function_data) {
- return LOAD_RAW(function_data,
- WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag,
- MachineType::AnyTagged());
+ return gasm_->Load(
+ MachineType::AnyTagged(), function_data,
+ WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag);
}
Node* BuildLoadFunctionIndexFromExportedFunctionData(Node* function_data) {
- Node* function_index_smi = LOAD_RAW(
- function_data,
- WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag,
- MachineType::TaggedSigned());
+ Node* function_index_smi = gasm_->Load(
+ MachineType::TaggedSigned(), function_data,
+ WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag);
Node* function_index = BuildChangeSmiToInt32(function_index_smi);
return function_index;
}
Node* BuildLoadJumpTableOffsetFromExportedFunctionData(Node* function_data) {
- Node* jump_table_offset_smi = LOAD_RAW(
- function_data,
- WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag,
- MachineType::TaggedSigned());
+ Node* jump_table_offset_smi = gasm_->Load(
+ MachineType::TaggedSigned(), function_data,
+ WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag);
Node* jump_table_offset = BuildChangeSmiToIntPtr(jump_table_offset_smi);
return jump_table_offset;
}
@@ -5768,14 +5709,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildReceiverNode(Node* callable_node, Node* native_context,
Node* undefined_node) {
// Check function strict bit.
- Node* shared_function_info = LOAD_RAW(
- callable_node,
- wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction(),
- MachineType::TaggedPointer());
+ Node* shared_function_info = gasm_->Load(
+ MachineType::TaggedPointer(), callable_node,
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction());
Node* flags =
- LOAD_RAW(shared_function_info,
- wasm::ObjectAccess::FlagsOffsetInSharedFunctionInfo(),
- MachineType::Int32());
+ gasm_->Load(MachineType::Int32(), shared_function_info,
+ wasm::ObjectAccess::FlagsOffsetInSharedFunctionInfo());
Node* strict_check =
Binop(wasm::kExprI32And, flags,
mcgraph()->Int32Constant(SharedFunctionInfo::IsNativeBit::kMask |
@@ -5815,7 +5754,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return false;
}
- // The callable is passed as the last parameter, after WASM arguments.
+ // The callable is passed as the last parameter, after Wasm arguments.
Node* callable_node = Param(wasm_count + 1);
Node* undefined_node = BuildLoadUndefinedValueFromInstance();
@@ -5833,9 +5772,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
Node* function_context =
- LOAD_RAW(callable_node,
- wasm::ObjectAccess::ContextOffsetInTaggedJSFunction(),
- MachineType::TaggedPointer());
+ gasm_->Load(MachineType::TaggedPointer(), callable_node,
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
args[pos++] = callable_node; // target callable.
// Determine receiver at runtime.
@@ -5866,9 +5804,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
base::SmallVector<Node*, 16> args(wasm_count + 9);
int pos = 0;
Node* function_context =
- LOAD_RAW(callable_node,
- wasm::ObjectAccess::ContextOffsetInTaggedJSFunction(),
- MachineType::TaggedPointer());
+ gasm_->Load(MachineType::TaggedPointer(), callable_node,
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
args[pos++] = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kArgumentsAdaptorTrampoline,
RelocInfo::WASM_STUB_CALL);
@@ -5877,10 +5814,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
// Load shared function info, and then the formal parameter count.
- Node* shared_function_info = LOAD_RAW(
- callable_node,
- wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction(),
- MachineType::TaggedPointer());
+ Node* shared_function_info = gasm_->Load(
+ MachineType::TaggedPointer(), callable_node,
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction());
Node* formal_param_count = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Uint16()),
shared_function_info,
@@ -5978,11 +5914,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
    // Store arguments on our stack, then align the stack for calling into C.
int param_bytes = 0;
for (wasm::ValueType type : sig_->parameters()) {
- param_bytes += wasm::ValueTypes::MemSize(type);
+ param_bytes += type.element_size_bytes();
}
int return_bytes = 0;
for (wasm::ValueType type : sig_->returns()) {
- return_bytes += wasm::ValueTypes::MemSize(type);
+ return_bytes += type.element_size_bytes();
}
int stack_slot_bytes = std::max(param_bytes, return_bytes);
@@ -6001,20 +5937,19 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), values,
Int32Constant(offset), Param(i + 1), effect(),
control()));
- offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ offset += type.element_size_bytes();
}
- // The function is passed as the last parameter, after WASM arguments.
+ // The function is passed as the last parameter, after Wasm arguments.
Node* function_node = Param(param_count + 1);
- Node* shared = LOAD_RAW(
- function_node,
- wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction(),
- MachineType::AnyTagged());
- Node* sfi_data = LOAD_RAW(
- shared, SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag,
- MachineType::AnyTagged());
- Node* host_data_foreign = LOAD_RAW(
- sfi_data, WasmCapiFunctionData::kEmbedderDataOffset - kHeapObjectTag,
- MachineType::AnyTagged());
+ Node* shared = gasm_->Load(
+ MachineType::AnyTagged(), function_node,
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction());
+ Node* sfi_data =
+ gasm_->Load(MachineType::AnyTagged(), shared,
+ SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag);
+ Node* host_data_foreign =
+ gasm_->Load(MachineType::AnyTagged(), sfi_data,
+ WasmCapiFunctionData::kEmbedderDataOffset - kHeapObjectTag);
BuildModifyThreadInWasmFlag(false);
Node* isolate_root = BuildLoadIsolateRoot();
@@ -6071,7 +6006,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->NewNode(GetSafeLoadOperator(offset, type), values,
Int32Constant(offset), effect(), control()));
returns[i] = val;
- offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ offset += type.element_size_bytes();
}
Return(VectorOf(returns));
}
@@ -6091,13 +6026,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Compute size for the argument buffer.
int args_size_bytes = 0;
for (wasm::ValueType type : sig_->parameters()) {
- args_size_bytes += wasm::ValueTypes::ElementSizeInBytes(type);
+ args_size_bytes += type.element_size_bytes();
}
// The return value is also passed via this buffer:
int return_size_bytes = 0;
for (wasm::ValueType type : sig_->returns()) {
- return_size_bytes += wasm::ValueTypes::ElementSizeInBytes(type);
+ return_size_bytes += type.element_size_bytes();
}
// Get a stack slot for the arguments.
@@ -6116,7 +6051,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
Int32Constant(offset), Param(i + 1), effect(),
control()));
- offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ offset += type.element_size_bytes();
}
DCHECK_EQ(args_size_bytes, offset);
@@ -6143,7 +6078,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->NewNode(GetSafeLoadOperator(offset, type), arg_buffer,
Int32Constant(offset), effect(), control()));
returns[i] = val;
- offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ offset += type.element_size_bytes();
}
Return(VectorOf(returns));
}
@@ -6267,7 +6202,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->NewNode(GetSafeLoadOperator(offset, type), arg_buffer,
Int32Constant(offset), effect(), control()));
args[pos++] = arg_load;
- offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ offset += type.element_size_bytes();
}
args[pos++] = effect();
@@ -6300,7 +6235,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
Int32Constant(offset), value, effect(),
control()));
- offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ offset += type.element_size_bytes();
pos++;
}
@@ -6329,27 +6264,31 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetOncePointer<Node> undefined_value_node_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
wasm::WasmFeatures enabled_features_;
+ CallDescriptor* bigint_to_i64_descriptor_ = nullptr;
+ CallDescriptor* i64_to_bigint_descriptor_ = nullptr;
};
} // namespace
std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
- Isolate* isolate, wasm::WasmEngine* wasm_engine, wasm::FunctionSig* sig,
- bool is_import, const wasm::WasmFeatures& enabled_features) {
+ Isolate* isolate, wasm::WasmEngine* wasm_engine,
+ const wasm::FunctionSig* sig, bool is_import,
+ const wasm::WasmFeatures& enabled_features) {
//----------------------------------------------------------------------------
// Create the Graph.
//----------------------------------------------------------------------------
std::unique_ptr<Zone> zone =
std::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
- CommonOperatorBuilder common(zone.get());
- MachineOperatorBuilder machine(
+ CommonOperatorBuilder* common =
+ new (zone.get()) CommonOperatorBuilder(zone.get());
+ MachineOperatorBuilder* machine = new (zone.get()) MachineOperatorBuilder(
zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- MachineGraph mcgraph(graph, &common, &machine);
+ MachineGraph* mcgraph = new (zone.get()) MachineGraph(graph, common, machine);
- WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr,
StubCallMode::kCallBuiltinPointer,
enabled_features);
builder.BuildJSToWasmWrapper(is_import);
@@ -6375,13 +6314,13 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
}
std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
- Handle<JSReceiver> callable, wasm::FunctionSig* expected_sig,
+ Handle<JSReceiver> callable, const wasm::FunctionSig* expected_sig,
const wasm::WasmFeatures& enabled_features) {
if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
auto imported_function = Handle<WasmExportedFunction>::cast(callable);
auto func_index = imported_function->function_index();
auto module = imported_function->instance().module();
- wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
+ const wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
if (*imported_sig != *expected_sig) {
return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
@@ -6420,14 +6359,15 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
SharedFunctionInfo shared = function->shared();
// Check for math intrinsics.
-#define COMPARE_SIG_FOR_BUILTIN(name) \
- { \
- wasm::FunctionSig* sig = wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
- if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
- DCHECK_NOT_NULL(sig); \
- if (*expected_sig == *sig) { \
- return std::make_pair(WasmImportCallKind::k##name, callable); \
- } \
+#define COMPARE_SIG_FOR_BUILTIN(name) \
+ { \
+ const wasm::FunctionSig* sig = \
+ wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
+ if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
+ DCHECK_NOT_NULL(sig); \
+ if (*expected_sig == *sig) { \
+ return std::make_pair(WasmImportCallKind::k##name, callable); \
+ } \
}
#define COMPARE_SIG_FOR_BUILTIN_F64(name) \
case Builtins::kMath##name: \
@@ -6524,7 +6464,7 @@ wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
wasm::WasmCompilationResult CompileWasmMathIntrinsic(
wasm::WasmEngine* wasm_engine, WasmImportCallKind kind,
- wasm::FunctionSig* sig) {
+ const wasm::FunctionSig* sig) {
DCHECK_EQ(1, sig->return_count());
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6532,7 +6472,7 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
Zone zone(wasm_engine->allocator(), ZONE_NAME);
- // Compile a WASM function with a single bytecode and let TurboFan
+ // Compile a Wasm function with a single bytecode and let TurboFan
// generate either inlined machine code or a call to a helper.
SourcePositionTable* source_positions = nullptr;
MachineGraph* mcgraph = new (&zone) MachineGraph(
@@ -6587,7 +6527,8 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
wasm::WasmCompilationResult CompileWasmImportCallWrapper(
wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
- WasmImportCallKind kind, wasm::FunctionSig* sig, bool source_positions) {
+ WasmImportCallKind kind, const wasm::FunctionSig* sig,
+ bool source_positions) {
DCHECK_NE(WasmImportCallKind::kLinkError, kind);
DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
@@ -6604,18 +6545,18 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
// Create the Graph
//----------------------------------------------------------------------------
Zone zone(wasm_engine->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(
+ Graph* graph = new (&zone) Graph(&zone);
+ CommonOperatorBuilder* common = new (&zone) CommonOperatorBuilder(&zone);
+ MachineOperatorBuilder* machine = new (&zone) MachineOperatorBuilder(
&zone, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- MachineGraph mcgraph(&graph, &common, &machine);
+ MachineGraph* mcgraph = new (&zone) MachineGraph(graph, common, machine);
SourcePositionTable* source_position_table =
- source_positions ? new (&zone) SourcePositionTable(&graph) : nullptr;
+ source_positions ? new (&zone) SourcePositionTable(graph) : nullptr;
- WasmWrapperGraphBuilder builder(&zone, &mcgraph, sig, source_position_table,
+ WasmWrapperGraphBuilder builder(&zone, mcgraph, sig, source_position_table,
StubCallMode::kCallWasmRuntimeStub,
env->enabled_features);
builder.BuildWasmImportCallWrapper(kind);
@@ -6626,11 +6567,11 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
CallDescriptor* incoming =
GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
WasmCallKind::kWasmImportWrapper);
- if (machine.Is32()) {
+ if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- wasm_engine, incoming, &mcgraph, Code::WASM_TO_JS_FUNCTION,
+ wasm_engine, incoming, mcgraph, Code::WASM_TO_JS_FUNCTION,
wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
source_position_table);
result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
@@ -6639,7 +6580,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
wasm::NativeModule* native_module,
- wasm::FunctionSig* sig,
+ const wasm::FunctionSig* sig,
Address address) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileWasmCapiFunction");
@@ -6682,35 +6623,36 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
WasmStubAssemblerOptions(), source_positions);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kWasmToCapiWrapper,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
wasm::ExecutionTier::kNone);
return native_module->PublishCode(std::move(wasm_code));
}
wasm::WasmCompilationResult CompileWasmInterpreterEntry(
wasm::WasmEngine* wasm_engine, const wasm::WasmFeatures& enabled_features,
- uint32_t func_index, wasm::FunctionSig* sig) {
+ uint32_t func_index, const wasm::FunctionSig* sig) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
Zone zone(wasm_engine->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(
+ Graph* graph = new (&zone) Graph(&zone);
+ CommonOperatorBuilder* common = new (&zone) CommonOperatorBuilder(&zone);
+ MachineOperatorBuilder* machine = new (&zone) MachineOperatorBuilder(
&zone, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- MachineGraph mcgraph(&graph, &common, &machine);
+ MachineGraph* mcgraph = new (&zone) MachineGraph(graph, common, machine);
- WasmWrapperGraphBuilder builder(&zone, &mcgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(&zone, mcgraph, sig, nullptr,
StubCallMode::kCallWasmRuntimeStub,
enabled_features);
builder.BuildWasmInterpreterEntry(func_index);
// Schedule and compile to machine code.
CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
- if (machine.Is32()) {
+ if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
@@ -6719,7 +6661,7 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index));
wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- wasm_engine, incoming, &mcgraph, Code::WASM_INTERPRETER_ENTRY,
+ wasm_engine, incoming, mcgraph, Code::WASM_INTERPRETER_ENTRY,
wasm::WasmCode::kInterpreterEntry, func_name.begin(),
WasmStubAssemblerOptions());
result.result_tier = wasm::ExecutionTier::kInterpreter;
@@ -6729,18 +6671,19 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
}
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
- wasm::FunctionSig* sig) {
+ const wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
std::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
- CommonOperatorBuilder common(zone.get());
- MachineOperatorBuilder machine(
+ CommonOperatorBuilder* common =
+ new (zone.get()) CommonOperatorBuilder(zone.get());
+ MachineOperatorBuilder* machine = new (zone.get()) MachineOperatorBuilder(
zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- MachineGraph mcgraph(graph, &common, &machine);
+ MachineGraph* mcgraph = new (zone.get()) MachineGraph(graph, common, machine);
- WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr,
StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
builder.BuildJSToJSWrapper(isolate);
@@ -6774,18 +6717,20 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
return code;
}
-MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
+MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate,
+ const wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
std::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
- CommonOperatorBuilder common(zone.get());
- MachineOperatorBuilder machine(
+ CommonOperatorBuilder* common =
+ new (zone.get()) CommonOperatorBuilder(zone.get());
+ MachineOperatorBuilder* machine = new (zone.get()) MachineOperatorBuilder(
zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- MachineGraph mcgraph(graph, &common, &machine);
+ MachineGraph* mcgraph = new (zone.get()) MachineGraph(graph, common, machine);
- WasmWrapperGraphBuilder builder(zone.get(), &mcgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr,
StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
builder.BuildCWasmEntry();
@@ -7004,7 +6949,7 @@ class LinkageLocationAllocator {
// General code uses the above configuration data.
CallDescriptor* GetWasmCallDescriptor(
- Zone* zone, wasm::FunctionSig* fsig,
+ Zone* zone, const wasm::FunctionSig* fsig,
WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind) {
  // The extra here is to accommodate the instance object as the first
  // parameter and, when specified, the additional callable.
@@ -7027,16 +6972,14 @@ CallDescriptor* GetWasmCallDescriptor(
// during frame iteration.
const size_t parameter_count = fsig->parameter_count();
for (size_t i = 0; i < parameter_count; i++) {
- MachineRepresentation param =
- wasm::ValueTypes::MachineRepresentationFor(fsig->GetParam(i));
+ MachineRepresentation param = fsig->GetParam(i).machine_representation();
// Skip tagged parameters (e.g. any-ref).
if (IsAnyTagged(param)) continue;
auto l = params.Next(param);
locations.AddParamAt(i + param_offset, l);
}
for (size_t i = 0; i < parameter_count; i++) {
- MachineRepresentation param =
- wasm::ValueTypes::MachineRepresentationFor(fsig->GetParam(i));
+ MachineRepresentation param = fsig->GetParam(i).machine_representation();
// Skip untagged parameters.
if (!IsAnyTagged(param)) continue;
auto l = params.Next(param);
@@ -7061,8 +7004,7 @@ CallDescriptor* GetWasmCallDescriptor(
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
- MachineRepresentation ret =
- wasm::ValueTypes::MachineRepresentationFor(fsig->GetReturn(i));
+ MachineRepresentation ret = fsig->GetReturn(i).machine_representation();
auto l = rets.Next(ret);
locations.AddReturn(l);
}
@@ -7210,12 +7152,9 @@ AssemblerOptions WasmStubAssemblerOptions() {
return options;
}
-#undef WASM_64
#undef FATAL_UNSUPPORTED_OPCODE
#undef WASM_INSTANCE_OBJECT_SIZE
#undef WASM_INSTANCE_OBJECT_OFFSET
-#undef LOAD_RAW
-#undef LOAD_RAW_NODE_OFFSET
#undef LOAD_INSTANCE_FIELD
#undef LOAD_TAGGED_POINTER
#undef LOAD_TAGGED_ANY
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 7782ebfdec..176fdb63c4 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -58,16 +58,16 @@ wasm::WasmCompilationResult ExecuteInterpreterEntryCompilation(
wasm::WasmEngine*, wasm::CompilationEnv*, const wasm::FunctionBody&,
int func_index, Counters*, wasm::WasmFeatures* detected);
-// Calls to WASM imports are handled in several different ways, depending on the
+// Calls to Wasm imports are handled in several different ways, depending on the
// type of the target function/callable and whether the signature matches the
// argument arity.
enum class WasmImportCallKind : uint8_t {
- kLinkError, // static WASM->WASM type error
- kRuntimeTypeError, // runtime WASM->JS type error
- kWasmToCapi, // fast WASM->C-API call
- kWasmToWasm, // fast WASM->WASM call
- kJSFunctionArityMatch, // fast WASM->JS call
- kJSFunctionArityMismatch, // WASM->JS, needs adapter frame
+ kLinkError, // static Wasm->Wasm type error
+ kRuntimeTypeError, // runtime Wasm->JS type error
+ kWasmToCapi, // fast Wasm->C-API call
+ kWasmToWasm, // fast Wasm->Wasm call
+ kJSFunctionArityMatch, // fast Wasm->JS call
+ kJSFunctionArityMismatch, // Wasm->JS, needs adapter frame
// Math functions imported from JavaScript that are intrinsified
kFirstMathIntrinsic,
kF64Acos = kFirstMathIntrinsic,
@@ -106,35 +106,36 @@ constexpr WasmImportCallKind kDefaultImportCallKind =
// some callables (e.g. a {WasmExportedFunction} or {WasmJSFunction}) just wrap
// another target, which is why the ultimate target is returned as well.
V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
-ResolveWasmImportCall(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
+ResolveWasmImportCall(Handle<JSReceiver> callable, const wasm::FunctionSig* sig,
const wasm::WasmFeatures& enabled_features);
-// Compiles an import call wrapper, which allows WASM to call imports.
+// Compiles an import call wrapper, which allows Wasm to call imports.
V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
wasm::WasmEngine*, wasm::CompilationEnv* env, WasmImportCallKind,
- wasm::FunctionSig*, bool source_positions);
+ const wasm::FunctionSig*, bool source_positions);
-// Compiles a host call wrapper, which allows WASM to call host functions.
+// Compiles a host call wrapper, which allows Wasm to call host functions.
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
wasm::NativeModule*,
- wasm::FunctionSig*, Address address);
+ const wasm::FunctionSig*,
+ Address address);
// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
- Isolate* isolate, wasm::WasmEngine* wasm_engine, wasm::FunctionSig* sig,
- bool is_import, const wasm::WasmFeatures& enabled_features);
+ Isolate* isolate, wasm::WasmEngine* wasm_engine,
+ const wasm::FunctionSig* sig, bool is_import,
+ const wasm::WasmFeatures& enabled_features);
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmInterpreterEntry(
wasm::WasmEngine*, const wasm::WasmFeatures& enabled_features,
- uint32_t func_index, wasm::FunctionSig*);
+ uint32_t func_index, const wasm::FunctionSig*);
// Compiles a stub with JS linkage that serves as an adapter for function
// objects constructed via {WebAssembly.Function}. It performs a round-trip
// simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
-MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
- wasm::FunctionSig* sig);
+MaybeHandle<Code> CompileJSToJSWrapper(Isolate*, const wasm::FunctionSig*);
enum CWasmEntryParameters {
kCodeEntry,
@@ -147,11 +148,11 @@ enum CWasmEntryParameters {
// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
// which knows how to feed it its parameters.
-MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
+MaybeHandle<Code> CompileCWasmEntry(Isolate*, const wasm::FunctionSig*);
-// Values from the instance object are cached between WASM-level function calls.
+// Values from the instance object are cached between Wasm-level function calls.
// This struct allows the SSA environment handling this cache to be defined
-// and manipulated in wasm-compiler.{h,cc} instead of inside the WASM decoder.
+// and manipulated in wasm-compiler.{h,cc} instead of inside the Wasm decoder.
// (Note that currently, the globals base is immutable, so not cached here.)
struct WasmInstanceCacheNodes {
Node* mem_start;
@@ -174,7 +175,8 @@ class WasmGraphBuilder {
V8_EXPORT_PRIVATE WasmGraphBuilder(
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
- wasm::FunctionSig* sig, compiler::SourcePositionTable* spt = nullptr);
+ const wasm::FunctionSig* sig,
+ compiler::SourcePositionTable* spt = nullptr);
V8_EXPORT_PRIVATE ~WasmGraphBuilder();
@@ -211,7 +213,7 @@ class WasmGraphBuilder {
Node* Rethrow(Node* except_obj);
Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
Node* LoadExceptionTagFromTable(uint32_t exception_index);
- Node* GetExceptionTag(Node* except_obj);
+ Node* GetExceptionTag(Node* except_obj, wasm::WasmCodePosition);
Node* GetExceptionValues(Node* except_obj,
const wasm::WasmException* exception,
Vector<Node*> values_out);
@@ -283,7 +285,12 @@ class WasmGraphBuilder {
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
- Node* LoadTransform(MachineType memtype,
+#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
+ Node* LoadTransformBigEndian(MachineType memtype,
+ wasm::LoadTransformationKind transform,
+ Node* value);
+#endif
+ Node* LoadTransform(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
@@ -335,7 +342,7 @@ class WasmGraphBuilder {
this->instance_cache_ = instance_cache;
}
- wasm::FunctionSig* GetFunctionSignature() { return sig_; }
+ const wasm::FunctionSig* GetFunctionSignature() { return sig_; }
enum CallOrigin { kCalledFromWasm, kCalledFromJS };
@@ -430,7 +437,7 @@ class WasmGraphBuilder {
template <typename... Args>
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
- Node* BuildCallNode(wasm::FunctionSig* sig, Vector<Node*> args,
+ Node* BuildCallNode(const wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position, Node* instance_node,
const Operator* op);
// Helper function for {BuildIndirectCall}.
@@ -441,16 +448,16 @@ class WasmGraphBuilder {
Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
IsReturnCall continuation);
- Node* BuildWasmCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Node* BuildWasmCall(const wasm::FunctionSig* sig, Vector<Node*> args,
Vector<Node*> rets, wasm::WasmCodePosition position,
Node* instance_node, UseRetpoline use_retpoline);
- Node* BuildWasmReturnCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Node* BuildWasmReturnCall(const wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node, UseRetpoline use_retpoline);
- Node* BuildImportCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Node* BuildImportCall(const wasm::FunctionSig* sig, Vector<Node*> args,
Vector<Node*> rets, wasm::WasmCodePosition position,
int func_index, IsReturnCall continuation);
- Node* BuildImportCall(wasm::FunctionSig* sig, Vector<Node*> args,
+ Node* BuildImportCall(const wasm::FunctionSig* sig, Vector<Node*> args,
Vector<Node*> rets, wasm::WasmCodePosition position,
Node* func_index, IsReturnCall continuation);
@@ -551,6 +558,16 @@ class WasmGraphBuilder {
Node** parameters, int parameter_count);
TrapId GetTrapIdForTrap(wasm::TrapReason reason);
+ void AddInt64LoweringReplacement(CallDescriptor* original,
+ CallDescriptor* replacement);
+
+ CallDescriptor* GetI32AtomicWaitCallDescriptor();
+
+ CallDescriptor* GetI64AtomicWaitCallDescriptor();
+
+ Node* StoreArgsInStackSlot(
+ std::initializer_list<std::pair<MachineRepresentation, Node*>> args);
+
std::unique_ptr<WasmGraphAssembler> gasm_;
Zone* const zone_;
MachineGraph* const mcgraph_;
@@ -569,19 +586,21 @@ class WasmGraphBuilder {
bool needs_stack_check_ = false;
const bool untrusted_code_mitigations_ = true;
- wasm::FunctionSig* const sig_;
+ const wasm::FunctionSig* const sig_;
compiler::WasmDecorator* decorator_ = nullptr;
compiler::SourcePositionTable* const source_position_table_ = nullptr;
std::unique_ptr<Int64LoweringSpecialCase> lowering_special_case_;
+ CallDescriptor* i32_atomic_wait_descriptor_ = nullptr;
+ CallDescriptor* i64_atomic_wait_descriptor_ = nullptr;
};
enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
- Zone* zone, wasm::FunctionSig* signature,
+ Zone* zone, const wasm::FunctionSig* signature,
WasmGraphBuilder::UseRetpoline use_retpoline =
WasmGraphBuilder::kNoRetpoline,
WasmCallKind kind = kWasmFunction);
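
Note on the hunks above: every wasm::FunctionSig* accepted by the compilation
entry points and by WasmGraphBuilder gains a const qualifier. The compiler
only reads signatures, so callers can now pass signatures owned by immutable
module metadata. A minimal sketch of the pattern, with illustrative types
rather than V8's real declarations:

    struct FunctionSig { /* parameter and return types */ };

    void CompileWrapper(const FunctionSig* sig);  // reads, never mutates

    void Caller(const FunctionSig& module_owned) {
      CompileWrapper(&module_owned);  // legal once the parameter is const
    }
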
diff --git a/deps/v8/src/d8/OWNERS b/deps/v8/src/d8/OWNERS
index a548e5a509..d57b8cb779 100644
--- a/deps/v8/src/d8/OWNERS
+++ b/deps/v8/src/d8/OWNERS
@@ -1,4 +1,5 @@
binji@chromium.org
bmeurer@chromium.org
clemensb@chromium.org
+ulan@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/d8/async-hooks-wrapper.cc b/deps/v8/src/d8/async-hooks-wrapper.cc
index cb00dd521a..f96aad4123 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.cc
+++ b/deps/v8/src/d8/async-hooks-wrapper.cc
@@ -47,11 +47,8 @@ static AsyncHooksWrap* UnwrapHook(
AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
if (!hooks->async_hook_ctor.Get(isolate)->HasInstance(hook)) {
- isolate->ThrowException(
- String::NewFromUtf8(
- isolate, "Invalid 'this' passed instead of AsyncHooks instance",
- NewStringType::kNormal)
- .ToLocalChecked());
+ isolate->ThrowException(String::NewFromUtf8Literal(
+ isolate, "Invalid 'this' passed instead of AsyncHooks instance"));
return nullptr;
}
@@ -90,10 +87,8 @@ Local<Object> AsyncHooks::CreateHook(
Local<Context> currentContext = isolate->GetCurrentContext();
if (args.Length() != 1 || !args[0]->IsObject()) {
- isolate->ThrowException(
- String::NewFromUtf8(isolate, "Invalid arguments passed to createHook",
- NewStringType::kNormal)
- .ToLocalChecked());
+ isolate->ThrowException(String::NewFromUtf8Literal(
+ isolate, "Invalid arguments passed to createHook"));
return Local<Object>();
}
@@ -101,15 +96,12 @@ Local<Object> AsyncHooks::CreateHook(
Local<Object> fn_obj = args[0].As<Object>();
-#define SET_HOOK_FN(name) \
- Local<Value> name##_v = \
- fn_obj \
- ->Get(currentContext, \
- String::NewFromUtf8(isolate, #name, NewStringType::kNormal) \
- .ToLocalChecked()) \
- .ToLocalChecked(); \
- if (name##_v->IsFunction()) { \
- wrap->set_##name##_function(name##_v.As<Function>()); \
+#define SET_HOOK_FN(name) \
+ Local<Value> name##_v = \
+ fn_obj->Get(currentContext, String::NewFromUtf8Literal(isolate, #name)) \
+ .ToLocalChecked(); \
+ if (name##_v->IsFunction()) { \
+ wrap->set_##name##_function(name##_v.As<Function>()); \
}
SET_HOOK_FN(init);
@@ -197,19 +189,16 @@ void AsyncHooks::Initialize() {
async_hook_ctor.Reset(isolate_, FunctionTemplate::New(isolate_));
async_hook_ctor.Get(isolate_)->SetClassName(
- String::NewFromUtf8(isolate_, "AsyncHook", NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8Literal(isolate_, "AsyncHook"));
async_hooks_templ.Reset(isolate_,
async_hook_ctor.Get(isolate_)->InstanceTemplate());
async_hooks_templ.Get(isolate_)->SetInternalFieldCount(1);
async_hooks_templ.Get(isolate_)->Set(
- String::NewFromUtf8(isolate_, "enable", v8::NewStringType::kNormal)
- .ToLocalChecked(),
+ String::NewFromUtf8Literal(isolate_, "enable"),
FunctionTemplate::New(isolate_, EnableHook));
async_hooks_templ.Get(isolate_)->Set(
- String::NewFromUtf8(isolate_, "disable", v8::NewStringType::kNormal)
- .ToLocalChecked(),
+ String::NewFromUtf8Literal(isolate_, "disable"),
FunctionTemplate::New(isolate_, DisableHook));
async_id_smb.Reset(isolate_, Private::New(isolate_));
@@ -262,10 +251,7 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
if (type == PromiseHookType::kInit) {
if (!wrap->init_function().IsEmpty()) {
Local<Value> initArgs[4] = {
- async_id,
- String::NewFromUtf8(hooks->isolate_, "PROMISE",
- NewStringType::kNormal)
- .ToLocalChecked(),
+ async_id, String::NewFromUtf8Literal(hooks->isolate_, "PROMISE"),
promise
->GetPrivate(context, hooks->trigger_id_smb.Get(hooks->isolate_))
.ToLocalChecked(),
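
Note: every String::NewFromUtf8 call on a compile-time literal in this file is
rewritten to String::NewFromUtf8Literal, which takes the literal directly,
cannot fail, and so returns a plain Local<String> with no ToLocalChecked()
step. The shape of the migration in isolation (assuming an Isolate* named
isolate):

    // Before: runtime UTF-8 conversion yielding a MaybeLocal<String>.
    Local<String> s = String::NewFromUtf8(isolate, "message",
                                          NewStringType::kNormal)
                          .ToLocalChecked();

    // After: the length is known at compile time, so there is no failure path.
    Local<String> s = String::NewFromUtf8Literal(isolate, "message");
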
diff --git a/deps/v8/src/d8/d8-console.cc b/deps/v8/src/d8/d8-console.cc
index 9c133b60f4..5ecdf6767b 100644
--- a/deps/v8/src/d8/d8-console.cc
+++ b/deps/v8/src/d8/d8-console.cc
@@ -44,9 +44,7 @@ void D8Console::Assert(const debug::ConsoleCallArguments& args,
if (args.Length() > 0 && args[0]->BooleanValue(isolate_)) return;
WriteToFile("console.assert", stdout, isolate_, args);
isolate_->ThrowException(v8::Exception::Error(
- v8::String::NewFromUtf8(isolate_, "console.assert failed",
- v8::NewStringType::kNormal)
- .ToLocalChecked()));
+ v8::String::NewFromUtf8Literal(isolate_, "console.assert failed")));
}
void D8Console::Log(const debug::ConsoleCallArguments& args,
diff --git a/deps/v8/src/d8/d8-posix.cc b/deps/v8/src/d8/d8-posix.cc
index 23767ba2b5..1c9f506641 100644
--- a/deps/v8/src/d8/d8-posix.cc
+++ b/deps/v8/src/d8/d8-posix.cc
@@ -158,11 +158,8 @@ class ExecArgs {
bool Init(Isolate* isolate, Local<Value> arg0, Local<Array> command_args) {
String::Utf8Value prog(isolate, arg0);
if (*prog == nullptr) {
- const char* message =
- "os.system(): String conversion of program name failed";
- isolate->ThrowException(
- String::NewFromUtf8(isolate, message, NewStringType::kNormal)
- .ToLocalChecked());
+ isolate->ThrowException(String::NewFromUtf8Literal(
+ isolate, "os.system(): String conversion of program name failed"));
return false;
}
int len = prog.length() + 3;
@@ -178,11 +175,8 @@ class ExecArgs {
String::Utf8Value utf8_arg(isolate, arg);
if (*utf8_arg == nullptr) {
exec_args_[i] = nullptr; // Consistent state for destructor.
- const char* message =
- "os.system(): String conversion of argument failed.";
- isolate->ThrowException(
- String::NewFromUtf8(isolate, message, NewStringType::kNormal)
- .ToLocalChecked());
+ isolate->ThrowException(String::NewFromUtf8Literal(
+ isolate, "os.system(): String conversion of argument failed."));
return false;
}
int len = utf8_arg.length() + 1;
@@ -219,11 +213,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
->Int32Value(args.GetIsolate()->GetCurrentContext())
.FromJust();
} else {
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(),
- "system: Argument 4 must be a number",
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "system: Argument 4 must be a number"));
return false;
}
}
@@ -233,11 +224,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
->Int32Value(args.GetIsolate()->GetCurrentContext())
.FromJust();
} else {
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(),
- "system: Argument 3 must be a number",
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "system: Argument 3 must be a number"));
return false;
}
}
@@ -279,8 +267,7 @@ static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
} while (bytes_read == -1 && errno == EINTR);
if (bytes_read != 0) {
isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(err), NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, strerror(err)).ToLocalChecked());
return false;
}
return true;
@@ -299,8 +286,7 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
if (fcntl(child_fd, F_SETFL, O_NONBLOCK) != 0) {
return isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
}
int bytes_read;
@@ -311,10 +297,8 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
if (errno == EAGAIN) {
if (!WaitOnFD(child_fd, read_timeout, total_timeout, start_time) ||
(TimeIsOut(start_time, total_timeout))) {
- return isolate->ThrowException(
- String::NewFromUtf8(isolate, "Timed out waiting for output",
- NewStringType::kNormal)
- .ToLocalChecked());
+ return isolate->ThrowException(String::NewFromUtf8Literal(
+ isolate, "Timed out waiting for output"));
}
continue;
} else if (errno == EINTR) {
@@ -372,11 +356,8 @@ static bool WaitForChild(Isolate* isolate, int pid,
if (useconds < 1000000) useconds <<= 1;
if ((read_timeout != -1 && useconds / 1000 > read_timeout) ||
(TimeIsOut(start_time, total_timeout))) {
- isolate->ThrowException(
- String::NewFromUtf8(isolate,
- "Timed out waiting for process to terminate",
- NewStringType::kNormal)
- .ToLocalChecked());
+ isolate->ThrowException(String::NewFromUtf8Literal(
+ isolate, "Timed out waiting for process to terminate"));
kill(pid, SIGINT);
return false;
}
@@ -386,8 +367,7 @@ static bool WaitForChild(Isolate* isolate, int pid,
snprintf(message, sizeof(message), "Child killed by signal %d",
child_info.si_status);
isolate->ThrowException(
- String::NewFromUtf8(isolate, message, NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, message).ToLocalChecked());
return false;
}
if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
@@ -395,8 +375,7 @@ static bool WaitForChild(Isolate* isolate, int pid,
snprintf(message, sizeof(message), "Child exited with status %d",
child_info.si_status);
isolate->ThrowException(
- String::NewFromUtf8(isolate, message, NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, message).ToLocalChecked());
return false;
}
@@ -410,8 +389,7 @@ static bool WaitForChild(Isolate* isolate, int pid,
snprintf(message, sizeof(message), "Child killed by signal %d",
WTERMSIG(child_status));
isolate->ThrowException(
- String::NewFromUtf8(isolate, message, NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, message).ToLocalChecked());
return false;
}
if (WEXITSTATUS(child_status) != 0) {
@@ -420,8 +398,7 @@ static bool WaitForChild(Isolate* isolate, int pid,
snprintf(message, sizeof(message), "Child exited with status %d",
exit_status);
isolate->ThrowException(
- String::NewFromUtf8(isolate, message, NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, message).ToLocalChecked());
return false;
}
@@ -441,11 +418,8 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(),
- "system: Argument 2 must be an array",
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "system: Argument 2 must be an array"));
return;
}
command_args = Local<Array>::Cast(args[1]);
@@ -453,17 +427,13 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
command_args = Array::New(args.GetIsolate(), 0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), "Too many arguments to system()",
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "Too many arguments to system()"));
return;
}
if (args.Length() < 1) {
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), "Too few arguments to system()",
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "Too few arguments to system()"));
return;
}
@@ -479,16 +449,12 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (pipe(exec_error_fds) != 0) {
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed.",
- NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8Literal(args.GetIsolate(), "pipe syscall failed."));
return;
}
if (pipe(stdout_fds) != 0) {
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed.",
- NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8Literal(args.GetIsolate(), "pipe syscall failed."));
return;
}
@@ -526,24 +492,20 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- const char* message = "chdir() takes one argument";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "chdir() takes one argument"));
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
if (*directory == nullptr) {
- const char* message = "os.chdir(): String conversion of argument failed.";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "os.chdir(): String conversion of argument failed."));
return;
}
if (chdir(*directory) != 0) {
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), strerror(errno),
- NewStringType::kNormal)
+ String::NewFromUtf8(args.GetIsolate(), strerror(errno))
.ToLocalChecked());
return;
}
@@ -551,10 +513,8 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- const char* message = "umask() takes one argument";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "umask() takes one argument"));
return;
}
if (args[0]->IsNumber()) {
@@ -563,10 +523,8 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(previous);
return;
} else {
- const char* message = "umask() argument must be numeric";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "umask() argument must be numeric"));
return;
}
}
@@ -576,14 +534,12 @@ static bool CheckItsADirectory(Isolate* isolate, char* directory) {
int stat_result = stat(directory, &stat_buf);
if (stat_result != 0) {
isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
return false;
}
if ((stat_buf.st_mode & S_IFDIR) != 0) return true;
isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(EEXIST), NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, strerror(EEXIST)).ToLocalChecked());
return false;
}
@@ -598,8 +554,7 @@ static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
char* last_slash = strrchr(directory, '/');
if (last_slash == nullptr) {
isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
return false;
}
*last_slash = 0;
@@ -611,13 +566,11 @@ static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
return CheckItsADirectory(isolate, directory);
}
isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
return false;
} else {
isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
return false;
}
}
@@ -630,26 +583,20 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
->Int32Value(args.GetIsolate()->GetCurrentContext())
.FromJust();
} else {
- const char* message = "mkdirp() second argument must be numeric";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message,
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "mkdirp() second argument must be numeric"));
return;
}
} else if (args.Length() != 1) {
- const char* message = "mkdirp() takes one or two arguments";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "mkdirp() takes one or two arguments"));
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
if (*directory == nullptr) {
- const char* message = "os.mkdirp(): String conversion of argument failed.";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "os.mkdirp(): String conversion of argument failed."));
return;
}
mkdirp(args.GetIsolate(), *directory, mask);
@@ -657,18 +604,15 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- const char* message = "rmdir() takes one or two arguments";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "rmdir() takes one or two arguments"));
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
if (*directory == nullptr) {
- const char* message = "os.rmdir(): String conversion of argument failed.";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "os.rmdir(): String conversion of argument failed."));
return;
}
rmdir(*directory);
@@ -676,28 +620,22 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
- const char* message = "setenv() takes two arguments";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "setenv() takes two arguments"));
return;
}
String::Utf8Value var(args.GetIsolate(), args[0]);
String::Utf8Value value(args.GetIsolate(), args[1]);
if (*var == nullptr) {
- const char* message =
- "os.setenv(): String conversion of variable name failed.";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "os.setenv(): String conversion of variable name failed."));
return;
}
if (*value == nullptr) {
- const char* message =
- "os.setenv(): String conversion of variable contents failed.";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "os.setenv(): String conversion of variable contents failed."));
return;
}
setenv(*var, *value, 1);
@@ -705,19 +643,15 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- const char* message = "unsetenv() takes one argument";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(), "unsetenv() takes one argument"));
return;
}
String::Utf8Value var(args.GetIsolate(), args[0]);
if (*var == nullptr) {
- const char* message =
- "os.setenv(): String conversion of variable name failed.";
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "os.setenv(): String conversion of variable name failed."));
return;
}
unsetenv(*var);
@@ -827,27 +761,20 @@ char* Shell::ReadCharsFromTcpPort(const char* name, int* size_out) {
void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {
if (options.enable_os_system) {
- os_templ->Set(String::NewFromUtf8(isolate, "system", NewStringType::kNormal)
- .ToLocalChecked(),
+ os_templ->Set(String::NewFromUtf8Literal(isolate, "system"),
FunctionTemplate::New(isolate, System));
}
- os_templ->Set(String::NewFromUtf8(isolate, "chdir", NewStringType::kNormal)
- .ToLocalChecked(),
+ os_templ->Set(String::NewFromUtf8Literal(isolate, "chdir"),
FunctionTemplate::New(isolate, ChangeDirectory));
- os_templ->Set(String::NewFromUtf8(isolate, "setenv", NewStringType::kNormal)
- .ToLocalChecked(),
+ os_templ->Set(String::NewFromUtf8Literal(isolate, "setenv"),
FunctionTemplate::New(isolate, SetEnvironment));
- os_templ->Set(String::NewFromUtf8(isolate, "unsetenv", NewStringType::kNormal)
- .ToLocalChecked(),
+ os_templ->Set(String::NewFromUtf8Literal(isolate, "unsetenv"),
FunctionTemplate::New(isolate, UnsetEnvironment));
- os_templ->Set(String::NewFromUtf8(isolate, "umask", NewStringType::kNormal)
- .ToLocalChecked(),
+ os_templ->Set(String::NewFromUtf8Literal(isolate, "umask"),
FunctionTemplate::New(isolate, SetUMask));
- os_templ->Set(String::NewFromUtf8(isolate, "mkdirp", NewStringType::kNormal)
- .ToLocalChecked(),
+ os_templ->Set(String::NewFromUtf8Literal(isolate, "mkdirp"),
FunctionTemplate::New(isolate, MakeDirectory));
- os_templ->Set(String::NewFromUtf8(isolate, "rmdir", NewStringType::kNormal)
- .ToLocalChecked(),
+ os_templ->Set(String::NewFromUtf8Literal(isolate, "rmdir"),
FunctionTemplate::New(isolate, RemoveDirectory));
}
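
Note: strings only known at runtime (strerror(errno), snprintf buffers) stay
on String::NewFromUtf8 throughout this file, but the explicit
NewStringType::kNormal argument is dropped because kNormal is now the default
parameter; the call still returns a MaybeLocal<String> that d8 unwraps with
ToLocalChecked(). Sketch, assuming isolate and a runtime C string msg:

    Local<String> s =
        String::NewFromUtf8(isolate, msg).ToLocalChecked();  // kNormal implied
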
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index d6d21bf5fa..dd00c58288 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -10,6 +10,8 @@
#include <algorithm>
#include <fstream>
#include <iomanip>
+#include <iterator>
+#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
@@ -35,6 +37,7 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/maybe-handles.h"
#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
@@ -308,8 +311,7 @@ std::unique_ptr<v8::Platform> g_platform;
static Local<Value> Throw(Isolate* isolate, const char* message) {
return isolate->ThrowException(
- String::NewFromUtf8(isolate, message, NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8(isolate, message).ToLocalChecked());
}
static MaybeLocal<Value> TryGetValue(v8::Isolate* isolate,
@@ -317,8 +319,7 @@ static MaybeLocal<Value> TryGetValue(v8::Isolate* isolate,
Local<v8::Object> object,
const char* property) {
Local<String> v8_str =
- String::NewFromUtf8(isolate, property, NewStringType::kNormal)
- .FromMaybe(Local<String>());
+ String::NewFromUtf8(isolate, property).FromMaybe(Local<String>());
if (v8_str.IsEmpty()) return Local<Value>();
return object->Get(context, v8_str);
}
@@ -370,8 +371,7 @@ class TraceConfigParser {
HandleScope inner_scope(isolate);
Local<String> source =
- String::NewFromUtf8(isolate, json_str, NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8(isolate, json_str).ToLocalChecked();
Local<Value> result = JSON::Parse(context, source).ToLocalChecked();
Local<v8::Object> trace_config_object = Local<v8::Object>::Cast(result);
@@ -552,7 +552,8 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
i::Handle<i::Script> script =
parse_info.CreateScript(i_isolate, str, options.compile_options);
- if (!i::parsing::ParseProgram(&parse_info, script, i_isolate)) {
+ if (!i::parsing::ParseProgram(&parse_info, script, i::kNullMaybeHandle,
+ i_isolate)) {
fprintf(stderr, "Failed parsing\n");
return false;
}
@@ -640,7 +641,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
DCHECK(try_catch.HasCaught());
return false;
}
- // It's possible that a FinalizationGroup cleanup task threw an error.
+ // It's possible that a FinalizationRegistry cleanup task threw an error.
if (try_catch.HasCaught()) success = false;
if (print_result) {
if (options.test_shell) {
@@ -706,18 +707,29 @@ std::string DirName(const std::string& path) {
// and replacing backslashes with slashes).
std::string NormalizePath(const std::string& path,
const std::string& dir_name) {
- std::string result;
+ std::string absolute_path;
if (IsAbsolutePath(path)) {
- result = path;
+ absolute_path = path;
} else {
- result = dir_name + '/' + path;
+ absolute_path = dir_name + '/' + path;
}
- std::replace(result.begin(), result.end(), '\\', '/');
- size_t i;
- while ((i = result.find("/./")) != std::string::npos) {
- result.erase(i, 2);
+ std::replace(absolute_path.begin(), absolute_path.end(), '\\', '/');
+ std::vector<std::string> segments;
+ std::istringstream segment_stream(absolute_path);
+ std::string segment;
+ while (std::getline(segment_stream, segment, '/')) {
+ if (segment == "..") {
+ segments.pop_back();
+ } else if (segment != ".") {
+ segments.push_back(segment);
+ }
}
- return result;
+ // Join path segments.
+ std::ostringstream os;
+ std::copy(segments.begin(), segments.end() - 1,
+ std::ostream_iterator<std::string>(os, "/"));
+ os << *segments.rbegin();
+ return os.str();
}
// Per-context Module data, allowing sharing of module maps
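
Note: the new NormalizePath body splits the slash-normalized absolute path
into segments, pops a segment for each "..", drops "." segments, and rejoins
with '/' (the join step assumes at least one segment survives). A worked trace
with hypothetical inputs:

    // NormalizePath("../lib/util.mjs", "/proj/src")
    //   absolute_path: "/proj/src/../lib/util.mjs"
    //   segments: "", "proj", "src"; ".." pops "src"; then "lib", "util.mjs"
    //   joined result: "/proj/lib/util.mjs"
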
@@ -785,14 +797,21 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
DCHECK(IsAbsolutePath(file_name));
Isolate* isolate = context->GetIsolate();
Local<String> source_text = ReadFile(isolate, file_name.c_str());
+ if (source_text.IsEmpty() && options.fuzzy_module_file_extensions) {
+ std::string fallback_file_name = file_name + ".js";
+ source_text = ReadFile(isolate, fallback_file_name.c_str());
+ if (source_text.IsEmpty()) {
+ fallback_file_name = file_name + ".mjs";
+ source_text = ReadFile(isolate, fallback_file_name.c_str());
+ }
+ }
if (source_text.IsEmpty()) {
std::string msg = "Error reading: " + file_name;
Throw(isolate, msg.c_str());
return MaybeLocal<Module>();
}
ScriptOrigin origin(
- String::NewFromUtf8(isolate, file_name.c_str(), NewStringType::kNormal)
- .ToLocalChecked(),
+ String::NewFromUtf8(isolate, file_name.c_str()).ToLocalChecked(),
Local<Integer>(), Local<Integer>(), Local<Boolean>(), Local<Integer>(),
Local<Value>(), Local<Boolean>(), Local<Boolean>(), True(isolate));
ScriptCompiler::Source source(source_text, origin);
@@ -815,10 +834,9 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
Local<String> name = module->GetModuleRequest(i);
std::string absolute_path =
NormalizePath(ToSTLString(isolate, name), dir_name);
- if (!d->specifier_to_module_map.count(absolute_path)) {
- if (FetchModuleTree(context, absolute_path).IsEmpty()) {
- return MaybeLocal<Module>();
- }
+ if (d->specifier_to_module_map.count(absolute_path)) continue;
+ if (FetchModuleTree(context, absolute_path).IsEmpty()) {
+ return MaybeLocal<Module>();
}
}
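
Note: the fallback added above is gated on the new fuzzy_module_file_extensions
option (defaulting to true per the d8.h hunk below; SetOptions additionally
recognizes --fuzzy-module-file-extensions): when a module file cannot be read,
d8 retries with ".js" and then ".mjs" appended before reporting an error.
Trace with hypothetical files:

    // FetchModuleTree(context, "/proj/util"):
    //   ReadFile("/proj/util")     -> empty
    //   ReadFile("/proj/util.js")  -> empty
    //   ReadFile("/proj/util.mjs") -> source found, module compiles
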
@@ -916,16 +934,6 @@ MaybeLocal<Promise> Shell::HostImportModuleDynamically(
return MaybeLocal<Promise>();
}
-void Shell::HostCleanupFinalizationGroup(Local<Context> context,
- Local<FinalizationGroup> fg) {
- Isolate* isolate = context->GetIsolate();
- PerIsolateData::Get(isolate)->HostCleanupFinalizationGroup(fg);
-}
-
-void PerIsolateData::HostCleanupFinalizationGroup(Local<FinalizationGroup> fg) {
- cleanup_finalization_groups_.emplace(isolate_, fg);
-}
-
void Shell::HostInitializeImportMetaObject(Local<Context> context,
Local<Module> module,
Local<Object> meta) {
@@ -938,10 +946,8 @@ void Shell::HostInitializeImportMetaObject(Local<Context> context,
CHECK(specifier_it != d->module_to_specifier_map.end());
Local<String> url_key =
- String::NewFromUtf8(isolate, "url", NewStringType::kNormal)
- .ToLocalChecked();
- Local<String> url = String::NewFromUtf8(isolate, specifier_it->second.c_str(),
- NewStringType::kNormal)
+ String::NewFromUtf8Literal(isolate, "url", NewStringType::kInternalized);
+ Local<String> url = String::NewFromUtf8(isolate, specifier_it->second.c_str())
.ToLocalChecked();
meta->CreateDataProperty(context, url_key, url).ToChecked();
}
@@ -1017,7 +1023,7 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
result_promise->Then(realm, callback_success, callback_failure)
.ToLocalChecked();
} else {
- // TODO(joshualitt): Clean up exception handling after introucing new
+ // TODO(cbruni): Clean up exception handling after introducing new
// API for evaluating async modules.
DCHECK(!try_catch.HasCaught());
resolver->Resolve(realm, module_namespace).ToChecked();
@@ -1062,17 +1068,17 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
}
if (i::FLAG_harmony_top_level_await) {
// Loop until module execution finishes
- // TODO(joshualitt): This is a bit wonky. "Real" engines would not be
+ // TODO(cbruni): This is a bit wonky. "Real" engines would not be
// able to just busy loop waiting for execution to finish.
Local<Promise> result_promise(Local<Promise>::Cast(result));
while (result_promise->State() == Promise::kPending) {
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
}
if (result_promise->State() == Promise::kRejected) {
// If the exception has been caught by the promise pipeline, we rethrow
// here in order to ReportException.
- // TODO(joshualitt): Clean this up after we create a new API for the case
+ // TODO(cbruni): Clean this up after we create a new API for the case
// where TLA is enabled.
if (!try_catch.HasCaught()) {
isolate->ThrowException(result_promise->Result());
@@ -1123,15 +1129,6 @@ MaybeLocal<Context> PerIsolateData::GetTimeoutContext() {
return result;
}
-MaybeLocal<FinalizationGroup> PerIsolateData::GetCleanupFinalizationGroup() {
- if (cleanup_finalization_groups_.empty())
- return MaybeLocal<FinalizationGroup>();
- Local<FinalizationGroup> result =
- cleanup_finalization_groups_.front().Get(isolate_);
- cleanup_finalization_groups_.pop();
- return result;
-}
-
PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_count_ = 1;
data_->realm_current_ = 0;
@@ -1281,8 +1278,11 @@ void Shell::DisposeRealm(const v8::FunctionCallbackInfo<v8::Value>& args,
int index) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- DisposeModuleEmbedderData(data->realms_[index].Get(isolate));
+ Local<Context> context = data->realms_[index].Get(isolate);
+ DisposeModuleEmbedderData(context);
data->realms_[index].Reset();
+ // ContextDisposedNotification expects the disposed context to be entered.
+ v8::Context::Scope scope(context);
isolate->ContextDisposedNotification();
isolate->IdleNotificationDeadline(g_platform->MonotonicallyIncreasingTime());
}
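
Note: per the comment in the hunk, ContextDisposedNotification (a hint to the
GC heuristics) expects the context being reported as disposed to be the
entered context, so DisposeRealm now enters the stale realm's context for the
duration of the call. Minimal shape of the fix:

    v8::Context::Scope scope(context);       // enter the context being disposed
    isolate->ContextDisposedNotification();  // GC hint about the exited context
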
@@ -1524,8 +1524,7 @@ void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<String> Shell::ReadFromStdin(Isolate* isolate) {
static const int kBufferSize = 256;
char buffer[kBufferSize];
- Local<String> accumulator =
- String::NewFromUtf8(isolate, "", NewStringType::kNormal).ToLocalChecked();
+ Local<String> accumulator = String::NewFromUtf8Literal(isolate, "");
int length;
while (true) {
// Continue reading if the line ends with an escape '\\' or the line has
@@ -1574,9 +1573,7 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
if (!ExecuteString(
args.GetIsolate(), source,
- String::NewFromUtf8(args.GetIsolate(), *file,
- NewStringType::kNormal)
- .ToLocalChecked(),
+ String::NewFromUtf8(args.GetIsolate(), *file).ToLocalChecked(),
kNoPrintResult,
options.quiet_load ? kNoReportExceptions : kReportExceptions,
kNoProcessMessageQueue)) {
@@ -1750,10 +1747,9 @@ void Shell::NotifyDone(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(String::NewFromUtf8(args.GetIsolate(),
- V8::GetVersion(),
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetReturnValue().Set(
+ String::NewFromUtf8(args.GetIsolate(), V8::GetVersion())
+ .ToLocalChecked());
}
void Shell::ReportException(Isolate* isolate, Local<v8::Message> message,
@@ -1907,11 +1903,8 @@ Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
v8::Local<v8::Context>::New(isolate, evaluation_context_);
if (stringify_function_.IsEmpty()) {
Local<String> source =
- String::NewFromUtf8(isolate, stringify_source_, NewStringType::kNormal)
- .ToLocalChecked();
- Local<String> name =
- String::NewFromUtf8(isolate, "d8-stringify", NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8(isolate, stringify_source_).ToLocalChecked();
+ Local<String> name = String::NewFromUtf8Literal(isolate, "d8-stringify");
ScriptOrigin origin(name);
Local<Script> script =
Script::Compile(context, source, &origin).ToLocalChecked();
@@ -1928,197 +1921,113 @@ Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
- global_template->Set(
- String::NewFromUtf8(isolate, "print", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, Print));
- global_template->Set(
- String::NewFromUtf8(isolate, "printErr", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, PrintErr));
- global_template->Set(
- String::NewFromUtf8(isolate, "write", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, Write));
- global_template->Set(
- String::NewFromUtf8(isolate, "read", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, Read));
- global_template->Set(
- String::NewFromUtf8(isolate, "readbuffer", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, ReadBuffer));
- global_template->Set(
- String::NewFromUtf8(isolate, "readline", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, ReadLine));
- global_template->Set(
- String::NewFromUtf8(isolate, "load", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, Load));
- global_template->Set(
- String::NewFromUtf8(isolate, "setTimeout", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, SetTimeout));
+ global_template->Set(isolate, "print", FunctionTemplate::New(isolate, Print));
+ global_template->Set(isolate, "printErr",
+ FunctionTemplate::New(isolate, PrintErr));
+ global_template->Set(isolate, "write", FunctionTemplate::New(isolate, Write));
+ global_template->Set(isolate, "read", FunctionTemplate::New(isolate, Read));
+ global_template->Set(isolate, "readbuffer",
+ FunctionTemplate::New(isolate, ReadBuffer));
+ global_template->Set(isolate, "readline",
+ FunctionTemplate::New(isolate, ReadLine));
+ global_template->Set(isolate, "load", FunctionTemplate::New(isolate, Load));
+ global_template->Set(isolate, "setTimeout",
+ FunctionTemplate::New(isolate, SetTimeout));
// Some Emscripten-generated code tries to call 'quit', which in turn would
// call C's exit(). This would lead to memory leaks, because there is no way
// we can terminate cleanly then, so we need a way to hide 'quit'.
if (!options.omit_quit) {
- global_template->Set(
- String::NewFromUtf8(isolate, "quit", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, Quit));
+ global_template->Set(isolate, "quit", FunctionTemplate::New(isolate, Quit));
}
Local<ObjectTemplate> test_template = ObjectTemplate::New(isolate);
- global_template->Set(
- String::NewFromUtf8(isolate, "testRunner", NewStringType::kNormal)
- .ToLocalChecked(),
- test_template);
- test_template->Set(
- String::NewFromUtf8(isolate, "notifyDone", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, NotifyDone));
- test_template->Set(
- String::NewFromUtf8(isolate, "waitUntilDone", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, WaitUntilDone));
+ global_template->Set(isolate, "testRunner", test_template);
+ test_template->Set(isolate, "notifyDone",
+ FunctionTemplate::New(isolate, NotifyDone));
+ test_template->Set(isolate, "waitUntilDone",
+ FunctionTemplate::New(isolate, WaitUntilDone));
// Reliable access to quit functionality. The "quit" method function
// installed on the global object can be hidden with the --omit-quit flag
// (e.g. on asan bots).
- test_template->Set(
- String::NewFromUtf8(isolate, "quit", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, Quit));
-
- global_template->Set(
- String::NewFromUtf8(isolate, "version", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, Version));
- global_template->Set(
- Symbol::GetToStringTag(isolate),
- String::NewFromUtf8(isolate, "global", NewStringType::kNormal)
- .ToLocalChecked());
+ test_template->Set(isolate, "quit", FunctionTemplate::New(isolate, Quit));
+
+ global_template->Set(isolate, "version",
+ FunctionTemplate::New(isolate, Version));
+ global_template->Set(Symbol::GetToStringTag(isolate),
+ String::NewFromUtf8Literal(isolate, "global"));
// Bind the Realm object.
Local<ObjectTemplate> realm_template = ObjectTemplate::New(isolate);
+ realm_template->Set(isolate, "current",
+ FunctionTemplate::New(isolate, RealmCurrent));
+ realm_template->Set(isolate, "owner",
+ FunctionTemplate::New(isolate, RealmOwner));
+ realm_template->Set(isolate, "global",
+ FunctionTemplate::New(isolate, RealmGlobal));
+ realm_template->Set(isolate, "create",
+ FunctionTemplate::New(isolate, RealmCreate));
realm_template->Set(
- String::NewFromUtf8(isolate, "current", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmCurrent));
- realm_template->Set(
- String::NewFromUtf8(isolate, "owner", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmOwner));
- realm_template->Set(
- String::NewFromUtf8(isolate, "global", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmGlobal));
- realm_template->Set(
- String::NewFromUtf8(isolate, "create", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmCreate));
- realm_template->Set(
- String::NewFromUtf8(isolate, "createAllowCrossRealmAccess",
- NewStringType::kNormal)
- .ToLocalChecked(),
+ isolate, "createAllowCrossRealmAccess",
FunctionTemplate::New(isolate, RealmCreateAllowCrossRealmAccess));
- realm_template->Set(
- String::NewFromUtf8(isolate, "navigate", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmNavigate));
- realm_template->Set(
- String::NewFromUtf8(isolate, "detachGlobal", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmDetachGlobal));
- realm_template->Set(
- String::NewFromUtf8(isolate, "dispose", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmDispose));
- realm_template->Set(
- String::NewFromUtf8(isolate, "switch", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmSwitch));
- realm_template->Set(
- String::NewFromUtf8(isolate, "eval", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, RealmEval));
- realm_template->SetAccessor(
- String::NewFromUtf8(isolate, "shared", NewStringType::kNormal)
- .ToLocalChecked(),
- RealmSharedGet, RealmSharedSet);
- global_template->Set(
- String::NewFromUtf8(isolate, "Realm", NewStringType::kNormal)
- .ToLocalChecked(),
- realm_template);
+ realm_template->Set(isolate, "navigate",
+ FunctionTemplate::New(isolate, RealmNavigate));
+ realm_template->Set(isolate, "detachGlobal",
+ FunctionTemplate::New(isolate, RealmDetachGlobal));
+ realm_template->Set(isolate, "dispose",
+ FunctionTemplate::New(isolate, RealmDispose));
+ realm_template->Set(isolate, "switch",
+ FunctionTemplate::New(isolate, RealmSwitch));
+ realm_template->Set(isolate, "eval",
+ FunctionTemplate::New(isolate, RealmEval));
+ realm_template->SetAccessor(String::NewFromUtf8Literal(isolate, "shared"),
+ RealmSharedGet, RealmSharedSet);
+ global_template->Set(isolate, "Realm", realm_template);
Local<ObjectTemplate> performance_template = ObjectTemplate::New(isolate);
+ performance_template->Set(isolate, "now",
+ FunctionTemplate::New(isolate, PerformanceNow));
performance_template->Set(
- String::NewFromUtf8(isolate, "now", NewStringType::kNormal)
- .ToLocalChecked(),
- FunctionTemplate::New(isolate, PerformanceNow));
- performance_template->Set(
- String::NewFromUtf8(isolate, "measureMemory", NewStringType::kNormal)
- .ToLocalChecked(),
+ isolate, "measureMemory",
FunctionTemplate::New(isolate, PerformanceMeasureMemory));
- global_template->Set(
- String::NewFromUtf8(isolate, "performance", NewStringType::kNormal)
- .ToLocalChecked(),
- performance_template);
+ global_template->Set(isolate, "performance", performance_template);
Local<FunctionTemplate> worker_fun_template =
FunctionTemplate::New(isolate, WorkerNew);
Local<Signature> worker_signature =
Signature::New(isolate, worker_fun_template);
worker_fun_template->SetClassName(
- String::NewFromUtf8(isolate, "Worker", NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8Literal(isolate, "Worker"));
worker_fun_template->ReadOnlyPrototype();
worker_fun_template->PrototypeTemplate()->Set(
- String::NewFromUtf8(isolate, "terminate", NewStringType::kNormal)
- .ToLocalChecked(),
+ isolate, "terminate",
FunctionTemplate::New(isolate, WorkerTerminate, Local<Value>(),
worker_signature));
worker_fun_template->PrototypeTemplate()->Set(
- String::NewFromUtf8(isolate, "postMessage", NewStringType::kNormal)
- .ToLocalChecked(),
+ isolate, "postMessage",
FunctionTemplate::New(isolate, WorkerPostMessage, Local<Value>(),
worker_signature));
worker_fun_template->PrototypeTemplate()->Set(
- String::NewFromUtf8(isolate, "getMessage", NewStringType::kNormal)
- .ToLocalChecked(),
+ isolate, "getMessage",
FunctionTemplate::New(isolate, WorkerGetMessage, Local<Value>(),
worker_signature));
worker_fun_template->InstanceTemplate()->SetInternalFieldCount(1);
- global_template->Set(
- String::NewFromUtf8(isolate, "Worker", NewStringType::kNormal)
- .ToLocalChecked(),
- worker_fun_template);
+ global_template->Set(isolate, "Worker", worker_fun_template);
Local<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
AddOSMethods(isolate, os_templ);
- global_template->Set(
- String::NewFromUtf8(isolate, "os", NewStringType::kNormal)
- .ToLocalChecked(),
- os_templ);
+ global_template->Set(isolate, "os", os_templ);
if (i::FLAG_expose_async_hooks) {
Local<ObjectTemplate> async_hooks_templ = ObjectTemplate::New(isolate);
async_hooks_templ->Set(
- String::NewFromUtf8(isolate, "createHook", NewStringType::kNormal)
- .ToLocalChecked(),
+ isolate, "createHook",
FunctionTemplate::New(isolate, AsyncHooksCreateHook));
async_hooks_templ->Set(
- String::NewFromUtf8(isolate, "executionAsyncId", NewStringType::kNormal)
- .ToLocalChecked(),
+ isolate, "executionAsyncId",
FunctionTemplate::New(isolate, AsyncHooksExecutionAsyncId));
async_hooks_templ->Set(
- String::NewFromUtf8(isolate, "triggerAsyncId", NewStringType::kNormal)
- .ToLocalChecked(),
+ isolate, "triggerAsyncId",
FunctionTemplate::New(isolate, AsyncHooksTriggerAsyncId));
- global_template->Set(
- String::NewFromUtf8(isolate, "async_hooks", NewStringType::kNormal)
- .ToLocalChecked(),
- async_hooks_templ);
+ global_template->Set(isolate, "async_hooks", async_hooks_templ);
}
return global_template;
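
Note: CreateGlobalTemplate now uses the Template::Set(Isolate*, const char*,
Local<Data>) convenience overload, which builds an internalized v8::String
from the C string itself and collapses the multi-line NewFromUtf8 pattern per
property. The two spellings side by side, taken from the "print" case above:

    // Before:
    global_template->Set(
        String::NewFromUtf8(isolate, "print", NewStringType::kNormal)
            .ToLocalChecked(),
        FunctionTemplate::New(isolate, Print));

    // After: the overload internalizes the name itself.
    global_template->Set(isolate, "print",
                         FunctionTemplate::New(isolate, Print));
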
@@ -2158,17 +2067,27 @@ static void PrintMessageCallback(Local<Message> message, Local<Value> error) {
printf("%s:%i: %s\n", filename_string, linenum, msg_string);
}
-void Shell::Initialize(Isolate* isolate) {
- // Set up counters
- if (i::FLAG_map_counters[0] != '\0') {
- MapCounters(isolate, i::FLAG_map_counters);
+void Shell::Initialize(Isolate* isolate, D8Console* console,
+ bool isOnMainThread) {
+ if (isOnMainThread) {
+ // Set up counters
+ if (i::FLAG_map_counters[0] != '\0') {
+ MapCounters(isolate, i::FLAG_map_counters);
+ }
+ // Disable default message reporting.
+ isolate->AddMessageListenerWithErrorLevel(
+ PrintMessageCallback,
+ v8::Isolate::kMessageError | v8::Isolate::kMessageWarning |
+ v8::Isolate::kMessageInfo | v8::Isolate::kMessageDebug |
+ v8::Isolate::kMessageLog);
}
- // Disable default message reporting.
- isolate->AddMessageListenerWithErrorLevel(
- PrintMessageCallback,
- v8::Isolate::kMessageError | v8::Isolate::kMessageWarning |
- v8::Isolate::kMessageInfo | v8::Isolate::kMessageDebug |
- v8::Isolate::kMessageLog);
+
+ isolate->SetHostImportModuleDynamicallyCallback(
+ Shell::HostImportModuleDynamically);
+ isolate->SetHostInitializeImportMetaObjectCallback(
+ Shell::HostInitializeImportMetaObject);
+
+ debug::SetConsoleDelegate(isolate, console);
}
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
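
Note: Shell::Initialize now owns all per-isolate wiring. The dynamic-import
and import.meta callbacks plus the console delegate are installed for every
isolate, while counter mapping and the message listener stay main-thread-only
behind isOnMainThread; the two-argument call sites later in this file
presumably rely on a default argument declared in d8.h (not shown in this
diff). Caller sketch, matching the worker threads below:

    D8Console console(isolate);
    Shell::Initialize(isolate, &console, /*isOnMainThread=*/false);
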
@@ -2190,14 +2109,12 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Local<Array> array = Array::New(isolate, size);
for (int i = 0; i < size; i++) {
Local<String> arg =
- v8::String::NewFromUtf8(isolate, args[i], v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8(isolate, args[i]).ToLocalChecked();
Local<Number> index = v8::Number::New(isolate, i);
array->Set(context, index, arg).FromJust();
}
- Local<String> name =
- String::NewFromUtf8(isolate, "arguments", NewStringType::kInternalized)
- .ToLocalChecked();
+ Local<String> name = String::NewFromUtf8Literal(
+ isolate, "arguments", NewStringType::kInternalized);
context->Global()->Set(context, name, array).FromJust();
}
return handle_scope.Escape(context);
@@ -2471,9 +2388,7 @@ void Shell::RunShell(Isolate* isolate) {
v8::Local<v8::Context>::New(isolate, evaluation_context_);
v8::Context::Scope context_scope(context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- Local<String> name =
- String::NewFromUtf8(isolate, "(d8)", NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> name = String::NewFromUtf8Literal(isolate, "(d8)");
printf("V8 version %s\n", V8::GetVersion());
while (true) {
HandleScope inner_scope(isolate);
@@ -2525,9 +2440,8 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
reinterpret_cast<const uint16_t*>(string.characters16()),
v8::NewStringType::kNormal, length))
.ToLocalChecked();
- Local<String> callback_name =
- v8::String::NewFromUtf8(isolate_, "receive", v8::NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> callback_name = v8::String::NewFromUtf8Literal(
+ isolate_, "receive", NewStringType::kInternalized);
Local<Context> context = context_.Get(isolate_);
Local<Value> callback =
context->Global()->Get(context, callback_name).ToLocalChecked();
@@ -2539,14 +2453,10 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
#ifdef DEBUG
if (try_catch.HasCaught()) {
Local<Object> exception = Local<Object>::Cast(try_catch.Exception());
- Local<String> key = v8::String::NewFromUtf8(isolate_, "message",
- v8::NewStringType::kNormal)
- .ToLocalChecked();
- Local<String> expected =
- v8::String::NewFromUtf8(isolate_,
- "Maximum call stack size exceeded",
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> key = v8::String::NewFromUtf8Literal(
+ isolate_, "message", NewStringType::kInternalized);
+ Local<String> expected = v8::String::NewFromUtf8Literal(
+ isolate_, "Maximum call stack size exceeded");
Local<Value> value = exception->Get(context, key).ToLocalChecked();
DCHECK(value->StrictEquals(expected));
}
@@ -2575,9 +2485,8 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
FunctionTemplate::New(isolate_, SendInspectorMessage)
->GetFunction(context)
.ToLocalChecked();
- Local<String> function_name =
- String::NewFromUtf8(isolate_, "send", NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> function_name = String::NewFromUtf8Literal(
+ isolate_, "send", NewStringType::kInternalized);
CHECK(context->Global()->Set(context, function_name, function).FromJust());
context_.Reset(isolate_, context);
@@ -2586,10 +2495,8 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
void runMessageLoopOnPause(int contextGroupId) override {
v8::Isolate::AllowJavascriptExecutionScope allow_script(isolate_);
v8::HandleScope handle_scope(isolate_);
- Local<String> callback_name =
- v8::String::NewFromUtf8(isolate_, "handleInspectorMessage",
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> callback_name = v8::String::NewFromUtf8Literal(
+ isolate_, "handleInspectorMessage", NewStringType::kInternalized);
Local<Context> context = context_.Get(isolate_);
Local<Value> callback =
context->Global()->Get(context, callback_name).ToLocalChecked();
@@ -2674,12 +2581,9 @@ bool SourceGroup::Execute(Isolate* isolate) {
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
HandleScope handle_scope(isolate);
- Local<String> file_name =
- String::NewFromUtf8(isolate, "unnamed", NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> file_name = String::NewFromUtf8Literal(isolate, "unnamed");
Local<String> source =
- String::NewFromUtf8(isolate, argv_[i + 1], NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8(isolate, argv_[i + 1]).ToLocalChecked();
Shell::set_script_executed();
if (!Shell::ExecuteString(isolate, source, file_name,
Shell::kNoPrintResult, Shell::kReportExceptions,
@@ -2713,8 +2617,7 @@ bool SourceGroup::Execute(Isolate* isolate) {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope(isolate);
Local<String> file_name =
- String::NewFromUtf8(isolate, arg, NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8(isolate, arg).ToLocalChecked();
Local<String> source = ReadFile(isolate, arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
@@ -2742,15 +2645,10 @@ void SourceGroup::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
Isolate* isolate = Isolate::New(create_params);
- isolate->SetHostCleanupFinalizationGroupCallback(
- Shell::HostCleanupFinalizationGroup);
- isolate->SetHostImportModuleDynamicallyCallback(
- Shell::HostImportModuleDynamically);
- isolate->SetHostInitializeImportMetaObjectCallback(
- Shell::HostInitializeImportMetaObject);
Shell::SetWaitUntilDone(isolate, false);
D8Console console(isolate);
- debug::SetConsoleDelegate(isolate, &console);
+ Shell::Initialize(isolate, &console, false);
+
for (int i = 0; i < Shell::options.stress_runs; ++i) {
next_semaphore_.Wait();
{
@@ -2889,14 +2787,8 @@ void Worker::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
Isolate* isolate = Isolate::New(create_params);
- isolate->SetHostCleanupFinalizationGroupCallback(
- Shell::HostCleanupFinalizationGroup);
- isolate->SetHostImportModuleDynamicallyCallback(
- Shell::HostImportModuleDynamically);
- isolate->SetHostInitializeImportMetaObjectCallback(
- Shell::HostInitializeImportMetaObject);
D8Console console(isolate);
- debug::SetConsoleDelegate(isolate, &console);
+ Shell::Initialize(isolate, &console, false);
{
Isolate::Scope iscope(isolate);
{
@@ -2917,29 +2809,26 @@ void Worker::ExecuteInThread() {
&postmessage_fun)) {
global
->Set(context,
- String::NewFromUtf8(isolate, "postMessage",
- NewStringType::kNormal)
- .ToLocalChecked(),
+ v8::String::NewFromUtf8Literal(
+ isolate, "postMessage", NewStringType::kInternalized),
postmessage_fun)
.FromJust();
}
// First run the script
Local<String> file_name =
- String::NewFromUtf8(isolate, "unnamed", NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8Literal(isolate, "unnamed");
Local<String> source =
- String::NewFromUtf8(isolate, script_, NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8(isolate, script_).ToLocalChecked();
if (Shell::ExecuteString(
isolate, source, file_name, Shell::kNoPrintResult,
Shell::kReportExceptions, Shell::kProcessMessageQueue)) {
// Get the message handler
Local<Value> onmessage =
global
- ->Get(context, String::NewFromUtf8(isolate, "onmessage",
- NewStringType::kNormal)
- .ToLocalChecked())
+ ->Get(context,
+ String::NewFromUtf8Literal(
+ isolate, "onmessage", NewStringType::kInternalized))
.ToLocalChecked();
if (onmessage->IsFunction()) {
Local<Function> onmessage_fun = Local<Function>::Cast(onmessage);
@@ -3147,6 +3036,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.cpu_profiler = true;
options.cpu_profiler_print = true;
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--fuzzy-module-file-extensions") == 0) {
+ options.fuzzy_module_file_extensions = true;
+ argv[i] = nullptr;
}
}
@@ -3272,21 +3164,6 @@ bool RunSetTimeoutCallback(Isolate* isolate, bool* did_run) {
return true;
}
-bool RunCleanupFinalizationGroupCallback(Isolate* isolate, bool* did_run) {
- PerIsolateData* data = PerIsolateData::Get(isolate);
- HandleScope handle_scope(isolate);
- while (true) {
- Local<FinalizationGroup> fg;
- if (!data->GetCleanupFinalizationGroup().ToLocal(&fg)) return true;
- *did_run = true;
- TryCatch try_catch(isolate);
- try_catch.SetVerbose(true);
- if (FinalizationGroup::Cleanup(fg).IsNothing()) {
- return false;
- }
- }
-}
-
bool ProcessMessages(
Isolate* isolate,
const std::function<platform::MessageLoopBehavior()>& behavior) {
@@ -3297,23 +3174,17 @@ bool ProcessMessages(
while (v8::platform::PumpMessageLoop(g_default_platform, isolate,
behavior())) {
MicrotasksScope::PerformCheckpoint(isolate);
- isolate->ClearKeptObjects();
}
if (g_default_platform->IdleTasksEnabled(isolate)) {
v8::platform::RunIdleTasks(g_default_platform, isolate,
50.0 / base::Time::kMillisecondsPerSecond);
}
- bool ran_finalization_callback = false;
- if (!RunCleanupFinalizationGroupCallback(isolate,
- &ran_finalization_callback)) {
- return false;
- }
bool ran_set_timeout = false;
if (!RunSetTimeoutCallback(isolate, &ran_set_timeout)) {
return false;
}
- if (!ran_set_timeout && !ran_finalization_callback) return true;
+ if (!ran_set_timeout) return true;
}
return true;
}
@@ -3768,19 +3639,12 @@ int Shell::Main(int argc, char* argv[]) {
}
Isolate* isolate = Isolate::New(create_params);
- isolate->SetHostCleanupFinalizationGroupCallback(
- Shell::HostCleanupFinalizationGroup);
- isolate->SetHostImportModuleDynamicallyCallback(
- Shell::HostImportModuleDynamically);
- isolate->SetHostInitializeImportMetaObjectCallback(
- Shell::HostInitializeImportMetaObject);
- D8Console console(isolate);
{
+ D8Console console(isolate);
Isolate::Scope scope(isolate);
- Initialize(isolate);
+ Initialize(isolate, &console);
PerIsolateData data(isolate);
- debug::SetConsoleDelegate(isolate, &console);
if (options.trace_enabled) {
platform::tracing::TraceConfig* trace_config;
@@ -3832,16 +3696,9 @@ int Shell::Main(int argc, char* argv[]) {
i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
Isolate* isolate2 = Isolate::New(create_params);
i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
- isolate2->SetHostCleanupFinalizationGroupCallback(
- Shell::HostCleanupFinalizationGroup);
- isolate2->SetHostImportModuleDynamicallyCallback(
- Shell::HostImportModuleDynamically);
- isolate2->SetHostInitializeImportMetaObjectCallback(
- Shell::HostInitializeImportMetaObject);
{
D8Console console(isolate2);
- Initialize(isolate2);
- debug::SetConsoleDelegate(isolate2, &console);
+ Initialize(isolate2, &console);
PerIsolateData data(isolate2);
Isolate::Scope isolate_scope(isolate2);
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 435eb6dbd8..2d27f62e58 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -23,6 +23,8 @@
namespace v8 {
+class D8Console;
+
// A single counter in a counter collection.
class Counter {
public:
@@ -226,8 +228,6 @@ class PerIsolateData {
PerIsolateData* data_;
};
- inline void HostCleanupFinalizationGroup(Local<FinalizationGroup> fg);
- inline MaybeLocal<FinalizationGroup> GetCleanupFinalizationGroup();
inline void SetTimeout(Local<Function> callback, Local<Context> context);
inline MaybeLocal<Function> GetTimeoutCallback();
inline MaybeLocal<Context> GetTimeoutContext();
@@ -245,7 +245,6 @@ class PerIsolateData {
Global<Value> realm_shared_;
std::queue<Global<Function>> set_timeout_callbacks_;
std::queue<Global<Context>> set_timeout_contexts_;
- std::queue<Global<FinalizationGroup>> cleanup_finalization_groups_;
AsyncHooks* async_hooks_wrapper_;
int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
@@ -299,6 +298,7 @@ class ShellOptions {
bool include_arguments = true;
bool cpu_profiler = false;
bool cpu_profiler_print = false;
+ bool fuzzy_module_file_extensions = true;
};
class Shell : public i::AllStatic {
@@ -423,8 +423,6 @@ class Shell : public i::AllStatic {
static void SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args);
static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void HostCleanupFinalizationGroup(Local<Context> context,
- Local<FinalizationGroup> fg);
static MaybeLocal<Promise> HostImportModuleDynamically(
Local<Context> context, Local<ScriptOrModule> referrer,
Local<String> specifier);
@@ -461,6 +459,9 @@ class Shell : public i::AllStatic {
static void AddRunningWorker(std::shared_ptr<Worker> worker);
static void RemoveRunningWorker(const std::shared_ptr<Worker>& worker);
+ static void Initialize(Isolate* isolate, D8Console* console,
+ bool isOnMainThread = true);
+
private:
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
@@ -487,7 +488,6 @@ class Shell : public i::AllStatic {
static void WriteLcovData(v8::Isolate* isolate, const char* file);
static Counter* GetCounter(const char* name, bool is_histogram);
static Local<String> Stringify(Isolate* isolate, Local<Value> value);
- static void Initialize(Isolate* isolate);
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
diff --git a/deps/v8/src/date/date.cc b/deps/v8/src/date/date.cc
index 825c5040d3..250539e24c 100644
--- a/deps/v8/src/date/date.cc
+++ b/deps/v8/src/date/date.cc
@@ -66,7 +66,7 @@ void DateCache::ResetDateCache(
// ECMA 262 - ES#sec-timeclip TimeClip (time)
double DateCache::TimeClip(double time) {
if (-kMaxTimeInMs <= time && time <= kMaxTimeInMs) {
- return DoubleToInteger(time) + 0.0;
+ return DoubleToInteger(time);
}
return std::numeric_limits<double>::quiet_NaN();
}
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index 96cd8a7b74..251856e284 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -38,7 +38,7 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ Mov(sp, fp);
- __ Pop(fp, lr); // Frame, Return address.
+ __ Pop<TurboAssembler::kAuthLR>(fp, lr);
__ LoadTaggedPointerField(
x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 5f368683f2..9b359fde36 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -74,9 +74,9 @@ std::vector<CoverageBlock> GetSortedBlockData(SharedFunctionInfo shared) {
CoverageInfo::cast(shared.GetDebugInfo().coverage_info());
std::vector<CoverageBlock> result;
- if (coverage_info.SlotCount() == 0) return result;
+ if (coverage_info.slot_count() == 0) return result;
- for (int i = 0; i < coverage_info.SlotCount(); i++) {
+ for (int i = 0; i < coverage_info.slot_count(); i++) {
const int start_pos = coverage_info.StartSourcePosition(i);
const int until_pos = coverage_info.EndSourcePosition(i);
const int count = coverage_info.BlockCount(i);
@@ -385,7 +385,7 @@ void ResetAllBlockCounts(SharedFunctionInfo shared) {
CoverageInfo coverage_info =
CoverageInfo::cast(shared.GetDebugInfo().coverage_info());
- for (int i = 0; i < coverage_info.SlotCount(); i++) {
+ for (int i = 0; i < coverage_info.slot_count(); i++) {
coverage_info.ResetBlockCount(i);
}
}
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 5a8b68305b..fcf9b8448a 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -265,6 +265,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
/* Type checks */ \
V(IsArray) \
V(IsFunction) \
+ V(IsJSProxy) \
V(IsJSReceiver) \
V(IsRegExp) \
V(IsSmi) \
@@ -478,6 +479,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kForInContinue:
case Bytecode::kForInNext:
case Bytecode::kForInStep:
+ case Bytecode::kJumpLoop:
case Bytecode::kThrow:
case Bytecode::kReThrow:
case Bytecode::kThrowReferenceErrorIfHole:
@@ -485,7 +487,6 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kThrowSuperAlreadyCalledIfNotHole:
case Bytecode::kIllegal:
case Bytecode::kCallJSRuntime:
- case Bytecode::kStackCheck:
case Bytecode::kReturn:
case Bytecode::kSetPendingMessage:
return true;
@@ -948,6 +949,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
case Builtins::kArraySomeLoopContinuation:
case Builtins::kArrayTimSort:
case Builtins::kCall_ReceiverIsAny:
+ case Builtins::kCall_ReceiverIsNotNullOrUndefined:
case Builtins::kCall_ReceiverIsNullOrUndefined:
case Builtins::kCallWithArrayLike:
case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
@@ -991,7 +993,6 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
case Builtins::kStringIndexOf:
case Builtins::kStringRepeat:
case Builtins::kToInteger:
- case Builtins::kToInteger_TruncateMinusZero:
case Builtins::kToLength:
case Builtins::kToName:
case Builtins::kToObject:
@@ -1065,7 +1066,8 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
}
}
CHECK(!failed);
-#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_MIPS64)
+#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
+ defined(V8_TARGET_ARCH_MIPS64)
// Isolate-independent builtin calls and jumps do not emit reloc infos
  // on PPC. We try to avoid using PC-relative code due to performance
  // issues, especially on older hardware.
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 732001327f..f6cfe31d32 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -101,6 +101,13 @@ void PrepareStep(Isolate* isolate, StepAction action);
void ClearStepping(Isolate* isolate);
V8_EXPORT_PRIVATE void BreakRightNow(Isolate* isolate);
+// Use `SetTerminateOnResume` to indicate that a TerminateExecution interrupt
+// should be set shortly before resuming, i.e. shortly before returning into
+// the JavaScript stack frames on the stack. In contrast to setting the
+// interrupt with `RequestTerminateExecution` directly, this flag allows
+// the isolate to be entered for further JavaScript execution.
+V8_EXPORT_PRIVATE void SetTerminateOnResume(Isolate* isolate);
+
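A rough sketch (not part of this patch) of a DebugDelegate using the new entry point; the class name is illustrative, and the hook signature is the one declared further down in this header:

class TerminatingDelegate : public v8::debug::DebugDelegate {
 public:
  void BreakProgramRequested(
      v8::Local<v8::Context> paused_context,
      const std::vector<v8::debug::BreakpointId>& hit_breakpoints) override {
    // While paused, arrange for a TerminateExecution interrupt to fire when
    // the debugger resumes. Unlike RequestTerminateExecution(), the isolate
    // can still be entered (e.g. for debug evaluation) until then.
    v8::debug::SetTerminateOnResume(paused_context->GetIsolate());
  }
};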
bool AllFramesOnStackAreBlackboxed(Isolate* isolate);
class Script;
@@ -174,6 +181,9 @@ class WasmScript : public Script {
int GetContainingFunction(int byte_offset) const;
uint32_t GetFunctionHash(int function_index);
+
+ int CodeOffset() const;
+ int CodeLength() const;
};
V8_EXPORT_PRIVATE void GetLoadedScripts(
@@ -209,6 +219,9 @@ class DebugDelegate {
V8_EXPORT_PRIVATE void SetDebugDelegate(Isolate* isolate,
DebugDelegate* listener);
+V8_EXPORT_PRIVATE void TierDownAllModulesPerIsolate(Isolate* isolate);
+V8_EXPORT_PRIVATE void TierUpAllModulesPerIsolate(Isolate* isolate);
+
class AsyncEventDelegate {
public:
virtual ~AsyncEventDelegate() = default;
@@ -410,7 +423,8 @@ class V8_EXPORT_PRIVATE ScopeIterator {
ScopeTypeBlock,
ScopeTypeScript,
ScopeTypeEval,
- ScopeTypeModule
+ ScopeTypeModule,
+ ScopeTypeWasmExpressionStack
};
virtual bool Done() = 0;
diff --git a/deps/v8/src/debug/debug-scope-iterator.cc b/deps/v8/src/debug/debug-scope-iterator.cc
index 1f6d9e0fd1..28d8ed0c8f 100644
--- a/deps/v8/src/debug/debug-scope-iterator.cc
+++ b/deps/v8/src/debug/debug-scope-iterator.cc
@@ -137,17 +137,24 @@ DebugWasmScopeIterator::DebugWasmScopeIterator(Isolate* isolate,
type_(debug::ScopeIterator::ScopeTypeGlobal) {}
bool DebugWasmScopeIterator::Done() {
- return type_ != debug::ScopeIterator::ScopeTypeGlobal &&
- type_ != debug::ScopeIterator::ScopeTypeLocal;
+ return type_ == debug::ScopeIterator::ScopeTypeWith;
}
void DebugWasmScopeIterator::Advance() {
DCHECK(!Done());
- if (type_ == debug::ScopeIterator::ScopeTypeGlobal) {
- type_ = debug::ScopeIterator::ScopeTypeLocal;
- } else {
- // We use ScopeTypeWith type as marker for done.
- type_ = debug::ScopeIterator::ScopeTypeWith;
+ switch (type_) {
+ case ScopeTypeGlobal:
+ type_ = debug::ScopeIterator::ScopeTypeLocal;
+ break;
+ case ScopeTypeLocal:
+ type_ = debug::ScopeIterator::ScopeTypeWasmExpressionStack;
+ break;
+ case ScopeTypeWasmExpressionStack:
+      // We use the ScopeTypeWith type as a marker for done.
+ type_ = debug::ScopeIterator::ScopeTypeWith;
+ break;
+ default:
+ UNREACHABLE();
}
}
@@ -176,7 +183,21 @@ v8::Local<v8::Object> DebugWasmScopeIterator::GetObject() {
wasm::DebugInfo* debug_info =
WasmCompiledFrame::cast(frame_)->native_module()->GetDebugInfo();
return Utils::ToLocal(debug_info->GetLocalScopeObject(
- isolate_, frame_->pc(), frame_->fp()));
+ isolate_, frame_->pc(), frame_->fp(), frame_->callee_fp()));
+ }
+ case debug::ScopeIterator::ScopeTypeWasmExpressionStack: {
+ if (frame_->is_wasm_interpreter_entry()) {
+ Handle<WasmDebugInfo> debug_info(
+ WasmInterpreterEntryFrame::cast(frame_)->debug_info(), isolate_);
+ return Utils::ToLocal(WasmDebugInfo::GetStackScopeObject(
+ debug_info, frame_->fp(), inlined_frame_index_));
+ }
+ // Compiled code.
+ DCHECK(frame_->is_wasm_compiled());
+ wasm::DebugInfo* debug_info =
+ WasmCompiledFrame::cast(frame_)->native_module()->GetDebugInfo();
+ return Utils::ToLocal(debug_info->GetStackScopeObject(
+ isolate_, frame_->pc(), frame_->fp(), frame_->callee_fp()));
}
default:
return {};
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 58182941c8..c5061ecb81 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -244,10 +244,11 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
info_->set_eager();
}
+ MaybeHandle<ScopeInfo> maybe_outer_scope;
if (scope_info->scope_type() == EVAL_SCOPE || script->is_wrapped()) {
info_->set_eval();
if (!context_->IsNativeContext()) {
- info_->set_outer_scope_info(handle(context_->scope_info(), isolate_));
+ maybe_outer_scope = handle(context_->scope_info(), isolate_);
}
// Language mode may be inherited from the eval caller.
// Retrieve it from shared function info.
@@ -259,9 +260,13 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
scope_info->scope_type() == FUNCTION_SCOPE);
}
- if (parsing::ParseAny(info_, shared_info, isolate_) &&
- Rewriter::Rewrite(info_)) {
- info_->ast_value_factory()->Internalize(isolate_->factory());
+ const bool parse_result =
+ info_->is_toplevel()
+ ? parsing::ParseProgram(info_, script, maybe_outer_scope, isolate_)
+ : parsing::ParseFunction(info_, shared_info, isolate_);
+
+ if (parse_result && Rewriter::Rewrite(info_)) {
+ info_->ast_value_factory()->Internalize(isolate_);
DeclarationScope* literal_scope = info_->literal()->scope();
ScopeChainRetriever scope_chain_retriever(literal_scope, function_,
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 449ac490d0..a7a3d2fd81 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -37,6 +37,7 @@
#include "src/objects/js-promise-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -871,8 +872,9 @@ void Debug::PrepareStepIn(Handle<JSFunction> function) {
if (in_debug_scope()) return;
if (break_disabled()) return;
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
- // If stepping from JS into Wasm, prepare for it.
- if (shared->HasWasmExportedFunctionData()) {
+ // If stepping from JS into Wasm, and we are using the wasm interpreter for
+ // debugging, prepare the interpreter for step in.
+ if (shared->HasWasmExportedFunctionData() && !FLAG_debug_in_liftoff) {
auto imported_function = Handle<WasmExportedFunction>::cast(function);
Handle<WasmInstanceObject> wasm_instance(imported_function->instance(),
isolate_);
@@ -995,19 +997,6 @@ void Debug::PrepareStep(StepAction step_action) {
StackTraceFrameIterator frames_it(isolate_, frame_id);
StandardFrame* frame = frames_it.frame();
- // Handle stepping in wasm functions via the wasm interpreter.
- if (frame->is_wasm_interpreter_entry()) {
- WasmInterpreterEntryFrame* wasm_frame =
- WasmInterpreterEntryFrame::cast(frame);
- if (wasm_frame->NumberOfActiveFrames() > 0) {
- wasm_frame->debug_info().PrepareStep(step_action);
- return;
- }
- }
- // If this is wasm, but there are no interpreted frames on top, all we can do
- // is step out.
- if (frame->is_wasm()) step_action = StepOut;
-
BreakLocation location = BreakLocation::Invalid();
Handle<SharedFunctionInfo> shared;
int current_frame_count = CurrentFrameCount();
@@ -1051,6 +1040,26 @@ void Debug::PrepareStep(StepAction step_action) {
thread_local_.last_frame_count_ = current_frame_count;
// No longer perform the current async step.
clear_suspended_generator();
+ } else if (frame->is_wasm_interpreter_entry()) {
+ // Handle stepping in wasm functions via the wasm interpreter.
+ WasmInterpreterEntryFrame* wasm_frame =
+ WasmInterpreterEntryFrame::cast(frame);
+ if (wasm_frame->NumberOfActiveFrames() > 0) {
+ wasm_frame->debug_info().PrepareStep(step_action);
+ return;
+ }
+ } else if (FLAG_debug_in_liftoff && frame->is_wasm_compiled()) {
+ // Handle stepping in Liftoff code.
+ WasmCompiledFrame* wasm_frame = WasmCompiledFrame::cast(frame);
+ wasm::WasmCodeRefScope code_ref_scope;
+ wasm::WasmCode* code = wasm_frame->wasm_code();
+ if (code->is_liftoff()) {
+ wasm_frame->native_module()->GetDebugInfo()->PrepareStep(isolate_,
+ frame_id);
+ }
+ // In case the wasm code returns, prepare the next frame (if JS) to break.
+ step_action = StepOut;
+ UpdateHookOnFunctionCall();
}
switch (step_action) {
@@ -1718,8 +1727,8 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
return FixedArray::ShrinkOrEmpty(isolate_, results, length);
}
-void Debug::OnThrow(Handle<Object> exception) {
- if (in_debug_scope() || ignore_events()) return;
+base::Optional<Object> Debug::OnThrow(Handle<Object> exception) {
+ if (in_debug_scope() || ignore_events()) return {};
// Temporarily clear any scheduled_exception to allow evaluating
// JavaScript from the debug event handler.
HandleScope scope(isolate_);
@@ -1736,6 +1745,14 @@ void Debug::OnThrow(Handle<Object> exception) {
isolate_->thread_local_top()->scheduled_exception_ = *scheduled_exception;
}
PrepareStepOnThrow();
+  // If the OnException handler requested termination, then indicate this to
+  // our caller Isolate::Throw so it can deal with it immediately instead of
+ // throwing the original exception.
+ if (isolate_->stack_guard()->CheckTerminateExecution()) {
+ isolate_->stack_guard()->ClearTerminateExecution();
+ return isolate_->TerminateExecution();
+ }
+ return {};
}
void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
@@ -2060,7 +2077,7 @@ void Debug::PrintBreakLocation() {
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
Handle<String> source(String::cast(script->source()), isolate_);
- Script::InitLineEnds(script);
+ Script::InitLineEnds(isolate_, script);
int line =
Script::GetLineNumber(script, source_position) - script->line_offset();
int column = Script::GetColumnNumber(script, source_position) -
@@ -2092,7 +2109,6 @@ DebugScope::DebugScope(Debug* debug)
// Link recursive debugger entry.
base::Relaxed_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(this));
-
// Store the previous frame id and return value.
break_frame_id_ = debug_->break_frame_id();
@@ -2106,8 +2122,18 @@ DebugScope::DebugScope(Debug* debug)
debug_->UpdateState();
}
+void DebugScope::set_terminate_on_resume() { terminate_on_resume_ = true; }
DebugScope::~DebugScope() {
+  // If terminate-on-resume was requested, request the termination interrupt
+  // now if this is the outermost scope; otherwise propagate the request to
+  // the enclosing scope.
+ if (terminate_on_resume_) {
+ if (!prev_) {
+ debug_->isolate_->stack_guard()->RequestTerminateExecution();
+ } else {
+ prev_->set_terminate_on_resume();
+ }
+ }
// Leaving this debugger entry.
base::Relaxed_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(prev_));
@@ -2147,6 +2173,13 @@ void Debug::UpdateDebugInfosForExecutionMode() {
}
}
+void Debug::SetTerminateOnResume() {
+ DebugScope* scope = reinterpret_cast<DebugScope*>(
+ base::Acquire_Load(&thread_local_.current_debug_scope_));
+ CHECK_NOT_NULL(scope);
+ scope->set_terminate_on_resume();
+}
+
void Debug::StartSideEffectCheckMode() {
DCHECK(isolate_->debug_execution_mode() != DebugInfo::kSideEffects);
isolate_->set_debug_execution_mode(DebugInfo::kSideEffects);
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 8a63b1044e..4bb2008c4d 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -217,7 +217,8 @@ class V8_EXPORT_PRIVATE Debug {
// Debug event triggers.
void OnDebugBreak(Handle<FixedArray> break_points_hit);
- void OnThrow(Handle<Object> exception);
+ base::Optional<Object> OnThrow(Handle<Object> exception)
+ V8_WARN_UNUSED_RESULT;
void OnPromiseReject(Handle<Object> promise, Handle<Object> value);
void OnCompileError(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
@@ -238,6 +239,8 @@ class V8_EXPORT_PRIVATE Debug {
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
+ void SetTerminateOnResume();
+
bool SetBreakPointForScript(Handle<Script> script, Handle<String> condition,
int* source_position, int* id);
bool SetBreakpointForFunction(Handle<SharedFunctionInfo> shared,
@@ -564,6 +567,8 @@ class DebugScope {
explicit DebugScope(Debug* debug);
~DebugScope();
+ void set_terminate_on_resume();
+
private:
Isolate* isolate() { return debug_->isolate_; }
@@ -571,6 +576,8 @@ class DebugScope {
DebugScope* prev_; // Previous scope if entered recursively.
StackFrameId break_frame_id_; // Previous break frame id.
PostponeInterruptsScope no_interrupts_;
+  // Whether a termination interrupt should be requested when leaving the
+  // outermost DebugScope.
+ bool terminate_on_resume_ = false;
};
// This scope is used to handle return values in nested debug break points.
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 99063ba8f6..aad5f5d9e6 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -761,7 +761,7 @@ bool ParseScript(Isolate* isolate, Handle<Script> script, ParseInfo* parse_info,
success = parsing::ParseProgram(parse_info, script, isolate);
if (success) {
success = Compiler::Analyze(parse_info);
- parse_info->ast_value_factory()->Internalize(isolate->factory());
+ parse_info->ast_value_factory()->Internalize(isolate);
}
}
if (!success) {
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index d06aa67bb8..be32d3ba28 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/debug/debug.h"
@@ -50,4 +50,4 @@ const bool LiveEdit::kFrameDropperSupported = true;
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index 0230fcaa83..7d08e13678 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -38,7 +38,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ LoadRR(fp, r3);
__ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LeaveFrame(StackFrame::INTERNAL);
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadLogicalHalfWordP(
r2, FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
__ LoadRR(r4, r2);
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc b/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc
new file mode 100644
index 0000000000..a9f1b58f6c
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc
@@ -0,0 +1,118 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/wasm/gdb-server/gdb-server-thread.h"
+#include "src/debug/wasm/gdb-server/gdb-server.h"
+#include "src/debug/wasm/gdb-server/session.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+GdbServerThread::GdbServerThread(GdbServer* gdb_server)
+ : Thread(v8::base::Thread::Options("GdbServerThread")),
+ gdb_server_(gdb_server),
+ start_semaphore_(0) {}
+
+bool GdbServerThread::StartAndInitialize() {
+ // Executed in the Isolate thread.
+ if (!Start()) {
+ return false;
+ }
+
+ // We need to make sure that {Stop} is never called before the thread has
+ // completely initialized {transport_} and {target_}. Otherwise there could be
+ // a race condition where in the main thread {Stop} might get called before
+ // the transport is created, and then in the GDBServer thread we may have time
+ // to setup the transport and block on accept() before the main thread blocks
+ // on joining the thread.
+  // The small performance hit caused by this Wait should be negligible because
+  // this operation happens at most once per process and only when the
+ // --wasm-gdb-remote flag is set.
+ start_semaphore_.Wait();
+ return true;
+}
+
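The comment above describes a start handshake; the same pattern in isolation, as a self-contained sketch in plain C++ (C++20 for std::binary_semaphore; all names illustrative):

#include <semaphore>
#include <thread>

class HandshakeThread {
 public:
  bool StartAndInitialize() {
    thread_ = std::thread([this] { Run(); });
    // Block until Run() has published its state, so that a concurrent Stop()
    // can never observe half-initialized members.
    started_.acquire();
    return true;
  }
  ~HandshakeThread() {
    if (thread_.joinable()) thread_.join();
  }

 private:
  void Run() {
    // ... initialize the equivalents of transport_ and target_ here ...
    started_.release();  // Initialization done; unblock the starter.
    // ... main loop ...
  }
  std::thread thread_;
  std::binary_semaphore started_{0};
};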
+void GdbServerThread::CleanupThread() {
+ // Executed in the GdbServer thread.
+ v8::base::MutexGuard guard(&mutex_);
+
+ target_ = nullptr;
+ transport_ = nullptr;
+
+#if _WIN32
+ ::WSACleanup();
+#endif
+}
+
+void GdbServerThread::Run() {
+ // Executed in the GdbServer thread.
+#ifdef _WIN32
+ // Initialize Winsock
+ WSADATA wsaData;
+ int iResult = ::WSAStartup(MAKEWORD(2, 2), &wsaData);
+ if (iResult != 0) {
+ TRACE_GDB_REMOTE("GdbServerThread::Run: WSAStartup failed\n");
+ return;
+ }
+#endif
+
+ // If the default port is not available, try any port.
+ SocketBinding socket_binding = SocketBinding::Bind(FLAG_wasm_gdb_remote_port);
+ if (!socket_binding.IsValid()) {
+ socket_binding = SocketBinding::Bind(0);
+ }
+ if (!socket_binding.IsValid()) {
+ TRACE_GDB_REMOTE("GdbServerThread::Run: Failed to bind any TCP port\n");
+ return;
+ }
+ TRACE_GDB_REMOTE("gdb-remote(%d) : Connect GDB with 'target remote :%d\n",
+ __LINE__, socket_binding.GetBoundPort());
+
+ transport_ = socket_binding.CreateTransport();
+ target_ = std::make_unique<Target>(gdb_server_);
+
+ // Here we have completed the initialization, and the thread that called
+ // {StartAndInitialize} may resume execution.
+ start_semaphore_.Signal();
+
+ while (!target_->IsTerminated()) {
+ // Wait for incoming connections.
+ if (!transport_->AcceptConnection()) {
+ continue;
+ }
+
+ // Create a new session for this connection
+ Session session(transport_.get());
+ TRACE_GDB_REMOTE("GdbServerThread: Connected\n");
+
+ // Run this session for as long as it lasts
+ target_->Run(&session);
+ }
+ CleanupThread();
+}
+
+void GdbServerThread::Stop() {
+ // Executed in the Isolate thread.
+
+  // Synchronized, because {Stop} might be called while {Run} is still
+  // initializing {transport_} and {target_}. If this happens and the thread is
+  // blocked waiting for an incoming connection or for incoming packets, it
+  // will be unblocked when {transport_} is closed.
+ v8::base::MutexGuard guard(&mutex_);
+
+ if (target_) {
+ target_->Terminate();
+ }
+
+ if (transport_) {
+ transport_->Close();
+ }
+}
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h b/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h
new file mode 100644
index 0000000000..f31756cbb3
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h
@@ -0,0 +1,61 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_GDB_SERVER_THREAD_H_
+#define V8_DEBUG_WASM_GDB_SERVER_GDB_SERVER_THREAD_H_
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
+#include "src/debug/wasm/gdb-server/target.h"
+#include "src/debug/wasm/gdb-server/transport.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+class GdbServer;
+
+// class GdbServerThread spawns a thread where all communication with a debugger
+// happens.
+class GdbServerThread : public v8::base::Thread {
+ public:
+ explicit GdbServerThread(GdbServer* gdb_server);
+
+ // base::Thread
+ void Run() override;
+
+  // Starts the GDB-server thread and waits until Run() has been called on the
+  // new thread and the initialization has completed.
+ bool StartAndInitialize();
+
+ // Stops the GDB-server thread when the V8 process shuts down; gracefully
+ // closes any active debugging session.
+ void Stop();
+
+ private:
+ void CleanupThread();
+
+ GdbServer* gdb_server_;
+
+ // Used to block the caller on StartAndInitialize() waiting for the new thread
+ // to have completed its initialization.
+ // (Note that Thread::StartSynchronously() wouldn't work in this case because
+ // it returns as soon as the new thread starts, but before Run() is called).
+ base::Semaphore start_semaphore_;
+
+ base::Mutex mutex_;
+ // Protected by {mutex_}:
+ std::unique_ptr<Transport> transport_;
+ std::unique_ptr<Target> target_;
+
+ DISALLOW_COPY_AND_ASSIGN(GdbServerThread);
+};
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_GDB_SERVER_THREAD_H_
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc b/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
new file mode 100644
index 0000000000..bad7f439eb
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/wasm/gdb-server/gdb-server.h"
+
+#include "src/debug/wasm/gdb-server/gdb-server-thread.h"
+#include "src/wasm/wasm-engine.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+GdbServer::GdbServer() {
+ DCHECK(!thread_);
+ DCHECK(FLAG_wasm_gdb_remote);
+
+ thread_ = std::make_unique<GdbServerThread>(this);
+  // TODO(paolosev): does StartSynchronously hurt performance?
+ if (!thread_->StartAndInitialize()) {
+ TRACE_GDB_REMOTE(
+ "Cannot initialize thread, GDB-remote debugging will be disabled.\n");
+ thread_ = nullptr;
+ }
+}
+
+GdbServer::~GdbServer() {
+ if (thread_) {
+ thread_->Stop();
+ thread_->Join();
+ }
+}
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server.h b/deps/v8/src/debug/wasm/gdb-server/gdb-server.h
new file mode 100644
index 0000000000..59d8a17375
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server.h
@@ -0,0 +1,43 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_GDB_SERVER_H_
+#define V8_DEBUG_WASM_GDB_SERVER_GDB_SERVER_H_
+
+#include <memory>
+#include "src/debug/wasm/gdb-server/gdb-server-thread.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+// class GdbServer acts as a manager for the GDB-remote stub. It is instantiated
+// as soon as the first Wasm module is loaded in the Wasm engine and spawns a
+// separate thread to accept connections and exchange messages with a debugger.
+// It will contain the logic to serve debugger queries and access the state of
+// the Wasm engine.
+class GdbServer {
+ public:
+ // Spawns a "GDB-remote" thread that will be used to communicate with the
+ // debugger. This should be called once, the first time a Wasm module is
+ // loaded in the Wasm engine.
+ GdbServer();
+
+ // Stops the "GDB-remote" thread and waits for it to complete. This should be
+ // called once, when the Wasm engine shuts down.
+ ~GdbServer();
+
+ private:
+ std::unique_ptr<GdbServerThread> thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(GdbServer);
+};
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_GDB_SERVER_H_
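To make the intended ownership concrete, a hedged sketch of how an engine could hold the stub; OwnerEngine and its member are illustrative, while FLAG_wasm_gdb_remote is the flag already checked in the constructor above:

struct OwnerEngine {
  std::unique_ptr<gdb_server::GdbServer> gdb_server_;

  void OnFirstWasmModuleLoaded() {
    if (FLAG_wasm_gdb_remote && !gdb_server_) {
      gdb_server_ = std::make_unique<gdb_server::GdbServer>();  // spawns thread
    }
  }
  void OnShutdown() {
    gdb_server_.reset();  // ~GdbServer stops the thread and joins it
  }
};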
diff --git a/deps/v8/src/debug/wasm/gdb-server/session.cc b/deps/v8/src/debug/wasm/gdb-server/session.cc
new file mode 100644
index 0000000000..3e98e093af
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/session.cc
@@ -0,0 +1,48 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/wasm/gdb-server/session.h"
+#include "src/debug/wasm/gdb-server/transport.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+Session::Session(Transport* transport) : io_(transport), connected_(true) {}
+
+void Session::WaitForDebugStubEvent() { io_->WaitForDebugStubEvent(); }
+
+bool Session::SignalThreadEvent() { return io_->SignalThreadEvent(); }
+
+bool Session::IsDataAvailable() const { return io_->IsDataAvailable(); }
+
+bool Session::IsConnected() const { return connected_; }
+
+void Session::Disconnect() {
+ io_->Disconnect();
+ connected_ = false;
+}
+
+bool Session::GetChar(char* ch) {
+ if (!io_->Read(ch, 1)) {
+ Disconnect();
+ return false;
+ }
+
+ return true;
+}
+
+bool Session::GetPacket() {
+ char ch;
+ if (!GetChar(&ch)) return false;
+
+ // discard the input
+ return true;
+}
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/wasm/gdb-server/session.h b/deps/v8/src/debug/wasm/gdb-server/session.h
new file mode 100644
index 0000000000..a76bb4a3b4
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/session.h
@@ -0,0 +1,59 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_SESSION_H_
+#define V8_DEBUG_WASM_GDB_SERVER_SESSION_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+class Transport;
+
+// Represents a gdb-remote debugging session.
+class Session {
+ public:
+ explicit Session(Transport* transport);
+
+ // Attempt to receive a packet.
+ // For the moment this method is only used to check whether the TCP connection
+ // is still active; all bytes read are discarded.
+ bool GetPacket();
+
+ // Return true if there is data to read.
+ bool IsDataAvailable() const;
+
+  // Return true if the connection is still valid.
+ bool IsConnected() const;
+
+ // Shutdown the connection.
+ void Disconnect();
+
+ // When a debugging session is active, the GDB-remote thread can block waiting
+  // for events; it will resume execution when one of these two events arises:
+ // - A network event (a new packet arrives, or the connection is dropped)
+ // - A thread event (the execution stopped because of a trap or breakpoint).
+ void WaitForDebugStubEvent();
+
+ // Signal that the debuggee execution stopped because of a trap or breakpoint.
+ bool SignalThreadEvent();
+
+ private:
+ bool GetChar(char* ch);
+
+ Transport* io_; // Transport object not owned by the Session.
+  bool connected_;   // Whether the connection is still valid.
+
+ DISALLOW_COPY_AND_ASSIGN(Session);
+};
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_SESSION_H_
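Condensed, the loop that drives a Session (compare Target::Run() and GdbServerThread::Run() elsewhere in this patch) looks roughly like this; DriveSession is illustrative:

void DriveSession(Transport* transport) {
  Session session(transport);
  while (session.IsConnected()) {
    session.WaitForDebugStubEvent();  // block on network or thread events
    if (session.IsDataAvailable() && !session.GetPacket()) {
      break;  // connection dropped; GetPacket() currently discards input
    }
  }
}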
diff --git a/deps/v8/src/debug/wasm/gdb-server/target.cc b/deps/v8/src/debug/wasm/gdb-server/target.cc
new file mode 100644
index 0000000000..ac5bf10a0e
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/target.cc
@@ -0,0 +1,65 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/wasm/gdb-server/target.h"
+
+#include "src/base/platform/time.h"
+#include "src/debug/wasm/gdb-server/gdb-server.h"
+#include "src/debug/wasm/gdb-server/session.h"
+#include "src/debug/wasm/gdb-server/transport.h"
+#include "src/debug/wasm/gdb-server/util.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+Target::Target(GdbServer* gdb_server)
+ : status_(Status::Running), session_(nullptr) {}
+
+void Target::Terminate() {
+ // Executed in the Isolate thread.
+ status_ = Status::Terminated;
+}
+
+void Target::Run(Session* session) {
+ // Executed in the GdbServer thread.
+
+ session_ = session;
+ do {
+ WaitForDebugEvent();
+ ProcessCommands();
+ } while (!IsTerminated() && session_->IsConnected());
+ session_ = nullptr;
+}
+
+void Target::WaitForDebugEvent() {
+ // Executed in the GdbServer thread.
+
+ if (status_ != Status::Terminated) {
+ // Wait for either:
+ // * the thread to fault (or single-step)
+ // * an interrupt from LLDB
+ session_->WaitForDebugStubEvent();
+ }
+}
+
+void Target::ProcessCommands() {
+ // GDB-remote messages are processed in the GDBServer thread.
+
+ if (IsTerminated()) {
+ return;
+ }
+
+ // TODO(paolosev)
+ // For the moment just discard any packet we receive from the debugger.
+ do {
+ if (!session_->GetPacket()) continue;
+ } while (session_->IsConnected());
+}
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/wasm/gdb-server/target.h b/deps/v8/src/debug/wasm/gdb-server/target.h
new file mode 100644
index 0000000000..93d02489c5
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/target.h
@@ -0,0 +1,57 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_TARGET_H_
+#define V8_DEBUG_WASM_GDB_SERVER_TARGET_H_
+
+#include <atomic>
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+class GdbServer;
+class Session;
+
+// Class Target represents a debugging target. It contains the logic to decode
+// incoming GDB-remote packets, execute them forwarding the debugger commands
+// and queries to the Wasm engine, and send back GDB-remote packets.
+class Target {
+ public:
+  // Construct a Target object.
+ explicit Target(GdbServer* gdb_server);
+
+  // This function spins on a debugging session until it closes.
+ void Run(Session* ses);
+
+ void Terminate();
+ bool IsTerminated() const { return status_ == Status::Terminated; }
+
+ private:
+ // Blocks waiting for one of these two events to occur:
+ // - A network packet arrives from the debugger, or the debugger connection is
+ // closed;
+ // - The debuggee suspends execution because of a trap or breakpoint.
+ void WaitForDebugEvent();
+
+ // Processes GDB-remote packets that arrive from the debugger.
+ // This method should be called when the debuggee has suspended its execution.
+ void ProcessCommands();
+
+ enum class Status { Running, Terminated };
+ std::atomic<Status> status_;
+
+ Session* session_; // Session object not owned by the Target.
+
+ DISALLOW_COPY_AND_ASSIGN(Target);
+};
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_TARGET_H_
diff --git a/deps/v8/src/debug/wasm/gdb-server/transport.cc b/deps/v8/src/debug/wasm/gdb-server/transport.cc
new file mode 100644
index 0000000000..385828ff69
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/transport.cc
@@ -0,0 +1,444 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/wasm/gdb-server/transport.h"
+#include <fcntl.h>
+
+#ifndef SD_BOTH
+#define SD_BOTH 2
+#endif
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+SocketBinding::SocketBinding(SocketHandle socket_handle)
+ : socket_handle_(socket_handle) {}
+
+// static
+SocketBinding SocketBinding::Bind(uint16_t tcp_port) {
+ SocketHandle socket_handle = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (socket_handle == InvalidSocket) {
+ TRACE_GDB_REMOTE("Failed to create socket.\n");
+ return SocketBinding(InvalidSocket);
+ }
+ struct sockaddr_in sockaddr;
+ // Clearing sockaddr_in first appears to be necessary on Mac OS X.
+ memset(&sockaddr, 0, sizeof(sockaddr));
+ socklen_t addrlen = static_cast<socklen_t>(sizeof(sockaddr));
+ sockaddr.sin_family = AF_INET;
+ sockaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ sockaddr.sin_port = htons(tcp_port);
+
+#if _WIN32
+ // On Windows, SO_REUSEADDR has a different meaning than on POSIX systems.
+ // SO_REUSEADDR allows hijacking of an open socket by another process.
+ // The SO_EXCLUSIVEADDRUSE flag prevents this behavior.
+ // See:
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms740621(v=vs.85).aspx
+ //
+ // Additionally, unlike POSIX, TCP server sockets can be bound to
+ // ports in the TIME_WAIT state, without setting SO_REUSEADDR.
+ int exclusive_address = 1;
+ if (setsockopt(socket_handle, SOL_SOCKET, SO_EXCLUSIVEADDRUSE,
+ reinterpret_cast<char*>(&exclusive_address),
+ sizeof(exclusive_address))) {
+ TRACE_GDB_REMOTE("Failed to set SO_EXCLUSIVEADDRUSE option.\n");
+ }
+#else
+ // On POSIX, this is necessary to ensure that the TCP port is released
+ // promptly when sel_ldr exits. Without this, the TCP port might
+ // only be released after a timeout, and later processes can fail
+ // to bind it.
+ int reuse_address = 1;
+ if (setsockopt(socket_handle, SOL_SOCKET, SO_REUSEADDR,
+ reinterpret_cast<char*>(&reuse_address),
+ sizeof(reuse_address))) {
+ TRACE_GDB_REMOTE("Failed to set SO_REUSEADDR option.\n");
+ }
+#endif
+
+ if (bind(socket_handle, reinterpret_cast<struct sockaddr*>(&sockaddr),
+ addrlen)) {
+ TRACE_GDB_REMOTE("Failed to bind server.\n");
+ return SocketBinding(InvalidSocket);
+ }
+
+ if (listen(socket_handle, 1)) {
+ TRACE_GDB_REMOTE("Failed to listen.\n");
+ return SocketBinding(InvalidSocket);
+ }
+ return SocketBinding(socket_handle);
+}
+
+std::unique_ptr<Transport> SocketBinding::CreateTransport() {
+ return std::make_unique<Transport>(socket_handle_);
+}
+
+uint16_t SocketBinding::GetBoundPort() {
+ struct sockaddr_in saddr;
+ struct sockaddr* psaddr = reinterpret_cast<struct sockaddr*>(&saddr);
+ // Clearing sockaddr_in first appears to be necessary on Mac OS X.
+ memset(&saddr, 0, sizeof(saddr));
+ socklen_t addrlen = static_cast<socklen_t>(sizeof(saddr));
+ if (::getsockname(socket_handle_, psaddr, &addrlen)) {
+ TRACE_GDB_REMOTE("Failed to retrieve bound address.\n");
+ return 0;
+ }
+ return ntohs(saddr.sin_port);
+}
+
+// Do not delay sending small packets. This significantly speeds up
+// remote debugging. The debug stub uses buffering to send outgoing packets
+// so they are not split into more TCP packets than necessary.
+void DisableNagleAlgorithm(SocketHandle socket) {
+ int nodelay = 1;
+ if (::setsockopt(socket, IPPROTO_TCP, TCP_NODELAY,
+ reinterpret_cast<char*>(&nodelay), sizeof(nodelay))) {
+ TRACE_GDB_REMOTE("Failed to set TCP_NODELAY option.\n");
+ }
+}
+
+TransportBase::TransportBase(SocketHandle s)
+ : buf_(new char[kBufSize]),
+ pos_(0),
+ size_(0),
+ handle_bind_(s),
+ handle_accept_(InvalidSocket) {}
+
+TransportBase::~TransportBase() {
+ if (handle_accept_ != InvalidSocket) {
+ CloseSocket(handle_accept_);
+ }
+}
+
+void TransportBase::CopyFromBuffer(char** dst, int32_t* len) {
+ int32_t copy_bytes = std::min(*len, size_ - pos_);
+ memcpy(*dst, buf_.get() + pos_, copy_bytes);
+ pos_ += copy_bytes;
+ *len -= copy_bytes;
+ *dst += copy_bytes;
+}
+
+bool TransportBase::Read(char* dst, int32_t len) {
+ if (pos_ < size_) {
+ CopyFromBuffer(&dst, &len);
+ }
+ while (len > 0) {
+ pos_ = 0;
+ size_ = 0;
+ if (!ReadSomeData()) {
+ return false;
+ }
+ CopyFromBuffer(&dst, &len);
+ }
+ return true;
+}
+
+bool TransportBase::Write(const char* src, int32_t len) {
+ while (len > 0) {
+ ssize_t result = ::send(handle_accept_, src, len, 0);
+ if (result > 0) {
+ src += result;
+ len -= result;
+ continue;
+ }
+ if (result == 0) {
+ return false;
+ }
+ if (SocketGetLastError() != kErrInterrupt) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Return true if there is data to read.
+bool TransportBase::IsDataAvailable() const {
+ if (pos_ < size_) {
+ return true;
+ }
+ fd_set fds;
+
+ FD_ZERO(&fds);
+ FD_SET(handle_accept_, &fds);
+
+ // We want a "non-blocking" check
+ struct timeval timeout;
+ timeout.tv_sec = 0;
+ timeout.tv_usec = 0;
+
+ // Check if this file handle can select on read
+ int cnt = select(static_cast<int>(handle_accept_) + 1, &fds, 0, 0, &timeout);
+
+  // If we are ready, or if there is an error, we return true; returning true
+  // on error lets the next IO request fail.
+ if (cnt != 0) return true;
+
+ return false;
+}
+
+void TransportBase::Close() {
+ ::shutdown(handle_bind_, SD_BOTH);
+ CloseSocket(handle_bind_);
+ Disconnect();
+}
+
+void TransportBase::Disconnect() {
+ if (handle_accept_ != InvalidSocket) {
+    // Shut down the connection in both directions. This should always
+    // succeed; there is nothing we can do if it fails.
+ ::shutdown(handle_accept_, SD_BOTH);
+
+ CloseSocket(handle_accept_);
+ handle_accept_ = InvalidSocket;
+ }
+}
+
+#if _WIN32
+
+Transport::Transport(SocketHandle s) : TransportBase(s) {
+ socket_event_ = WSA_INVALID_EVENT;
+ faulted_thread_event_ = ::CreateEvent(NULL, TRUE, FALSE, NULL);
+ if (faulted_thread_event_ == NULL) {
+ TRACE_GDB_REMOTE(
+ "Transport::Transport: Failed to create event object for faulted"
+ "thread\n");
+ }
+}
+
+Transport::~Transport() {
+ if (!CloseHandle(faulted_thread_event_)) {
+ TRACE_GDB_REMOTE("Transport::~Transport: Failed to close event\n");
+ }
+
+ if (socket_event_) {
+ if (!::WSACloseEvent(socket_event_)) {
+ TRACE_GDB_REMOTE("Transport::~Transport: Failed to close socket event\n");
+ }
+ }
+}
+
+bool Transport::AcceptConnection() {
+ CHECK(handle_accept_ == InvalidSocket);
+ handle_accept_ = ::accept(handle_bind_, NULL, 0);
+ if (handle_accept_ != InvalidSocket) {
+ DisableNagleAlgorithm(handle_accept_);
+
+ // Create socket event
+ socket_event_ = ::WSACreateEvent();
+ if (socket_event_ == WSA_INVALID_EVENT) {
+ TRACE_GDB_REMOTE(
+ "Transport::AcceptConnection: Failed to create socket event\n");
+ }
+
+ // Listen for close events in order to handle them correctly.
+ // Additionally listen for read readiness as WSAEventSelect sets the socket
+ // to non-blocking mode.
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms738547(v=vs.85).aspx
+ if (::WSAEventSelect(handle_accept_, socket_event_, FD_CLOSE | FD_READ) ==
+ SOCKET_ERROR) {
+ TRACE_GDB_REMOTE(
+ "Transport::AcceptConnection: Failed to bind event to socket\n");
+ }
+ return true;
+ }
+ return false;
+}
+
+bool Transport::ReadSomeData() {
+ while (true) {
+ ssize_t result =
+ ::recv(handle_accept_, buf_.get() + size_, kBufSize - size_, 0);
+ if (result > 0) {
+ size_ += result;
+ return true;
+ }
+ if (result == 0) {
+ return false; // The connection was gracefully closed.
+ }
+
+    // WSAEventSelect sets the socket to non-blocking mode. This is essential
+    // for socket event notification to work; there is no workaround.
+ // See remarks section at the page
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
+ if (SocketGetLastError() == WSAEWOULDBLOCK) {
+ if (::WaitForSingleObject(socket_event_, INFINITE) == WAIT_FAILED) {
+ TRACE_GDB_REMOTE(
+ "Transport::ReadSomeData: Failed to wait on socket event\n");
+ }
+ if (!::ResetEvent(socket_event_)) {
+ TRACE_GDB_REMOTE(
+ "Transport::ReadSomeData: Failed to reset socket event\n");
+ }
+ continue;
+ }
+
+ if (SocketGetLastError() != kErrInterrupt) {
+ return false;
+ }
+ }
+}
+
+void Transport::WaitForDebugStubEvent() {
+ // Don't wait if we already have data to read.
+ bool wait = !(pos_ < size_);
+
+ HANDLE handles[2];
+ handles[0] = faulted_thread_event_;
+ handles[1] = socket_event_;
+ int count = size_ < kBufSize ? 2 : 1;
+ int result =
+ WaitForMultipleObjects(count, handles, FALSE, wait ? INFINITE : 0);
+ if (result == WAIT_OBJECT_0 + 1) {
+ if (!ResetEvent(socket_event_)) {
+ TRACE_GDB_REMOTE(
+ "Transport::WaitForDebugStubEvent: Failed to reset socket event\n");
+ }
+ return;
+ } else if (result == WAIT_OBJECT_0) {
+ if (!ResetEvent(faulted_thread_event_)) {
+ TRACE_GDB_REMOTE(
+ "Transport::WaitForDebugStubEvent: Failed to reset event\n");
+ }
+ return;
+ } else if (result == WAIT_TIMEOUT) {
+ return;
+ }
+ TRACE_GDB_REMOTE(
+ "Transport::WaitForDebugStubEvent: Wait for events failed\n");
+}
+
+bool Transport::SignalThreadEvent() {
+ if (!SetEvent(faulted_thread_event_)) {
+ return false;
+ }
+ return true;
+}
+
+void Transport::Disconnect() {
+ TransportBase::Disconnect();
+
+ if (socket_event_ != WSA_INVALID_EVENT && !::WSACloseEvent(socket_event_)) {
+ TRACE_GDB_REMOTE("Transport::~Transport: Failed to close socket event\n");
+ }
+ socket_event_ = WSA_INVALID_EVENT;
+ SignalThreadEvent();
+}
+
+#else // _WIN32
+
+Transport::Transport(SocketHandle s) : TransportBase(s) {
+ int fds[2];
+#if defined(__linux__)
+ int ret = pipe2(fds, O_CLOEXEC);
+#else
+ int ret = pipe(fds);
+#endif
+ if (ret < 0) {
+ TRACE_GDB_REMOTE(
+ "Transport::Transport: Failed to allocate pipe for faulted thread\n");
+ }
+ faulted_thread_fd_read_ = fds[0];
+ faulted_thread_fd_write_ = fds[1];
+}
+
+Transport::~Transport() {
+ if (close(faulted_thread_fd_read_) != 0) {
+ TRACE_GDB_REMOTE("Transport::~Transport: Failed to close event\n");
+ }
+ if (close(faulted_thread_fd_write_) != 0) {
+ TRACE_GDB_REMOTE("Transport::~Transport: Failed to close event\n");
+ }
+}
+
+bool Transport::AcceptConnection() {
+ CHECK(handle_accept_ == InvalidSocket);
+ handle_accept_ = ::accept(handle_bind_, NULL, 0);
+ if (handle_accept_ != InvalidSocket) {
+ DisableNagleAlgorithm(handle_accept_);
+ return true;
+ }
+ return false;
+}
+
+bool Transport::ReadSomeData() {
+ while (true) {
+ ssize_t result =
+ ::recv(handle_accept_, buf_.get() + size_, kBufSize - size_, 0);
+ if (result > 0) {
+ size_ += result;
+ return true;
+ }
+ if (result == 0) {
+ return false; // The connection was gracefully closed.
+ }
+ if (SocketGetLastError() != kErrInterrupt) {
+ return false;
+ }
+ }
+}
+
+void Transport::WaitForDebugStubEvent() {
+ // Don't wait if we already have data to read.
+ bool wait = !(pos_ < size_);
+
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(faulted_thread_fd_read_, &fds);
+ int max_fd = faulted_thread_fd_read_;
+ if (size_ < kBufSize) {
+ FD_SET(handle_accept_, &fds);
+ max_fd = std::max(max_fd, handle_accept_);
+ }
+
+ int ret;
+ // We don't need sleep-polling on Linux now, so we set either zero or infinite
+ // timeout.
+ if (wait) {
+ ret = select(max_fd + 1, &fds, NULL, NULL, NULL);
+ } else {
+ struct timeval timeout;
+ timeout.tv_sec = 0;
+ timeout.tv_usec = 0;
+ ret = select(max_fd + 1, &fds, NULL, NULL, &timeout);
+ }
+ if (ret < 0) {
+ TRACE_GDB_REMOTE(
+ "Transport::WaitForDebugStubEvent: Failed to wait for "
+ "debug stub event\n");
+ }
+
+ if (ret > 0) {
+ if (FD_ISSET(faulted_thread_fd_read_, &fds)) {
+ char buf[16];
+ if (read(faulted_thread_fd_read_, &buf, sizeof(buf)) < 0) {
+ TRACE_GDB_REMOTE(
+ "Transport::WaitForDebugStubEvent: Failed to read from "
+ "debug stub event pipe fd\n");
+ }
+ }
+ if (FD_ISSET(handle_accept_, &fds)) ReadSomeData();
+ }
+}
+
+bool Transport::SignalThreadEvent() {
+ // Notify the debug stub by marking the thread as faulted.
+ char buf = 0;
+ if (write(faulted_thread_fd_write_, &buf, sizeof(buf)) != sizeof(buf)) {
+ TRACE_GDB_REMOTE("SignalThreadEvent: Can't send debug stub event\n");
+ return false;
+ }
+ return true;
+}
+
+#endif // _WIN32
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#undef SD_BOTH
diff --git a/deps/v8/src/debug/wasm/gdb-server/transport.h b/deps/v8/src/debug/wasm/gdb-server/transport.h
new file mode 100644
index 0000000000..077b1d1097
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/transport.h
@@ -0,0 +1,183 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_TRANSPORT_H_
+#define V8_DEBUG_WASM_GDB_SERVER_TRANSPORT_H_
+
+#include <sstream>
+#include <vector>
+#include "src/base/macros.h"
+#include "src/debug/wasm/gdb-server/util.h"
+
+#if _WIN32
+#include <windows.h>
+#include <winsock2.h>
+
+typedef SOCKET SocketHandle;
+
+#define CloseSocket closesocket
+#define InvalidSocket INVALID_SOCKET
+#define SocketGetLastError() WSAGetLastError()
+static const int kErrInterrupt = WSAEINTR;
+typedef int ssize_t;
+typedef int socklen_t;
+
+#else // _WIN32
+
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <netinet/tcp.h>
+#include <sys/select.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <string>
+
+typedef int SocketHandle;
+
+#define CloseSocket close
+#define InvalidSocket (-1)
+#define SocketGetLastError() errno
+static const int kErrInterrupt = EINTR;
+
+#endif // _WIN32
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+class Transport;
+
+// Acts as a factory for Transport objects bound to a specified TCP port.
+class SocketBinding {
+ public:
+ // Wrap existing socket handle.
+ explicit SocketBinding(SocketHandle socket_handle);
+
+ // Bind to the specified TCP port.
+ static SocketBinding Bind(uint16_t tcp_port);
+
+ bool IsValid() const { return socket_handle_ != InvalidSocket; }
+
+ // Create a transport object from this socket binding
+ std::unique_ptr<Transport> CreateTransport();
+
+ // Get port the socket is bound to.
+ uint16_t GetBoundPort();
+
+ private:
+ SocketHandle socket_handle_;
+};
+
+class TransportBase {
+ public:
+ explicit TransportBase(SocketHandle s);
+ virtual ~TransportBase();
+
+ // Read {len} bytes from this transport, possibly blocking until enough data
+ // is available.
+ // {dst} must point to a buffer large enough to contain {len} bytes.
+ // Returns true on success.
+  // Returns false if the connection is closed; in that case {dst} may have
+  // been partially overwritten.
+ bool Read(char* dst, int32_t len);
+
+ // Write {len} bytes to this transport.
+ // Return true on success, false if the connection is closed.
+ bool Write(const char* src, int32_t len);
+
+ // Return true if there is data to read.
+ bool IsDataAvailable() const;
+
+ // Shuts down this transport, gracefully closing the existing connection and
+ // also closing the listening socket. This should be called when the GDB stub
+ // shuts down, when the program terminates.
+ void Close();
+
+ // If a socket connection with a debugger is present, gracefully closes it.
+ // This should be called when a debugging session gets closed.
+ virtual void Disconnect();
+
+ protected:
+ // Copy buffered data to *dst up to len bytes and update dst and len.
+ void CopyFromBuffer(char** dst, int32_t* len);
+
+ // Read available data from the socket. Return false on EOF or error.
+ virtual bool ReadSomeData() = 0;
+
+ static const int kBufSize = 4096;
+ std::unique_ptr<char[]> buf_;
+ int32_t pos_;
+ int32_t size_;
+ SocketHandle handle_bind_;
+ SocketHandle handle_accept_;
+};
+
+#if _WIN32
+
+class Transport : public TransportBase {
+ public:
+ explicit Transport(SocketHandle s);
+ ~Transport() override;
+
+ // Waits for an incoming connection on the bound port.
+ bool AcceptConnection();
+
+ // Blocks waiting for one of these two events to occur:
+ // - A network event (a new packet arrives, or the connection is dropped),
+ // - A thread event is signaled (the execution stopped because of a trap or
+ // breakpoint).
+ void WaitForDebugStubEvent();
+
+ // Signal that this transport should leave an alertable wait state because
+ // the execution of the debuggee was stopped because of a trap or breakpoint.
+ bool SignalThreadEvent();
+
+ void Disconnect() override;
+
+ private:
+ bool ReadSomeData() override;
+
+ HANDLE socket_event_;
+ HANDLE faulted_thread_event_;
+
+ DISALLOW_COPY_AND_ASSIGN(Transport);
+};
+
+#else // _WIN32
+
+class Transport : public TransportBase {
+ public:
+ explicit Transport(SocketHandle s);
+ ~Transport() override;
+
+ // Waits for an incoming connection on the bound port.
+ bool AcceptConnection();
+
+ // Blocks waiting for one of these two events to occur:
+ // - A network event (a new packet arrives, or the connection is dropped),
+ // - The debuggee suspends execution because of a trap or breakpoint.
+ void WaitForDebugStubEvent();
+
+ // Signal that this transport should leave an alertable wait state because
+ // the execution of the debuggee was stopped because of a trap or breakpoint.
+ bool SignalThreadEvent();
+
+ private:
+ bool ReadSomeData() override;
+
+ int faulted_thread_fd_read_;
+ int faulted_thread_fd_write_;
+
+ DISALLOW_COPY_AND_ASSIGN(Transport);
+};
+
+#endif // _WIN32
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_TRANSPORT_H_
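
Editorial note: to make the new transport API easier to review, here is a minimal usage sketch, not part of the patch. `RunEchoStub` and its `port` argument are invented for illustration; `SocketBinding` and `Transport` are the classes declared above.

```cpp
#include "src/debug/wasm/gdb-server/transport.h"

namespace v8 {
namespace internal {
namespace wasm {
namespace gdb_server {

// Hypothetical stub loop: bind a port, accept one debugger connection, and
// echo fixed-size packets until the peer disconnects.
void RunEchoStub(uint16_t port) {
  SocketBinding binding = SocketBinding::Bind(port);
  if (!binding.IsValid()) return;  // Port already in use, etc.
  std::unique_ptr<Transport> transport = binding.CreateTransport();
  if (!transport->AcceptConnection()) return;
  char packet[16];
  // Read() blocks until 16 bytes arrive or the connection closes.
  while (transport->Read(packet, sizeof(packet))) {
    if (!transport->Write(packet, sizeof(packet))) break;
  }
  transport->Disconnect();  // End this debugging session...
  transport->Close();       // ...and also close the listening socket.
}

}  // namespace gdb_server
}  // namespace wasm
}  // namespace internal
}  // namespace v8
```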
diff --git a/deps/v8/src/debug/wasm/gdb-server/util.h b/deps/v8/src/debug/wasm/gdb-server/util.h
new file mode 100644
index 0000000000..5e977741d8
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/util.h
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_UTIL_H_
+#define V8_DEBUG_WASM_GDB_SERVER_UTIL_H_
+
+#include <string>
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+#define TRACE_GDB_REMOTE(...) \
+ do { \
+ if (FLAG_trace_wasm_gdb_remote) PrintF("[gdb-remote] " __VA_ARGS__); \
+ } while (false)
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_UTIL_H_
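
A quick illustration of the macro above, with a hypothetical call site: output is gated on the `--trace-wasm-gdb-remote` flag, and the `do { ... } while (false)` wrapper makes the macro safe in unbraced `if`/`else` bodies.

```cpp
// Hypothetical call site inside the gdb_server namespace:
TRACE_GDB_REMOTE("accepted connection on port %d\n", port);
// Prints "[gdb-remote] accepted connection on port 1234" only when V8 runs
// with --trace-wasm-gdb-remote; otherwise it compiles to a cheap flag check.
```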
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index becdc93a4c..c04e49282e 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -12,8 +12,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
-const int Deoptimizer::kDeoptExitSize = 0;
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
+const int Deoptimizer::kNonLazyDeoptExitSize = 0;
+const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
@@ -262,6 +263,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
UNREACHABLE();
}
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index 300e65ab00..9477fe185b 100644
--- a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -9,12 +9,18 @@
#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/pointer-authentication.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSize = true;
-const int Deoptimizer::kDeoptExitSize = kInstrSize;
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = kInstrSize;
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
+#else
+const int Deoptimizer::kLazyDeoptExitSize = 1 * kInstrSize;
+#endif
#define __ masm->
@@ -288,6 +294,9 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ __ Autiasp();
+#endif
__ Br(continuation);
}
@@ -297,6 +306,14 @@ Float32 RegisterValues::GetFloatRegister(unsigned n) const {
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ // TODO(v8:10026): check that the pointer is still in the list of allowed
+ // builtins.
+ Address new_context =
+ static_cast<Address>(GetTop()) + offset + kPCOnStackSize;
+ uint64_t old_context = GetTop() + GetFrameSize();
+ PointerAuthentication::ReplaceContext(reinterpret_cast<Address*>(&value),
+ old_context, new_context);
+
SetFrameSlot(offset, value);
}
@@ -309,6 +326,12 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
UNREACHABLE();
}
+void FrameDescription::SetPc(intptr_t pc) {
+ // TODO(v8:10026): we should only accept a specific list of allowed builtins
+ // here.
+ pc_ = PointerAuthentication::SignPCWithSP(pc, GetTop());
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h
index cfb07248dc..4a23da8532 100644
--- a/deps/v8/src/deoptimizer/deoptimize-reason.h
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.h
@@ -55,6 +55,7 @@ namespace internal {
V(ValueMismatch, "value mismatch") \
V(WrongCallTarget, "wrong call target") \
V(WrongEnumIndices, "wrong enum indices") \
+ V(WrongFeedbackCell, "wrong feedback cell") \
V(WrongInstanceType, "wrong instance type") \
V(WrongMap, "wrong map") \
V(WrongName, "wrong name") \
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 56431d4eea..9fcccd483c 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -14,6 +14,7 @@
#include "src/codegen/register-configuration.h"
#include "src/diagnostics/disasm.h"
#include "src/execution/frames-inl.h"
+#include "src/execution/pointer-authentication.h"
#include "src/execution/v8threads.h"
#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h"
@@ -122,7 +123,8 @@ class FrameWriter {
PrintF(trace_scope_->file(), " " V8PRIxPTR_FMT ": [top + %3d] <- ",
output_address(output_offset), output_offset);
if (obj.IsSmi()) {
- PrintF(V8PRIxPTR_FMT " <Smi %d>", obj.ptr(), Smi::cast(obj).value());
+ PrintF(trace_scope_->file(), V8PRIxPTR_FMT " <Smi %d>", obj.ptr(),
+ Smi::cast(obj).value());
} else {
obj.ShortPrint(trace_scope_->file());
}
@@ -252,7 +254,10 @@ class ActivationsFinder : public ThreadVisitor {
int trampoline_pc = safepoint.trampoline_pc();
DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
// Replace the current pc on the stack with the trampoline.
- it.frame()->set_pc(code.raw_instruction_start() + trampoline_pc);
+ // TODO(v8:10026): avoid replacing a signed pointer.
+ Address* pc_addr = it.frame()->pc_address();
+ Address new_pc = code.raw_instruction_start() + trampoline_pc;
+ PointerAuthentication::ReplacePC(pc_addr, new_pc, kSystemPointerSize);
}
}
}
@@ -528,17 +533,37 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
InternalFormalParameterCountWithReceiver(function.shared());
input_ = new (size) FrameDescription(size, parameter_count);
- if (kSupportsFixedDeoptExitSize) {
+ if (kSupportsFixedDeoptExitSizes) {
DCHECK_EQ(bailout_id_, kMaxUInt32);
// Calculate bailout id from return address.
- DCHECK_GT(kDeoptExitSize, 0);
+ DCHECK_GT(kNonLazyDeoptExitSize, 0);
+ DCHECK_GT(kLazyDeoptExitSize, 0);
DeoptimizationData deopt_data =
DeoptimizationData::cast(compiled_code_.deoptimization_data());
Address deopt_start = compiled_code_.raw_instruction_start() +
deopt_data.DeoptExitStart().value();
- int offset = static_cast<int>(from_ - kDeoptExitSize - deopt_start);
- DCHECK_EQ(0, offset % kDeoptExitSize);
- bailout_id_ = offset / kDeoptExitSize;
+ int non_lazy_deopt_count = deopt_data.NonLazyDeoptCount().value();
+ Address lazy_deopt_start =
+ deopt_start + non_lazy_deopt_count * kNonLazyDeoptExitSize;
+ // The deoptimization exits are sorted so that lazy deopt exits appear last.
+ static_assert(DeoptimizeKind::kLazy > DeoptimizeKind::kEager,
+ "lazy deopts are expected to be emitted last");
+ static_assert(DeoptimizeKind::kLazy > DeoptimizeKind::kSoft,
+ "lazy deopts are expected to be emitted last");
+    // from_ is the value of the link register after the call to the
+    // deoptimizer, so for the last non-lazy deopt, from_ points to the first
+    // lazy deopt exit; hence the <= comparison below.
+ if (from_ <= lazy_deopt_start) {
+ int offset =
+ static_cast<int>(from_ - kNonLazyDeoptExitSize - deopt_start);
+ DCHECK_EQ(0, offset % kNonLazyDeoptExitSize);
+ bailout_id_ = offset / kNonLazyDeoptExitSize;
+ } else {
+ int offset =
+ static_cast<int>(from_ - kLazyDeoptExitSize - lazy_deopt_start);
+ DCHECK_EQ(0, offset % kLazyDeoptExitSize);
+ bailout_id_ = non_lazy_deopt_count + (offset / kLazyDeoptExitSize);
+ }
}
}
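
The bailout-id recovery above is easier to follow with concrete numbers. The standalone sketch below mirrors the branch logic with invented sizes (4-byte non-lazy exits, 8-byte lazy exits, three non-lazy exits); the real sizes are the per-architecture `kNonLazyDeoptExitSize`/`kLazyDeoptExitSize` constants introduced in this patch.

```cpp
#include <cassert>
#include <cstdint>

// Illustration only: recover a bailout id from a return address, assuming
// 3 non-lazy exits of 4 bytes each followed by lazy exits of 8 bytes each.
int BailoutIdFromReturnAddress(uintptr_t from, uintptr_t deopt_start) {
  const int kNonLazySize = 4, kLazySize = 8, kNonLazyCount = 3;
  uintptr_t lazy_start = deopt_start + kNonLazyCount * kNonLazySize;
  if (from <= lazy_start) {
    // from is a return address, so the last non-lazy exit returns exactly
    // at lazy_start; the <= keeps it in the non-lazy bucket.
    int offset = static_cast<int>(from - kNonLazySize - deopt_start);
    assert(offset % kNonLazySize == 0);
    return offset / kNonLazySize;
  }
  int offset = static_cast<int>(from - kLazySize - lazy_start);
  assert(offset % kLazySize == 0);
  return kNonLazyCount + offset / kLazySize;
}
// E.g. from == deopt_start + 12 (== lazy_start) yields id 2, the last
// non-lazy exit; from == lazy_start + 8 yields id 3, the first lazy exit.
```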
@@ -690,6 +715,13 @@ void Deoptimizer::DoComputeOutputFrames() {
caller_fp_ = Memory<intptr_t>(fp_address);
caller_pc_ =
Memory<intptr_t>(fp_address + CommonFrameConstants::kCallerPCOffset);
+ // Sign caller_pc_ with caller_frame_top_ to be consistent with everything
+ // else here.
+ uint64_t sp = stack_fp_ + StandardFrameConstants::kCallerSPOffset;
+ // TODO(v8:10026): avoid replacing a signed pointer.
+ PointerAuthentication::ReplaceContext(
+ reinterpret_cast<Address*>(&caller_pc_), sp, caller_frame_top_);
+
input_frame_context_ = Memory<intptr_t>(
fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);
@@ -1717,7 +1749,8 @@ void Deoptimizer::MaterializeHeapObjects() {
Handle<Object> value = materialization.value_->GetValue();
if (trace_scope_ != nullptr) {
- PrintF("Materialization [" V8PRIxPTR_FMT "] <- " V8PRIxPTR_FMT " ; ",
+ PrintF(trace_scope_->file(),
+ "Materialization [" V8PRIxPTR_FMT "] <- " V8PRIxPTR_FMT " ; ",
static_cast<intptr_t>(materialization.output_slot_address_),
value->ptr());
value->ShortPrint(trace_scope_->file());
@@ -3869,7 +3902,7 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
if (frames_[i].kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuation &&
frames_[i].shared_info()->internal_formal_parameter_count() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ kDontAdaptArgumentsSentinel) {
DCHECK(frames_[i].shared_info()->IsApiFunction());
// The argument count for this special case is always the second
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index beb2a9aa50..41ef7d2336 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -513,11 +513,12 @@ class Deoptimizer : public Malloced {
// Set to true when the architecture supports deoptimization exit sequences
// of a fixed size, that can be sorted so that the deoptimization index is
// deduced from the address of the deoptimization exit.
- static const bool kSupportsFixedDeoptExitSize;
+ static const bool kSupportsFixedDeoptExitSizes;
// Size of deoptimization exit sequence. This is only meaningful when
- // kSupportsFixedDeoptExitSize is true.
- static const int kDeoptExitSize;
+ // kSupportsFixedDeoptExitSizes is true.
+ static const int kNonLazyDeoptExitSize;
+ static const int kLazyDeoptExitSize;
private:
friend class FrameWriter;
@@ -712,7 +713,7 @@ class FrameDescription {
void SetTop(intptr_t top) { top_ = top; }
intptr_t GetPc() const { return pc_; }
- void SetPc(intptr_t pc) { pc_ = pc; }
+ void SetPc(intptr_t pc);
intptr_t GetFp() const { return fp_; }
void SetFp(intptr_t fp) { fp_ = fp; }
diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index 2fd424a667..19be03c1e3 100644
--- a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -13,8 +13,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
-const int Deoptimizer::kDeoptExitSize = 0;
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
+const int Deoptimizer::kNonLazyDeoptExitSize = 0;
+const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
@@ -217,6 +218,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
UNREACHABLE();
}
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index bf82e2631b..80221c5cbe 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -10,8 +10,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
-const int Deoptimizer::kDeoptExitSize = 0;
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
+const int Deoptimizer::kNonLazyDeoptExitSize = 0;
+const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
@@ -236,6 +237,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
UNREACHABLE();
}
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index a1138d202f..3b8b1b9659 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -10,8 +10,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
-const int Deoptimizer::kDeoptExitSize = 0;
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
+const int Deoptimizer::kNonLazyDeoptExitSize = 0;
+const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
@@ -236,6 +237,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
UNREACHABLE();
}
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index 1d05968806..0a8798dcc8 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -11,8 +11,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
-const int Deoptimizer::kDeoptExitSize = 0;
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
+const int Deoptimizer::kNonLazyDeoptExitSize = 0;
+const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
@@ -258,6 +259,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 63ea22f71a..66d9d0db8e 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -10,8 +10,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
-const int Deoptimizer::kDeoptExitSize = 0;
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
+const int Deoptimizer::kNonLazyDeoptExitSize = 0;
+const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
@@ -254,6 +255,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
UNREACHABLE();
}
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index 724062c506..ea13361341 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -13,8 +13,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSize = false;
-const int Deoptimizer::kDeoptExitSize = 0;
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
+const int Deoptimizer::kNonLazyDeoptExitSize = 0;
+const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
@@ -221,18 +222,10 @@ Float32 RegisterValues::GetFloatRegister(unsigned n) const {
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
- if (kPCOnStackSize == 2 * kSystemPointerSize) {
- // Zero out the high-32 bit of PC for x32 port.
- SetFrameSlot(offset + kSystemPointerSize, 0);
- }
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
- if (kFPOnStackSize == 2 * kSystemPointerSize) {
- // Zero out the high-32 bit of FP for x32 port.
- SetFrameSlot(offset + kSystemPointerSize, 0);
- }
SetFrameSlot(offset, value);
}
@@ -241,6 +234,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
UNREACHABLE();
}
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index 5a87f73529..c2a82a5837 100644
--- a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -1436,7 +1436,7 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
#define PAUTH_CASE(NAME, MN) \
case NAME: \
mnemonic = MN; \
- form = NULL; \
+ form = nullptr; \
break;
PAUTH_SYSTEM_MNEMONICS(PAUTH_CASE)
@@ -1478,17 +1478,30 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
}
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
DCHECK(instr->Mask(SystemHintMask) == HINT);
+ form = nullptr;
switch (instr->ImmHint()) {
- case NOP: {
+ case NOP:
mnemonic = "nop";
- form = nullptr;
break;
- }
- case CSDB: {
+ case CSDB:
mnemonic = "csdb";
- form = nullptr;
break;
- }
+ case BTI:
+ mnemonic = "bti";
+ break;
+ case BTI_c:
+ mnemonic = "bti c";
+ break;
+ case BTI_j:
+ mnemonic = "bti j";
+ break;
+ case BTI_jc:
+ mnemonic = "bti jc";
+ break;
+ default:
+ // Fall back to 'hint #<imm7>'.
+ form = "'IH";
+ mnemonic = "hint";
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
switch (instr->Mask(MemBarrierMask)) {
diff --git a/deps/v8/src/diagnostics/code-tracer.h b/deps/v8/src/diagnostics/code-tracer.h
index 6dd8d3e086..a9f276bf44 100644
--- a/deps/v8/src/diagnostics/code-tracer.h
+++ b/deps/v8/src/diagnostics/code-tracer.h
@@ -52,6 +52,9 @@ class CodeTracer final : public Malloced {
if (file_ == nullptr) {
file_ = base::OS::FOpen(filename_.begin(), "ab");
+ CHECK_WITH_MSG(file_ != nullptr,
+ "could not open file. If on Android, try passing "
+ "--redirect-code-traces-to=/sdcard/Download/<file-name>");
}
scope_depth_++;
@@ -63,6 +66,7 @@ class CodeTracer final : public Malloced {
}
if (--scope_depth_ == 0) {
+ DCHECK_NOT_NULL(file_);
fclose(file_);
file_ = nullptr;
}
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index 0cb36d1228..129c2d72e1 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -781,6 +781,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
break; \
}
+ SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
#undef DECLARE_SSE_AVX_RM_DIS_CASE
default:
@@ -2100,6 +2101,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSSE3_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
SSE4_RM_INSTRUCTION_LIST(SSE34_DIS_CASE)
#undef SSE34_DIS_CASE
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index d087a6d21d..4a9d029a05 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -32,6 +32,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/layout-descriptor.h"
#include "src/objects/objects-inl.h"
+#include "src/roots/roots.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"
#include "src/objects/js-collator-inl.h"
@@ -138,6 +139,10 @@ void Smi::SmiVerify(Isolate* isolate) {
CHECK(!IsConstructor());
}
+void TaggedIndex::TaggedIndexVerify(Isolate* isolate) {
+ CHECK(IsTaggedIndex());
+}
+
void HeapObject::HeapObjectVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::HeapObjectVerify(*this, isolate);
@@ -242,6 +247,7 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
// Every class that has its fields defined in a .tq file and corresponds
// to exactly one InstanceType value is included in the following list.
TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED(MAKE_TORQUE_CASE)
+ TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(MAKE_TORQUE_CASE)
#undef MAKE_TORQUE_CASE
case ALLOCATION_SITE_TYPE:
@@ -812,12 +818,20 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
}
void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
+ // TODO(leszeks): Add a TorqueGeneratedClassVerifier for OffThreadIsolate.
TorqueGeneratedClassVerifiers::SharedFunctionInfoVerify(*this, isolate);
+ this->SharedFunctionInfoVerify(ReadOnlyRoots(isolate));
+}
+
+void SharedFunctionInfo::SharedFunctionInfoVerify(OffThreadIsolate* isolate) {
+ this->SharedFunctionInfoVerify(ReadOnlyRoots(isolate));
+}
+void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
Object value = name_or_scope_info();
if (value.IsScopeInfo()) {
CHECK_LT(0, ScopeInfo::cast(value).length());
- CHECK_NE(value, ReadOnlyRoots(isolate).empty_scope_info());
+ CHECK_NE(value, roots.empty_scope_info());
}
CHECK(HasWasmExportedFunctionData() || IsApiFunction() ||
@@ -826,13 +840,13 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
HasUncompiledDataWithoutPreparseData() || HasWasmJSFunctionData() ||
HasWasmCapiFunctionData());
- CHECK(script_or_debug_info().IsUndefined(isolate) ||
+ CHECK(script_or_debug_info().IsUndefined(roots) ||
script_or_debug_info().IsScript() || HasDebugInfo());
if (!is_compiled()) {
CHECK(!HasFeedbackMetadata());
CHECK(outer_scope_info().IsScopeInfo() ||
- outer_scope_info().IsTheHole(isolate));
+ outer_scope_info().IsTheHole(roots));
} else if (HasBytecodeArray() && HasFeedbackMetadata()) {
CHECK(feedback_metadata().IsFeedbackMetadata());
}
@@ -1057,8 +1071,8 @@ void WeakCell::WeakCellVerify(Isolate* isolate) {
CHECK(key_list_next().IsWeakCell() || key_list_next().IsUndefined(isolate));
- CHECK(finalization_group().IsUndefined(isolate) ||
- finalization_group().IsJSFinalizationGroup());
+ CHECK(finalization_registry().IsUndefined(isolate) ||
+ finalization_registry().IsJSFinalizationRegistry());
}
void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
@@ -1067,8 +1081,8 @@ void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
CHECK(target().IsUndefined(isolate) || target().IsJSReceiver());
}
-void JSFinalizationGroup::JSFinalizationGroupVerify(Isolate* isolate) {
- CHECK(IsJSFinalizationGroup());
+void JSFinalizationRegistry::JSFinalizationRegistryVerify(Isolate* isolate) {
+ CHECK(IsJSFinalizationRegistry());
JSObjectVerify(isolate);
VerifyHeapPointer(isolate, cleanup());
CHECK(active_cells().IsUndefined(isolate) || active_cells().IsWeakCell());
@@ -1079,14 +1093,15 @@ void JSFinalizationGroup::JSFinalizationGroupVerify(Isolate* isolate) {
if (cleared_cells().IsWeakCell()) {
CHECK(WeakCell::cast(cleared_cells()).prev().IsUndefined(isolate));
}
- CHECK(next().IsUndefined(isolate) || next().IsJSFinalizationGroup());
+ CHECK(next_dirty().IsUndefined(isolate) ||
+ next_dirty().IsJSFinalizationRegistry());
}
-void JSFinalizationGroupCleanupIterator::
- JSFinalizationGroupCleanupIteratorVerify(Isolate* isolate) {
- CHECK(IsJSFinalizationGroupCleanupIterator());
+void JSFinalizationRegistryCleanupIterator::
+ JSFinalizationRegistryCleanupIteratorVerify(Isolate* isolate) {
+ CHECK(IsJSFinalizationRegistryCleanupIterator());
JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, finalization_group());
+ VerifyHeapPointer(isolate, finalization_registry());
}
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
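
The `SharedFunctionInfo` change above introduces a pattern worth noting: the actual checks are phrased against `ReadOnlyRoots`, so one body serves both `Isolate` and `OffThreadIsolate`. A schematic version with a hypothetical class `Foo` (not from the patch):

```cpp
// Schematic only; Foo is hypothetical. Both isolate flavours funnel into a
// single ReadOnlyRoots-based core, so each check is written once.
class Foo {
 public:
  void FooVerify(Isolate* isolate) { FooVerify(ReadOnlyRoots(isolate)); }
  void FooVerify(OffThreadIsolate* isolate) {
    FooVerify(ReadOnlyRoots(isolate));
  }

 private:
  void FooVerify(ReadOnlyRoots roots) {
    // Checks use roots rather than an isolate, e.g.:
    // CHECK_NE(name_or_scope_info(), roots.empty_scope_info());
  }
};
```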
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 9dae2881b9..e36171edfd 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -207,6 +207,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
// Every class that has its fields defined in a .tq file and corresponds
// to exactly one InstanceType value is included in the following list.
TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED(MAKE_TORQUE_CASE)
+ TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(MAKE_TORQUE_CASE)
#undef MAKE_TORQUE_CASE
case ALLOCATION_SITE_TYPE:
@@ -1150,7 +1151,7 @@ void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
void WeakCell::WeakCellPrint(std::ostream& os) {
PrintHeader(os, "WeakCell");
- os << "\n - finalization_group: " << Brief(finalization_group());
+ os << "\n - finalization_registry: " << Brief(finalization_registry());
os << "\n - target: " << Brief(target());
os << "\n - holdings: " << Brief(holdings());
os << "\n - prev: " << Brief(prev());
@@ -1166,8 +1167,8 @@ void JSWeakRef::JSWeakRefPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
-void JSFinalizationGroup::JSFinalizationGroupPrint(std::ostream& os) {
- JSObjectPrintHeader(os, *this, "JSFinalizationGroup");
+void JSFinalizationRegistry::JSFinalizationRegistryPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSFinalizationRegistry");
os << "\n - native_context: " << Brief(native_context());
os << "\n - cleanup: " << Brief(cleanup());
os << "\n - active_cells: " << Brief(active_cells());
@@ -1186,10 +1187,10 @@ void JSFinalizationGroup::JSFinalizationGroupPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
-void JSFinalizationGroupCleanupIterator::
- JSFinalizationGroupCleanupIteratorPrint(std::ostream& os) {
- JSObjectPrintHeader(os, *this, "JSFinalizationGroupCleanupIterator");
- os << "\n - finalization_group: " << Brief(finalization_group());
+void JSFinalizationRegistryCleanupIterator::
+ JSFinalizationRegistryCleanupIteratorPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSFinalizationRegistryCleanupIterator");
+ os << "\n - finalization_registry: " << Brief(finalization_registry());
JSObjectPrintBody(os, *this);
}
@@ -1306,12 +1307,12 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
}
if (WasmExportedFunction::IsWasmExportedFunction(*this)) {
WasmExportedFunction function = WasmExportedFunction::cast(*this);
- os << "\n - WASM instance: " << Brief(function.instance());
- os << "\n - WASM function index: " << function.function_index();
+ os << "\n - Wasm instance: " << Brief(function.instance());
+ os << "\n - Wasm function index: " << function.function_index();
}
if (WasmJSFunction::IsWasmJSFunction(*this)) {
WasmJSFunction function = WasmJSFunction::cast(*this);
- os << "\n - WASM wrapper around: " << Brief(function.GetCallable());
+ os << "\n - Wasm wrapper around: " << Brief(function.GetCallable());
}
shared().PrintSourceCode(os);
JSObjectPrintBody(os, *this);
@@ -1364,6 +1365,12 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
if (HasInferredName()) {
os << "\n - inferred name: " << Brief(inferred_name());
}
+ if (class_scope_has_private_brand()) {
+ os << "\n - class_scope_has_private_brand";
+ }
+ if (has_static_private_methods_or_accessors()) {
+ os << "\n - has_static_private_methods_or_accessors";
+ }
os << "\n - kind: " << kind();
os << "\n - syntax kind: " << syntax_kind();
if (needs_home_object()) {
@@ -1779,7 +1786,7 @@ void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) { // NOLINT
os << "\n - tagged_buffer: " << Brief(tagged_buffer());
os << "\n - offset: " << offset();
os << "\n - flags: " << flags();
- os << "\n - type: " << type();
+ os << "\n - type: " << type().kind();
os << "\n - is_mutable: " << is_mutable();
os << "\n";
}
@@ -1986,15 +1993,18 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n - context data: " << Brief(context_data());
os << "\n - compilation type: " << compilation_type();
os << "\n - line ends: " << Brief(line_ends());
- if (has_eval_from_shared()) {
- os << "\n - eval from shared: " << Brief(eval_from_shared());
- }
- if (is_wrapped()) {
- os << "\n - wrapped arguments: " << Brief(wrapped_arguments());
- }
- os << "\n - eval from position: " << eval_from_position();
- if (has_wasm_breakpoint_infos()) {
- os << "\n - wasm_breakpoint_infos: " << Brief(wasm_breakpoint_infos());
+ if (type() == TYPE_WASM) {
+ if (has_wasm_breakpoint_infos()) {
+ os << "\n - wasm_breakpoint_infos: " << Brief(wasm_breakpoint_infos());
+ }
+ } else {
+ if (has_eval_from_shared()) {
+ os << "\n - eval from shared: " << Brief(eval_from_shared());
+ }
+ if (is_wrapped()) {
+ os << "\n - wrapped arguments: " << Brief(wrapped_arguments());
+ }
+ os << "\n - eval from position: " << eval_from_position();
}
os << "\n - shared function infos: " << Brief(shared_function_infos());
os << "\n";
@@ -2004,7 +2014,6 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
void JSV8BreakIterator::JSV8BreakIteratorPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, *this, "JSV8BreakIterator");
os << "\n - locale: " << Brief(locale());
- os << "\n - type: " << TypeAsString();
os << "\n - break iterator: " << Brief(break_iterator());
os << "\n - unicode string: " << Brief(unicode_string());
os << "\n - bound adopt text: " << Brief(bound_adopt_text());
@@ -2059,7 +2068,6 @@ void JSLocale::JSLocalePrint(std::ostream& os) { // NOLINT
void JSNumberFormat::JSNumberFormatPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, *this, "JSNumberFormat");
os << "\n - locale: " << Brief(locale());
- os << "\n - numberingSystem: " << Brief(numberingSystem());
os << "\n - icu_number_formatter: " << Brief(icu_number_formatter());
os << "\n - bound_format: " << Brief(bound_format());
JSObjectPrintBody(os, *this);
@@ -2079,7 +2087,6 @@ void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(
JSObjectPrintHeader(os, *this, "JSRelativeTimeFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - numberingSystem: " << Brief(numberingSystem());
- os << "\n - style: " << StyleAsString();
os << "\n - numeric: " << NumericAsString();
os << "\n - icu formatter: " << Brief(icu_formatter());
os << "\n";
diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index 2192063508..dbe78ddf2d 100644
--- a/deps/v8/src/diagnostics/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -81,7 +81,8 @@ class PerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachIA32 = 3;
static const uint32_t kElfMachX64 = 62;
static const uint32_t kElfMachARM = 40;
- static const uint32_t kElfMachMIPS = 10;
+ static const uint32_t kElfMachMIPS = 8;
+ static const uint32_t kElfMachMIPS64 = 8;
static const uint32_t kElfMachARM64 = 183;
static const uint32_t kElfMachS390x = 22;
static const uint32_t kElfMachPPC64 = 21;
@@ -95,6 +96,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachARM;
#elif V8_TARGET_ARCH_MIPS
return kElfMachMIPS;
+#elif V8_TARGET_ARCH_MIPS64
+ return kElfMachMIPS64;
#elif V8_TARGET_ARCH_ARM64
return kElfMachARM64;
#elif V8_TARGET_ARCH_S390X
diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index e7d26858e5..99767f17dc 100644
--- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -27,7 +27,7 @@
#include <stdio.h>
#include <string.h>
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/base/platform/platform.h"
#include "src/codegen/macro-assembler.h"
@@ -1519,4 +1519,4 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
} // namespace disasm
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/diagnostics/unwinder.cc b/deps/v8/src/diagnostics/unwinder.cc
index 64adf17b82..c08fe20330 100644
--- a/deps/v8/src/diagnostics/unwinder.cc
+++ b/deps/v8/src/diagnostics/unwinder.cc
@@ -7,6 +7,7 @@
#include "include/v8.h"
#include "src/common/globals.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/pointer-authentication.h"
namespace v8 {
@@ -87,8 +88,9 @@ void* GetReturnAddressFromFP(void* fp, void* pc,
caller_pc_offset = i::EntryFrameConstants::kDirectCallerPCOffset;
}
#endif
- return reinterpret_cast<void*>(
- Load(reinterpret_cast<i::Address>(fp) + caller_pc_offset));
+ i::Address ret_addr =
+ Load(reinterpret_cast<i::Address>(fp) + caller_pc_offset);
+ return reinterpret_cast<void*>(i::PointerAuthentication::StripPAC(ret_addr));
}
void* GetReturnAddressFromFP(void* fp, void* pc,
@@ -99,8 +101,9 @@ void* GetReturnAddressFromFP(void* fp, void* pc,
caller_pc_offset = i::EntryFrameConstants::kDirectCallerPCOffset;
}
#endif
- return reinterpret_cast<void*>(
- Load(reinterpret_cast<i::Address>(fp) + caller_pc_offset));
+ i::Address ret_addr =
+ Load(reinterpret_cast<i::Address>(fp) + caller_pc_offset);
+ return reinterpret_cast<void*>(i::PointerAuthentication::StripPAC(ret_addr));
}
void* GetCallerFPFromFP(void* fp, void* pc,
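
Both overloads above now strip the pointer authentication code before handing a return address to the embedder. The real `StripPAC` uses the `XPACLRI` instruction (or the simulator); as a rough standalone illustration only, the PAC lives in the otherwise-unused high bits of the address, so stripping amounts to restoring the canonical sign extension. The 48-bit virtual address width below is an assumption for illustration, not V8's implementation.

```cpp
#include <cstdint>

// Conceptual approximation of PAC stripping (NOT the real implementation):
// with 48-bit virtual addresses and no tagging, the PAC occupies the top
// bits, so stripping restores the sign extension of bit 47.
uint64_t StripPACApprox(uint64_t addr) {
  constexpr int kVABits = 48;  // Assumed virtual address width.
  constexpr uint64_t kMask = (uint64_t{1} << kVABits) - 1;
  bool upper_half = (addr >> (kVABits - 1)) & 1;  // Bit 47 selects the half.
  return (addr & kMask) | (upper_half ? ~kMask : uint64_t{0});
}
```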
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index cd1b976c18..f3b9a753af 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -16,37 +16,6 @@
#error "Unsupported OS"
#endif // V8_OS_WIN_X64
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
@@ -548,7 +517,7 @@ void RegisterNonABICompliantCodeRange(void* start, size_t size_in_bytes) {
// When the --win64-unwinding-info flag is set, we call
// RtlAddGrowableFunctionTable to register unwinding info for the whole code
- // range of an isolate or WASM module. This enables the Windows OS stack
+ // range of an isolate or Wasm module. This enables the Windows OS stack
// unwinder to work correctly with V8-generated code, enabling stack walking
// in Windows debuggers and performance tools. However, the
// RtlAddGrowableFunctionTable API is only supported on Windows 8 and above.
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.h b/deps/v8/src/diagnostics/unwinding-info-win64.h
index 102df15590..ca66437e00 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.h
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.h
@@ -32,7 +32,7 @@ bool CanEmitUnwindInfoForBuiltins();
/**
 * Returns true if we can register unwinding data for the whole code range
- * of an isolate or WASM module. The first page of the code range is reserved
+ * of an isolate or Wasm module. The first page of the code range is reserved
* and writable, to be used to store unwind data, as documented in:
* https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64.
* In jitless mode V8 does not allocate any executable memory itself so the only
@@ -52,9 +52,9 @@ void UnregisterNonABICompliantCodeRange(void* start);
/**
* Default count of RUNTIME_FUNCTION needed. For Windows X64, 1 RUNTIME_FUNCTION
* covers 4GB range which is sufficient to cover the whole code range of an
- * isolate or WASM module. For Windows ARM64, 1 RUNTIME_FUNCTION covers
+ * isolate or Wasm module. For Windows ARM64, 1 RUNTIME_FUNCTION covers
* kMaxFunctionLength bytes so multiple RUNTIME_FUNCTION structs could be needed
- * to cover the whole code range of an isolate or WASM module. The extra
+ * to cover the whole code range of an isolate or Wasm module. The extra
* RUNTIME_FUNCTIONs are assumed following the first one in the reserved page.
*/
static const uint32_t kDefaultRuntimeFunctionCount = 1;
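
For the ARM64 case described above, the entry count follows from a simple ceiling division; a hypothetical helper (names invented) for illustration:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical helper: entries needed when one RUNTIME_FUNCTION covers at
// most max_function_length bytes (the Windows ARM64 case above). On x64 a
// single entry covers 4GB, hence kDefaultRuntimeFunctionCount == 1.
uint32_t RuntimeFunctionCount(size_t code_range_size,
                              size_t max_function_length) {
  return static_cast<uint32_t>(
      (code_range_size + max_function_length - 1) / max_function_length);
}
```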
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index a1331784a3..d603e6169c 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -929,15 +929,16 @@ int DisassemblerX64::AVXInstruction(byte* data) {
SSE4_2_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
#undef DECLARE_SSE_AVX_DIS_CASE
-#define DECLARE_SSE_PMOV_AVX_DIS_CASE(instruction, notUsed1, notUsed2, \
+#define DECLARE_SSE_UNOP_AVX_DIS_CASE(instruction, notUsed1, notUsed2, \
notUsed3, opcode) \
case 0x##opcode: { \
AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
current += PrintRightXMMOperand(current); \
break; \
}
- SSE4_PMOV_INSTRUCTION_LIST(DECLARE_SSE_PMOV_AVX_DIS_CASE)
-#undef DECLARE_SSE_PMOV_AVX_DIS_CASE
+ SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE_UNOP_AVX_DIS_CASE)
+ SSE4_UNOP_INSTRUCTION_LIST(DECLARE_SSE_UNOP_AVX_DIS_CASE)
+#undef DECLARE_SSE_UNOP_AVX_DIS_CASE
default:
UnimplementedInstruction();
@@ -1067,6 +1068,10 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5B:
+ AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5C:
AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1091,6 +1096,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer("vmovdqu %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x70:
+ AppendToBuffer("vpshufhw %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
case 0x7F:
AppendToBuffer("vmovdqu ");
current += PrintRightXMMOperand(current);
@@ -1426,21 +1436,6 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer("vmovmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x54:
- AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x56:
- AppendToBuffer("vorpd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x57:
- AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
case 0x6E:
AppendToBuffer("vmov%c %s,", vex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
@@ -1505,6 +1500,15 @@ int DisassemblerX64::AVXInstruction(byte* data) {
SSE2_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
#undef DECLARE_SSE_AVX_DIS_CASE
+#define DECLARE_SSE_UNOP_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
+ case 0x##opcode: { \
+ AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
+ current += PrintRightXMMOperand(current); \
+ break; \
+ }
+
+ SSE2_UNOP_INSTRUCTION_LIST(DECLARE_SSE_UNOP_AVX_DIS_CASE)
+#undef DECLARE_SSE_UNOP_AVX_DIS_CASE
default:
UnimplementedInstruction();
}
@@ -1815,8 +1819,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
}
SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSSE3_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_PMOV_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
SSE4_2_INSTRUCTION_LIST(SSE34_DIS_CASE)
#undef SSE34_DIS_CASE
default:
@@ -2049,7 +2054,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xD2) {
mnemonic = "psrld";
} else if (opcode == 0xD3) {
- mnemonic = "psrld";
+ mnemonic = "psrlq";
} else if (opcode == 0xD4) {
mnemonic = "paddq";
} else if (opcode == 0xD5) {
diff --git a/deps/v8/src/execution/arguments-inl.h b/deps/v8/src/execution/arguments-inl.h
index 4565f5d265..0be2325837 100644
--- a/deps/v8/src/execution/arguments-inl.h
+++ b/deps/v8/src/execution/arguments-inl.h
@@ -9,20 +9,26 @@
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h" // TODO(jkummerow): Just smi-inl.h.
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
-template <class S>
-Handle<S> Arguments::at(int index) const {
- return Handle<S>::cast(at<Object>(index));
+template <ArgumentsType T>
+int Arguments<T>::smi_at(int index) const {
+ return Smi::ToInt(Object(*address_of_arg_at(index)));
}
-int Arguments::smi_at(int index) const {
- return Smi::ToInt(Object(*address_of_arg_at(index)));
+template <ArgumentsType T>
+int Arguments<T>::tagged_index_at(int index) const {
+ Address raw = *address_of_arg_at(index);
+ return static_cast<int>(TaggedIndex(raw).value());
}
-double Arguments::number_at(int index) const { return (*this)[index].Number(); }
+template <ArgumentsType T>
+double Arguments<T>::number_at(int index) const {
+ return (*this)[index].Number();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/arguments.h b/deps/v8/src/execution/arguments.h
index 77bbe62dfc..d2798e6f76 100644
--- a/deps/v8/src/execution/arguments.h
+++ b/deps/v8/src/execution/arguments.h
@@ -30,6 +30,7 @@ namespace internal {
// Note that length_ (whose value is in the integer range) is defined
// as intptr_t to provide endian-neutrality on 64-bit archs.
+template <ArgumentsType arguments_type>
class Arguments {
public:
Arguments(int length, Address* arguments)
@@ -46,6 +47,8 @@ class Arguments {
inline int smi_at(int index) const;
+ inline int tagged_index_at(int index) const;
+
inline double number_at(int index) const;
inline void set_at(int index, Object value) {
@@ -57,26 +60,47 @@ class Arguments {
}
inline Address* address_of_arg_at(int index) const {
- DCHECK_LT(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
+ DCHECK_LE(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
+ uintptr_t offset = index * kSystemPointerSize;
+#ifdef V8_REVERSE_JSARGS
+ if (arguments_type == ArgumentsType::kJS) {
+ offset = (length_ - index - 1) * kSystemPointerSize;
+ }
+#endif
return reinterpret_cast<Address*>(reinterpret_cast<Address>(arguments_) -
- index * kSystemPointerSize);
+ offset);
}
// Get the total number of arguments including the receiver.
int length() const { return static_cast<int>(length_); }
// Arguments on the stack are in reverse order (compared to an array).
- FullObjectSlot first_slot() const { return slot_at(length() - 1); }
- FullObjectSlot last_slot() const { return slot_at(0); }
+ FullObjectSlot first_slot() const {
+ int index = length() - 1;
+#ifdef V8_REVERSE_JSARGS
+ if (arguments_type == ArgumentsType::kJS) index = 0;
+#endif
+ return slot_at(index);
+ }
+
+ FullObjectSlot last_slot() const {
+ int index = 0;
+#ifdef V8_REVERSE_JSARGS
+ if (arguments_type == ArgumentsType::kJS) index = length() - 1;
+#endif
+ return slot_at(index);
+ }
private:
intptr_t length_;
Address* arguments_;
};
-template <>
-inline Handle<Object> Arguments::at(int index) const {
- return Handle<Object>(address_of_arg_at(index));
+template <ArgumentsType T>
+template <class S>
+Handle<S> Arguments<T>::at(int index) const {
+ Handle<Object> obj = Handle<Object>(address_of_arg_at(index));
+ return Handle<S>::cast(obj);
}
double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
@@ -90,7 +114,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, InternalType, Convert, Name) \
- static V8_INLINE InternalType __RT_impl_##Name(Arguments args, \
+ static V8_INLINE InternalType __RT_impl_##Name(RuntimeArguments args, \
Isolate* isolate); \
\
V8_NOINLINE static Type Stats_##Name(int args_length, Address* args_object, \
@@ -98,7 +122,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::k##Name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Runtime_" #Name); \
- Arguments args(args_length, args_object); \
+ RuntimeArguments args(args_length, args_object); \
return Convert(__RT_impl_##Name(args, isolate)); \
} \
\
@@ -108,11 +132,11 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) { \
return Stats_##Name(args_length, args_object, isolate); \
} \
- Arguments args(args_length, args_object); \
+ RuntimeArguments args(args_length, args_object); \
return Convert(__RT_impl_##Name(args, isolate)); \
} \
\
- static InternalType __RT_impl_##Name(Arguments args, Isolate* isolate)
+ static InternalType __RT_impl_##Name(RuntimeArguments args, Isolate* isolate)
#define CONVERT_OBJECT(x) (x).ptr()
#define CONVERT_OBJECTPAIR(x) (x)
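
The `V8_REVERSE_JSARGS` arithmetic above only flips the slot order for JS-convention frames. A standalone sketch of the index-to-offset mapping, with the pointer size fixed at 8 purely for illustration:

```cpp
#include <cstddef>

// Mirrors address_of_arg_at(): the byte offset subtracted from the
// arguments base pointer, for normal and reversed JS argument layouts.
size_t ArgOffset(int index, int length, bool reverse_js_args) {
  const size_t kPointerSize = 8;  // Illustrative; really kSystemPointerSize.
  int slot = reverse_js_args ? (length - index - 1) : index;
  return static_cast<size_t>(slot) * kPointerSize;
}
// With length == 3: index 0 is slot 0 normally but slot 2 when reversed,
// which is why first_slot()/last_slot() above swap their indices.
```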
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.h b/deps/v8/src/execution/arm/frame-constants-arm.h
index faa3cc8ab7..cd9c4527b3 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.h
+++ b/deps/v8/src/execution/arm/frame-constants-arm.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_ARM_FRAME_CONSTANTS_ARM_H_
#define V8_EXECUTION_ARM_FRAME_CONSTANTS_ARM_H_
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/execution/frame-constants.h"
@@ -36,6 +37,44 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedFpParamRegs * kDoubleSize;
};
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {r0, r1, r2, r3, r4, r5, r6, r8, r9}
+ static constexpr uint32_t kPushedGpRegs = 0b1101111111;
+ // {d0 .. d12}
+ static constexpr int kFirstPushedFpReg = 0;
+ static constexpr int kLastPushedFpReg = 12;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ kLastPushedFpReg - kFirstPushedFpReg + 1;
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -TypedFrameConstants::kFixedFrameSizeFromFp -
+ kSystemPointerSize * kNumPushedGpRegisters;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kDoubleSize * kNumPushedFpRegisters;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_LE(kFirstPushedFpReg, reg_code);
+ DCHECK_GE(kLastPushedFpReg, reg_code);
+ return kLastPushedFpRegisterOffset +
+ (reg_code - kFirstPushedFpReg) * kDoubleSize;
+ }
+};
+
} // namespace internal
} // namespace v8
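
The register-offset computation above relies on a popcount of the mask bits below the register's own bit. A standalone mirror using the ARM mask from this hunk:

```cpp
#include <cstdint>

// Standalone mirror of GetPushedGpRegisterOffset() using the ARM mask above:
// kPushedGpRegs = 0b1101111111 pushes {r0..r6, r8, r9}. The popcount of the
// lower bits is the register's slot index among the pushed registers.
int PushedSlotIndex(int reg_code) {
  constexpr uint32_t kPushedGpRegs = 0b1101111111;
  uint32_t lower = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
  int count = 0;
  while (lower != 0) {  // Kernighan popcount, as base::bits does in spirit.
    lower &= lower - 1;
    ++count;
  }
  return count;
}
// PushedSlotIndex(8) == 7: r7 is not in the mask, so r8 sits right after r6.
// The fp-relative offset is then kLastPushedGpRegisterOffset +
// PushedSlotIndex(reg_code) * kSystemPointerSize.
```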
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 193a996676..a409dc97d2 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -19,6 +19,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/diagnostics/disasm.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/utils/ostreams.h"
@@ -43,8 +44,6 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
class ArmDebugger {
public:
explicit ArmDebugger(Simulator* sim) : sim_(sim) {}
-
- void Stop(Instruction* instr);
void Debug();
private:
@@ -61,26 +60,20 @@ class ArmDebugger {
bool GetVFPSingleValue(const char* desc, float* value);
bool GetVFPDoubleValue(const char* desc, double* value);
- // Set or delete a breakpoint. Returns true if successful.
+ // Set or delete breakpoint (there can be only one).
bool SetBreakpoint(Instruction* breakpc);
- bool DeleteBreakpoint(Instruction* breakpc);
+ void DeleteBreakpoint();
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
+ // Undo and redo the breakpoint. This is needed to bracket disassembly and
+ // execution to skip past the breakpoint when run from the debugger.
+ void UndoBreakpoint();
+ void RedoBreakpoint();
};
-void ArmDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Print the stop message and code if it is not the default code.
- if (code != kMaxStopCode) {
- PrintF("Simulator hit stop %u\n", code);
- } else {
- PrintF("Simulator hit\n");
- }
- Debug();
+void Simulator::DebugAtNextPC() {
+ PrintF("Starting debugger on the next instruction:\n");
+ set_pc(get_pc() + kInstrSize);
+ ArmDebugger(this).Debug();
}
int32_t ArmDebugger::GetRegisterValue(int regnum) {
@@ -148,25 +141,33 @@ bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
return true;
}
-bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
+namespace {
+// This function is dangerous, but it's only available in non-production
+// (simulator) builds.
+void SetInstructionBitsInCodeSpace(Instruction* instr, Instr value,
+ Heap* heap) {
+ CodeSpaceMemoryModificationScope scope(heap);
+ instr->SetInstructionBits(value);
+}
+} // namespace
+void ArmDebugger::DeleteBreakpoint() {
+ UndoBreakpoint();
sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
- return true;
}
-void ArmDebugger::UndoBreakpoints() {
+void ArmDebugger::UndoBreakpoint() {
if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ SetInstructionBitsInCodeSpace(sim_->break_pc_, sim_->break_instr_,
+ sim_->isolate_->heap());
}
}
-void ArmDebugger::RedoBreakpoints() {
+void ArmDebugger::RedoBreakpoint() {
if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ SetInstructionBitsInCodeSpace(sim_->break_pc_, kBreakpointInstr,
+ sim_->isolate_->heap());
}
}
@@ -190,9 +191,9 @@ void ArmDebugger::Debug() {
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
+ // Unset breakpoint while running in the debugger shell, making it invisible
+ // to all commands.
+ UndoBreakpoint();
while (!done && !sim_->has_bad_pc()) {
if (last_pc != sim_->get_pc()) {
@@ -292,7 +293,8 @@ void ArmDebugger::Debug() {
} else {
PrintF("printobject <value>\n");
}
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
int32_t* cur = nullptr;
int32_t* end = nullptr;
int next_arg = 1;
@@ -319,20 +321,23 @@ void ArmDebugger::Debug() {
}
end = cur + words;
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
while (cur < end) {
PrintF(" 0x%08" V8PRIxPTR ": 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi() ||
- IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
- PrintF(" (");
- if (obj.IsSmi()) {
- PrintF("smi %d", Smi::ToInt(obj));
- } else {
- obj.ShortPrint();
+ if (!skip_obj_print) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
}
- PrintF(")");
}
PrintF("\n");
cur++;
@@ -402,9 +407,7 @@ void ArmDebugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(nullptr)) {
- PrintF("deleting breakpoint failed\n");
- }
+ DeleteBreakpoint();
} else if (strcmp(cmd, "flags") == 0) {
PrintF("N flag: %d; ", sim_->n_flag_);
PrintF("Z flag: %d; ", sim_->z_flag_);
@@ -421,8 +424,9 @@ void ArmDebugger::Debug() {
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
- if (sim_->isStopInstruction(stop_instr)) {
- stop_instr->SetInstructionBits(kNopInstr);
+ if (stop_instr->IsStop()) {
+ SetInstructionBitsInCodeSpace(stop_instr, kNopInstr,
+ sim_->isolate_->heap());
} else {
PrintF("Not at debugger stop.\n");
}
@@ -486,6 +490,10 @@ void ArmDebugger::Debug() {
PrintF(" dump stack content, default dump 10 words)\n");
PrintF("mem <address> [<words>]\n");
PrintF(" dump memory content, default dump 10 words)\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects, default "
+ "dump 10 words)\n");
PrintF("disasm [<instructions>]\n");
PrintF("disasm [<address/register>]\n");
PrintF("disasm [[<address/register>] <instructions>]\n");
@@ -526,9 +534,9 @@ void ArmDebugger::Debug() {
}
}
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
+ // Reinstall breakpoint to stop execution and enter the debugger shell when
+ // hit.
+ RedoBreakpoint();
#undef COMMAND_SIZE
#undef ARG_SIZE
@@ -1785,13 +1793,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
set_pc(get_register(lr));
break;
}
- case kBreakpoint: {
- ArmDebugger dbg(this);
- dbg.Debug();
+ case kBreakpoint:
+ ArmDebugger(this).Debug();
break;
- }
// stop uses all codes greater than 1 << 23.
- default: {
+ default:
if (svc >= (1 << 23)) {
uint32_t code = svc & kStopCodeMask;
if (isWatchedStop(code)) {
@@ -1800,15 +1806,17 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
if (isEnabledStop(code)) {
- ArmDebugger dbg(this);
- dbg.Stop(instr);
+ if (code != kMaxStopCode) {
+ PrintF("Simulator hit stop %u. ", code);
+ } else {
+ PrintF("Simulator hit stop. ");
+ }
+ DebugAtNextPC();
}
} else {
// This is not a valid svc code.
UNREACHABLE();
- break;
}
- }
}
}
@@ -1848,10 +1856,6 @@ Float64 Simulator::canonicalizeNaN(Float64 value) {
}
// Stop helper functions.
-bool Simulator::isStopInstruction(Instruction* instr) {
- return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
-}
-
bool Simulator::isWatchedStop(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
return code < kNumOfWatchedStops;
@@ -2243,12 +2247,10 @@ void Simulator::DecodeType01(Instruction* instr) {
set_register(lr, old_pc + kInstrSize);
break;
}
- case BKPT: {
- ArmDebugger dbg(this);
- PrintF("Simulator hit BKPT.\n");
- dbg.Debug();
+ case BKPT:
+ PrintF("Simulator hit BKPT. ");
+ DebugAtNextPC();
break;
- }
default:
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/execution/arm/simulator-arm.h b/deps/v8/src/execution/arm/simulator-arm.h
index 0a21b30ac3..e577e0f815 100644
--- a/deps/v8/src/execution/arm/simulator-arm.h
+++ b/deps/v8/src/execution/arm/simulator-arm.h
@@ -330,9 +330,9 @@ class Simulator : public SimulatorBase {
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
+ void DebugAtNextPC();
// Stop helper functions.
- inline bool isStopInstruction(Instruction* instr);
inline bool isWatchedStop(uint32_t bkpt_code);
inline bool isEnabledStop(uint32_t bkpt_code);
inline void EnableStop(uint32_t bkpt_code);
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.h b/deps/v8/src/execution/arm64/frame-constants-arm64.h
index 4617d48d71..4a09e5b911 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_ARM64_FRAME_CONSTANTS_ARM64_H_
#define V8_EXECUTION_ARM64_FRAME_CONSTANTS_ARM64_H_
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/execution/frame-constants.h"
@@ -87,6 +88,46 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedFpParamRegs * kDoubleSize;
};
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {x0 .. x28} \ {x16, x17, x18, x26, x27}
+ static constexpr uint32_t kPushedGpRegs =
+ (1 << 29) - 1 - (1 << 16) - (1 << 17) - (1 << 18) - (1 << 26) - (1 << 27);
+ // {d0 .. d29}; {d15} is not used, but we still keep it for alignment reasons
+ // (the frame size needs to be a multiple of 16).
+ static constexpr uint32_t kPushedFpRegs = (1 << 30) - 1;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ // Header is padded to 16 byte (see {MacroAssembler::EnterFrame}).
+ -RoundUp<16>(TypedFrameConstants::kFixedFrameSizeFromFp) -
+ kSystemPointerSize * kNumPushedGpRegisters;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kDoubleSize * kNumPushedFpRegisters;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
} // namespace internal
} // namespace v8
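
The GetPushed*RegisterOffset helpers above locate a register's stack slot by popcounting the pushed-register mask below its register code. A minimal standalone sketch of that technique (plain C++; __builtin_popcount stands in for base::bits::CountPopulation, and the mask is copied from the arm64 constants above — not simulator code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // {x0 .. x28} \ {x16, x17, x18, x26, x27}, as in the class above.
    constexpr uint32_t kPushedGpRegs = (1u << 29) - 1 - (1u << 16) - (1u << 17) -
                                       (1u << 18) - (1u << 26) - (1u << 27);

    // 0-based slot of a pushed register: the number of pushed registers with
    // a lower code, obtained by masking off higher bits and popcounting.
    int PushedGpRegisterSlot(int reg_code) {
      assert(kPushedGpRegs & (uint32_t{1} << reg_code));
      uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
      return __builtin_popcount(lower_regs);
    }

    int main() {
      std::printf("x0  -> slot %d\n", PushedGpRegisterSlot(0));   // 0
      std::printf("x19 -> slot %d\n", PushedGpRegisterSlot(19));  // 16 (x16..x18 skipped)
      std::printf("x28 -> slot %d\n", PushedGpRegisterSlot(28));  // 23
      return 0;
    }

The fp-relative byte offset then falls out as kLastPushedGpRegisterOffset + slot * kSystemPointerSize, exactly as in GetPushedGpRegisterOffset above; the ia32 variant later in this patch uses the same trick with different masks.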
diff --git a/deps/v8/src/execution/arm64/pointer-authentication-arm64.h b/deps/v8/src/execution/arm64/pointer-authentication-arm64.h
new file mode 100644
index 0000000000..c54a59f29c
--- /dev/null
+++ b/deps/v8/src/execution/arm64/pointer-authentication-arm64.h
@@ -0,0 +1,164 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_ARM64_POINTER_AUTHENTICATION_ARM64_H_
+#define V8_EXECUTION_ARM64_POINTER_AUTHENTICATION_ARM64_H_
+
+#include "src/execution/pointer-authentication.h"
+
+#include "src/common/globals.h"
+#include "src/execution/arm64/simulator-arm64.h"
+
+// TODO(v8:10026): Replace hints with instruction aliases, when supported.
+#define AUTIA1716 "hint #12"
+#define PACIA1716 "hint #8"
+#define XPACLRI "hint #7"
+
+namespace v8 {
+namespace internal {
+
+// The following functions execute on the host and therefore need a different
+// path based on whether we are simulating arm64 or not.
+
+// clang-format fails to detect this file as C++; turn it off.
+// clang-format off
+
+// Authenticate the address stored in {pc_address}. {offset_from_sp} is the
+// offset between {pc_address} and the pointer used as a context for signing.
+V8_INLINE Address PointerAuthentication::AuthenticatePC(
+ Address* pc_address, unsigned offset_from_sp) {
+ uint64_t sp = reinterpret_cast<uint64_t>(pc_address) + offset_from_sp;
+ uint64_t pc = reinterpret_cast<uint64_t>(*pc_address);
+#ifdef USE_SIMULATOR
+ pc = Simulator::AuthPAC(pc, sp, Simulator::kPACKeyIA,
+ Simulator::kInstructionPointer);
+#else
+ asm volatile(
+ " mov x17, %[pc]\n"
+ " mov x16, %[stack_ptr]\n"
+ " " AUTIA1716 "\n"
+ " ldr xzr, [x17]\n"
+ " mov %[pc], x17\n"
+ : [pc] "+r"(pc)
+ : [stack_ptr] "r"(sp)
+ : "x16", "x17");
+#endif
+ return pc;
+}
+
+// Strip Pointer Authentication Code (PAC) from {pc} and return the raw value.
+V8_INLINE Address PointerAuthentication::StripPAC(Address pc) {
+#ifdef USE_SIMULATOR
+ return Simulator::StripPAC(pc, Simulator::kInstructionPointer);
+#else
+ asm volatile(
+ " mov x16, lr\n"
+ " mov lr, %[pc]\n"
+ " " XPACLRI "\n"
+ " mov %[pc], lr\n"
+ " mov lr, x16\n"
+ : [pc] "+r"(pc)
+ :
+ : "x16", "lr");
+ return pc;
+#endif
+}
+
+// Sign {pc} using {sp}.
+V8_INLINE Address PointerAuthentication::SignPCWithSP(Address pc, Address sp) {
+#ifdef USE_SIMULATOR
+ return Simulator::AddPAC(pc, sp, Simulator::kPACKeyIA,
+ Simulator::kInstructionPointer);
+#else
+ asm volatile(
+ " mov x17, %[pc]\n"
+ " mov x16, %[sp]\n"
+ " " PACIA1716 "\n"
+ " mov %[pc], x17\n"
+ : [pc] "+r"(pc)
+ : [sp] "r"(sp)
+ : "x16", "x17");
+ return pc;
+#endif
+}
+
+// Authenticate the address stored in {pc_address} and replace it with
+// {new_pc}, after signing it. {offset_from_sp} is the offset between
+// {pc_address} and the pointer used as a context for signing.
+V8_INLINE void PointerAuthentication::ReplacePC(Address* pc_address,
+ Address new_pc,
+ int offset_from_sp) {
+ uint64_t sp = reinterpret_cast<uint64_t>(pc_address) + offset_from_sp;
+ uint64_t old_pc = reinterpret_cast<uint64_t>(*pc_address);
+#ifdef USE_SIMULATOR
+ uint64_t auth_old_pc = Simulator::AuthPAC(old_pc, sp, Simulator::kPACKeyIA,
+ Simulator::kInstructionPointer);
+ uint64_t raw_old_pc =
+ Simulator::StripPAC(old_pc, Simulator::kInstructionPointer);
+ // Verify that the old address is authenticated.
+ CHECK_EQ(auth_old_pc, raw_old_pc);
+ new_pc = Simulator::AddPAC(new_pc, sp, Simulator::kPACKeyIA,
+ Simulator::kInstructionPointer);
+#else
+ // Only store newly signed address after we have verified that the old
+ // address is authenticated.
+ asm volatile(
+ " mov x17, %[new_pc]\n"
+ " mov x16, %[sp]\n"
+ " " PACIA1716 "\n"
+ " mov %[new_pc], x17\n"
+ " mov x17, %[old_pc]\n"
+ " " AUTIA1716 "\n"
+ " ldr xzr, [x17]\n"
+ : [new_pc] "+&r"(new_pc)
+ : [sp] "r"(sp), [old_pc] "r"(old_pc)
+ : "x16", "x17");
+#endif
+ *pc_address = new_pc;
+}
+
+// Authenticate the address stored in {pc_address} based on {old_context} and
+// replace it with the same address signed with {new_context} instead.
+V8_INLINE void PointerAuthentication::ReplaceContext(Address* pc_address,
+ Address old_context,
+ Address new_context) {
+ uint64_t old_signed_pc = static_cast<uint64_t>(*pc_address);
+ uint64_t new_pc;
+#ifdef USE_SIMULATOR
+ uint64_t auth_pc =
+ Simulator::AuthPAC(old_signed_pc, old_context, Simulator::kPACKeyIA,
+ Simulator::kInstructionPointer);
+ uint64_t raw_pc =
+ Simulator::StripPAC(auth_pc, Simulator::kInstructionPointer);
+ // Verify that the old address is authenticated.
+ CHECK_EQ(raw_pc, auth_pc);
+ new_pc = Simulator::AddPAC(raw_pc, new_context, Simulator::kPACKeyIA,
+ Simulator::kInstructionPointer);
+#else
+ // Only store newly signed address after we have verified that the old
+ // address is authenticated.
+ asm volatile(
+ " mov x17, %[old_pc]\n"
+ " mov x16, %[old_ctx]\n"
+ " " AUTIA1716 "\n"
+ " mov x16, %[new_ctx]\n"
+ " " PACIA1716 "\n"
+ " mov %[new_pc], x17\n"
+ " mov x17, %[old_pc]\n"
+ " mov x16, %[old_ctx]\n"
+ " " AUTIA1716 "\n"
+ " ldr xzr, [x17]\n"
+ : [new_pc] "=&r"(new_pc)
+ : [old_pc] "r"(old_signed_pc), [old_ctx] "r"(old_context),
+ [new_ctx] "r"(new_context)
+ : "x16", "x17");
+#endif
+ *pc_address = new_pc;
+}
+
+// clang-format on
+
+} // namespace internal
+} // namespace v8
+#endif // V8_EXECUTION_ARM64_POINTER_AUTHENTICATION_ARM64_H_
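
Taken together, the four functions above form a simple contract: SignPCWithSP embeds a PAC in the unused high bits of a code address, AuthenticatePC verifies and strips it (the "ldr xzr, [x17]" faults if authentication left a poison bit set), StripPAC removes it unconditionally, and ReplaceContext re-signs an authenticated address under a new context. A toy model of that contract, useful only for reasoning about the round-trips (this is neither the CPU's QARMA-based PAC nor V8's simulator implementation; the 48-bit address split and 16-bit signature are illustrative assumptions):

    #include <cassert>
    #include <cstdint>

    constexpr int kAddrBits = 48;  // low bits hold the address in this model
    constexpr uint64_t kAddrMask = (uint64_t{1} << kAddrBits) - 1;

    uint64_t ToySig(uint64_t pc, uint64_t ctx) {  // illustrative hash, not QARMA
      return ((pc ^ (ctx * 0x9E3779B97F4A7C15ull)) >> 17) & 0xFFFF;
    }

    uint64_t Sign(uint64_t pc, uint64_t ctx) {
      return (pc & kAddrMask) | (ToySig(pc & kAddrMask, ctx) << kAddrBits);
    }

    uint64_t Strip(uint64_t pc) { return pc & kAddrMask; }

    uint64_t Authenticate(uint64_t pc, uint64_t ctx) {
      uint64_t raw = Strip(pc);
      assert((pc >> kAddrBits) == ToySig(raw, ctx) && "PAC failure");
      return raw;
    }

    int main() {
      uint64_t sp = 0x7ffd'1234'5670, pc = 0x5555'0000'abcd;
      uint64_t signed_pc = Sign(pc, sp);
      assert(Authenticate(signed_pc, sp) == pc);  // round-trips with same context
      // ReplaceContext above is: authenticate under the old context, then
      // re-sign under the new one.
      uint64_t moved = Sign(Authenticate(signed_pc, sp), sp + 16);
      assert(Strip(moved) == pc);
      return 0;
    }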
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 8ee43c07e8..adc856a606 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -194,11 +194,13 @@ void Simulator::CheckPCSComplianceAndRun() {
saved_fpregisters[i] = dreg_bits(PopLowestIndexAsCode(&fpregister_list));
}
int64_t original_stack = sp();
+ int64_t original_fp = fp();
#endif
// Start the simulation!
Run();
#ifdef DEBUG
DCHECK_EQ(original_stack, sp());
+ DCHECK_EQ(original_fp, fp());
// Check that callee-saved registers have been preserved.
register_list = kCalleeSaved;
fpregister_list = kCalleeSavedV;
@@ -298,8 +300,10 @@ void Simulator::SetRedirectInstruction(Instruction* instruction) {
Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Isolate* isolate, FILE* stream)
: decoder_(decoder),
+ guard_pages_(ENABLE_CONTROL_FLOW_INTEGRITY_BOOL),
last_debugger_input_(nullptr),
log_parameters_(NO_PARAM),
+ icount_for_stop_sim_at_(0),
isolate_(isolate) {
// Setup the decoder.
decoder_->AppendVisitor(this);
@@ -314,6 +318,7 @@ Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Simulator::Simulator()
: decoder_(nullptr),
+ guard_pages_(ENABLE_CONTROL_FLOW_INTEGRITY_BOOL),
last_debugger_input_(nullptr),
log_parameters_(NO_PARAM),
isolate_(nullptr) {
@@ -361,6 +366,8 @@ void Simulator::ResetState() {
// Reset debug helpers.
breakpoints_.clear();
break_on_next_ = false;
+
+ btype_ = DefaultBType;
}
Simulator::~Simulator() {
@@ -378,8 +385,24 @@ void Simulator::Run() {
LogAllWrittenRegisters();
pc_modified_ = false;
- while (pc_ != kEndOfSimAddress) {
- ExecuteInstruction();
+
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ }
+ } else {
+ // FLAG_stop_sim_at has a non-default value. Stop in the debugger when we
+ // reach that instruction count.
+ while (pc_ != kEndOfSimAddress) {
+ icount_for_stop_sim_at_ =
+ base::AddWithWraparound(icount_for_stop_sim_at_, 1);
+ if (icount_for_stop_sim_at_ == ::v8::internal::FLAG_stop_sim_at) {
+ Debug();
+ }
+ ExecuteInstruction();
+ }
}
}
@@ -1494,6 +1517,20 @@ void Simulator::VisitConditionalBranch(Instruction* instr) {
}
}
+Simulator::BType Simulator::GetBTypeFromInstruction(
+ const Instruction* instr) const {
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BLR:
+ return BranchAndLink;
+ case BR:
+ if (!PcIsInGuardedPage() || (instr->Rn() == 16) || (instr->Rn() == 17)) {
+ return BranchFromUnguardedOrToIP;
+ }
+ return BranchFromGuardedNotToIP;
+ }
+ return DefaultBType;
+}
+
void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
Instruction* target = reg<Instruction*>(instr->Rn());
switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
@@ -1513,6 +1550,7 @@ void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
default:
UNIMPLEMENTED();
}
+ set_btype(GetBTypeFromInstruction(instr));
}
void Simulator::VisitTestBranch(Instruction* instr) {
@@ -3096,6 +3134,7 @@ void Simulator::VisitSystem(Instruction* instr) {
// range of immediates instead of indicating a different instruction. This
// makes the decoding tricky.
if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) {
+ // The BType check for PACIASP happens in CheckBType().
switch (instr->Mask(SystemPAuthMask)) {
#define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY) \
case PACI##SUFFIX: \
@@ -3145,6 +3184,11 @@ void Simulator::VisitSystem(Instruction* instr) {
switch (instr->ImmHint()) {
case NOP:
case CSDB:
+ case BTI_jc:
+ case BTI:
+ case BTI_c:
+ case BTI_j:
+ // The BType checks happen in CheckBType().
break;
default:
UNIMPLEMENTED();
@@ -3386,7 +3430,8 @@ void Simulator::Debug() {
// stack / mem
// ----------------------------------------------------------
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
int64_t* cur = nullptr;
int64_t* end = nullptr;
int next_arg = 1;
@@ -3418,20 +3463,23 @@ void Simulator::Debug() {
}
end = cur + words;
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
while (cur < end) {
PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
reinterpret_cast<uint64_t>(cur), *cur, *cur);
- Object obj(*cur);
- Heap* current_heap = isolate_->heap();
- if (obj.IsSmi() ||
- IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
- PrintF(" (");
- if (obj.IsSmi()) {
- PrintF("smi %" PRId32, Smi::ToInt(obj));
- } else {
- obj.ShortPrint();
+ if (!skip_obj_print) {
+ Object obj(*cur);
+ Heap* current_heap = isolate_->heap();
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %" PRId32, Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
}
- PrintF(")");
}
PrintF("\n");
cur++;
@@ -3440,13 +3488,12 @@ void Simulator::Debug() {
// trace / t
// -------------------------------------------------------------
} else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
- if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
- (LOG_DISASM | LOG_REGS)) {
- PrintF("Enabling disassembly and registers tracing\n");
- set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
+ if ((log_parameters() & LOG_ALL) != LOG_ALL) {
+ PrintF("Enabling disassembly, registers and memory write tracing\n");
+ set_log_parameters(log_parameters() | LOG_ALL);
} else {
- PrintF("Disabling disassembly and registers tracing\n");
- set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
+ PrintF("Disabling disassembly, registers and memory write tracing\n");
+ set_log_parameters(log_parameters() & ~LOG_ALL);
}
// break / b
@@ -3509,6 +3556,10 @@ void Simulator::Debug() {
"mem\n"
" mem <address> [<words>]\n"
" Dump memory content, default dump 10 words\n"
+ "dump\n"
+ " dump <address> [<words>]\n"
+ " Dump memory content without pretty printing JS objects, "
+ "default dump 10 words\n"
"trace / t\n"
" Toggle disassembly and register tracing\n"
"break / b\n"
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.h b/deps/v8/src/execution/arm64/simulator-arm64.h
index ae3b6867a9..cd4137c8e5 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.h
+++ b/deps/v8/src/execution/arm64/simulator-arm64.h
@@ -770,8 +770,125 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
virtual void Decode(Instruction* instr) { decoder_->Decode(instr); }
+ // Branch Target Identification (BTI)
+ //
+ // Executing an instruction updates PSTATE.BTYPE, as described in the table
+ // below. Execution of an instruction on a guarded page is allowed if either:
+ // * PSTATE.BTYPE is 00, or
+ // * it is a BTI or PACI[AB]SP instruction that accepts the current value of
+ // PSTATE.BTYPE (as described in the table below), or
+ // * it is a BRK or HLT instruction that causes some higher-priority exception.
+ //
+ // --------------------------------------------------------------------------
+ // | Last-executed instruction | Sets | Accepted by |
+ // | | BTYPE to | BTI | BTI j | BTI c | BTI jc |
+ // --------------------------------------------------------------------------
+ // | - BR from an unguarded page. | | | | | |
+ // | - BR from guarded page, | | | | | |
+ // | to x16 or x17. | 01 | | X | X | X |
+ // --------------------------------------------------------------------------
+ // | BR from guarded page, | | | | | |
+ // | not to x16 or x17. | 11 | | X | | X |
+ // --------------------------------------------------------------------------
+ // | BLR | 10 | | | X | X |
+ // --------------------------------------------------------------------------
+ // | Any other instruction | | | | | |
+ // |(including RET). | 00 | X | X | X | X |
+ // --------------------------------------------------------------------------
+ //
+ // PACI[AB]SP is treated either like "BTI c" or "BTI jc", according to the
+ // value of SCTLR_EL1.BT0. Details available in ARM DDI 0487E.a, D5-2580.
+
+ enum BType {
+ // Set when executing any instruction, except those cases listed below.
+ DefaultBType = 0,
+
+ // Set when an indirect branch is taken from an unguarded page, or from a
+ // guarded page to ip0 or ip1 (x16 or x17), e.g. "br ip0".
+ BranchFromUnguardedOrToIP = 1,
+
+ // Set when an indirect branch and link (call) is taken, e.g. "blr x0".
+ BranchAndLink = 2,
+
+ // Set when an indirect branch is taken from a guarded page to a register
+ // that is not ip0 or ip1 (x16 or x17), e.g. "br x0".
+ BranchFromGuardedNotToIP = 3
+ };
+
+ BType btype() const { return btype_; }
+ void ResetBType() { btype_ = DefaultBType; }
+ void set_btype(BType btype) { btype_ = btype; }
+
+ // Helper function to determine BType for branches.
+ BType GetBTypeFromInstruction(const Instruction* instr) const;
+
+ bool PcIsInGuardedPage() const { return guard_pages_; }
+ void SetGuardedPages(bool guard_pages) { guard_pages_ = guard_pages; }
+
+ void CheckBTypeForPAuth() {
+ DCHECK(pc_->IsPAuth());
+ Instr instr = pc_->Mask(SystemPAuthMask);
+ // Only PACI[AB]SP allowed here, but we don't currently support PACIBSP.
+ CHECK_EQ(instr, PACIASP);
+ // Check BType allows PACI[AB]SP instructions.
+ switch (btype()) {
+ case BranchFromGuardedNotToIP:
+ // This case depends on the value of SCTLR_EL1.BT0, which we assume
+ // here to be set. This makes PACI[AB]SP behave like "BTI c",
+ // disallowing its execution when BTYPE is BranchFromGuardedNotToIP
+ // (0b11).
+ FATAL("Executing PACIASP with wrong BType.");
+ case BranchFromUnguardedOrToIP:
+ case BranchAndLink:
+ break;
+ case DefaultBType:
+ UNREACHABLE();
+ }
+ }
+
+ void CheckBTypeForBti() {
+ DCHECK(pc_->IsBti());
+ switch (pc_->ImmHint()) {
+ case BTI_jc:
+ break;
+ case BTI: {
+ DCHECK(btype() != DefaultBType);
+ FATAL("Executing BTI with wrong BType (expected 0, got %d).", btype());
+ break;
+ }
+ case BTI_c:
+ if (btype() == BranchFromGuardedNotToIP) {
+ FATAL("Executing BTI c with wrong BType (3).");
+ }
+ break;
+ case BTI_j:
+ if (btype() == BranchAndLink) {
+ FATAL("Executing BTI j with wrong BType (2).");
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+
+ void CheckBType() {
+ // On guarded pages, if BType is not zero, take an exception on any
+ // instruction other than BTI, PACI[AB]SP, HLT or BRK.
+ if (PcIsInGuardedPage() && (btype() != DefaultBType)) {
+ if (pc_->IsPAuth()) {
+ CheckBTypeForPAuth();
+ } else if (pc_->IsBti()) {
+ CheckBTypeForBti();
+ } else if (!pc_->IsException()) {
+ FATAL("Executing non-BTI instruction with wrong BType.");
+ }
+ }
+ }
+
void ExecuteInstruction() {
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstrSize));
+ CheckBType();
+ ResetBType();
CheckBreakNext();
Decode(pc_);
increment_pc();
@@ -2192,6 +2309,13 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
bool pc_modified_;
Instruction* pc_;
+ // Branch type register, used for branch target identification.
+ BType btype_;
+
+ // Global flag for enabling guarded pages.
+ // TODO(arm64): implement guarding at page granularity, rather than globally.
+ bool guard_pages_;
+
static const char* xreg_names[];
static const char* wreg_names[];
static const char* sreg_names[];
@@ -2368,6 +2492,8 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
}
int log_parameters_;
+ // Instruction counter only valid if FLAG_stop_sim_at isn't 0.
+ int icount_for_stop_sim_at_;
Isolate* isolate_;
};
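
The BTYPE table above boils down to a small acceptance function over four states. A compact standalone sketch of the rule it encodes (enum values mirror Simulator::BType above; this is a model of the table, not the simulator's CheckBType code):

    #include <cassert>

    enum BType {  // mirrors Simulator::BType above
      DefaultBType = 0,
      BranchFromUnguardedOrToIP = 1,
      BranchAndLink = 2,
      BranchFromGuardedNotToIP = 3
    };

    enum LandingPad { kBti, kBtiJ, kBtiC, kBtiJC };

    // Returns true if the landing pad accepts the current BTYPE, per the table.
    bool Accepts(LandingPad pad, BType btype) {
      if (btype == DefaultBType) return true;  // any instruction is allowed
      switch (pad) {
        case kBti:   return false;                              // only BTYPE 00
        case kBtiJ:  return btype != BranchAndLink;             // rejects 10
        case kBtiC:  return btype != BranchFromGuardedNotToIP;  // rejects 11
        case kBtiJC: return true;                               // accepts all
      }
      return false;
    }

    int main() {
      assert(Accepts(kBtiC, BranchAndLink));              // "bti c" after blr
      assert(!Accepts(kBtiJ, BranchAndLink));             // "bti j" rejects calls
      assert(!Accepts(kBtiC, BranchFromGuardedNotToIP));  // cf. CheckBTypeForBti
      return 0;
    }

CheckBTypeForBti above is this same predicate inverted into FATAL calls, and CheckBTypeForPAuth treats PACIASP like "BTI c" under the assumed SCTLR_EL1.BT0 setting.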
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 61219a21d7..e41c3a6286 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -229,12 +229,7 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
Handle<Context> result =
isolate->factory()->NewScriptContext(native_context, scope_info);
- int header = scope_info->ContextHeaderLength();
- for (int var = 0; var < scope_info->ContextLocalCount(); var++) {
- if (scope_info->ContextLocalInitFlag(var) == kNeedsInitialization) {
- result->set(header + var, ReadOnlyRoots(isolate).the_hole_value());
- }
- }
+ result->Initialize(isolate);
Handle<ScriptContextTable> new_script_context_table =
ScriptContextTable::Extend(script_context, result);
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index c215bbaeab..8c3f774319 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -21,6 +21,9 @@ namespace internal {
// header, with slot index 2 corresponding to the current function context and 3
// corresponding to the frame marker/JSFunction.
//
+// If V8_REVERSE_JSARGS is set, then the parameters are reversed in the stack,
+// i.e., the first parameter (the receiver) is just above the return address.
+//
// slot JS frame
// +-----------------+--------------------------------
// -n-1 | parameter 0 | ^
@@ -160,7 +163,7 @@ class OptimizedBuiltinFrameConstants : public StandardFrameConstants {
static constexpr int kFixedSlotCount = kFixedFrameSize / kSystemPointerSize;
};
-// TypedFrames have a SMI type maker value below the saved FP/constant pool to
+// TypedFrames have a type marker value below the saved FP/constant pool to
// distinguish them from StandardFrames, which have a context in that position
// instead.
//
@@ -202,8 +205,7 @@ class TypedFrameConstants : public CommonFrameConstants {
StandardFrameConstants::kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
static constexpr int kFixedSlotCount = kFixedFrameSize / kSystemPointerSize;
static constexpr int kFirstPushedFrameValueOffset =
- -StandardFrameConstants::kCPSlotSize - kFrameTypeSize -
- kSystemPointerSize;
+ -kFixedFrameSizeFromFp - kSystemPointerSize;
};
#define TYPED_FRAME_PUSHED_VALUE_OFFSET(x) \
@@ -326,8 +328,13 @@ class InterpreterFrameConstants : public AllStatic {
StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kSystemPointerSize;
// FP-relative.
+#ifdef V8_REVERSE_JSARGS
+ static constexpr int kFirstParamFromFp =
+ StandardFrameConstants::kCallerSPOffset;
+#else
static constexpr int kLastParamFromFp =
StandardFrameConstants::kCallerSPOffset;
+#endif
static constexpr int kCallerPCOffsetFromFp =
StandardFrameConstants::kCallerPCOffset;
static constexpr int kBytecodeArrayFromFp =
@@ -376,7 +383,7 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/arm64/frame-constants-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/execution/arm/frame-constants-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/execution/ppc/frame-constants-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/execution/mips/frame-constants-mips.h" // NOLINT
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index f1b979b7be..e73cca4f05 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -9,6 +9,7 @@
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
+#include "src/execution/pointer-authentication.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -69,6 +70,16 @@ inline StackHandler* StackFrame::top_handler() const {
return iterator_->handler();
}
+inline Address StackFrame::callee_pc() const {
+ return state_.callee_pc_address ? ReadPC(state_.callee_pc_address)
+ : kNullAddress;
+}
+
+inline Address StackFrame::pc() const { return ReadPC(pc_address()); }
+
+inline Address StackFrame::ReadPC(Address* pc_address) {
+ return PointerAuthentication::AuthenticatePC(pc_address, kSystemPointerSize);
+}
inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
if (return_address_location_resolver_ == nullptr) {
@@ -179,11 +190,15 @@ inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {}
Address JavaScriptFrame::GetParameterSlot(int index) const {
- int param_count = ComputeParametersCount();
DCHECK(-1 <= index &&
- (index < param_count ||
- param_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ (index < ComputeParametersCount() ||
+ ComputeParametersCount() == kDontAdaptArgumentsSentinel));
+#ifdef V8_REVERSE_JSARGS
+ int parameter_offset = (index + 1) * kSystemPointerSize;
+#else
+ int param_count = ComputeParametersCount();
int parameter_offset = (param_count - index - 1) * kSystemPointerSize;
+#endif
return caller_sp() + parameter_offset;
}
@@ -231,6 +246,10 @@ inline WasmInterpreterEntryFrame::WasmInterpreterEntryFrame(
StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {}
+inline WasmDebugBreakFrame::WasmDebugBreakFrame(
+ StackFrameIteratorBase* iterator)
+ : StandardFrame(iterator) {}
+
inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
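
The two branches of GetParameterSlot above are the same mapping mirrored: with V8_REVERSE_JSARGS the receiver (index -1) sits directly above the return address, so the offset no longer depends on the parameter count. A small sketch contrasting the two formulas (pointer size and parameter count chosen only for illustration):

    #include <cassert>

    constexpr int kSystemPointerSize = 8;

    int OffsetReversed(int index) {  // V8_REVERSE_JSARGS layout
      return (index + 1) * kSystemPointerSize;
    }

    int OffsetClassic(int index, int param_count) {  // original layout
      return (param_count - index - 1) * kSystemPointerSize;
    }

    int main() {
      const int param_count = 3;
      // Receiver (index -1): lowest slot when reversed, top-most otherwise.
      assert(OffsetReversed(-1) == 0);
      assert(OffsetClassic(-1, param_count) == 3 * kSystemPointerSize);
      // Last declared parameter: the two layouts swap ends of the range.
      assert(OffsetReversed(param_count - 1) == 3 * kSystemPointerSize);
      assert(OffsetClassic(param_count - 1, param_count) == 0);
      return 0;
    }

This is also why the reversed configuration can drop OptimizedFrame::receiver() later in this patch: the generic JavaScriptFrame lookup already works when offsets are count-independent.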
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 8337a63e17..7d405efa5e 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -491,16 +491,18 @@ Code StackFrame::LookupCode() const {
void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address, Code holder) {
- Address pc = *pc_address;
+ Address old_pc = ReadPC(pc_address);
DCHECK(ReadOnlyHeap::Contains(holder) ||
- holder.GetHeap()->GcSafeCodeContains(holder, pc));
- unsigned pc_offset = static_cast<unsigned>(pc - holder.InstructionStart());
+ holder.GetHeap()->GcSafeCodeContains(holder, old_pc));
+ unsigned pc_offset =
+ static_cast<unsigned>(old_pc - holder.InstructionStart());
Object code = holder;
v->VisitRootPointer(Root::kTop, nullptr, FullObjectSlot(&code));
if (code == holder) return;
holder = Code::unchecked_cast(code);
- pc = holder.InstructionStart() + pc_offset;
- *pc_address = pc;
+ Address pc = holder.InstructionStart() + pc_offset;
+ // TODO(v8:10026): avoid replacing a signed pointer.
+ PointerAuthentication::ReplacePC(pc_address, pc, kSystemPointerSize);
if (FLAG_enable_embedded_constant_pool && constant_pool_address) {
*constant_pool_address = holder.constant_pool();
}
@@ -521,6 +523,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
kSystemPointerSize);
intptr_t marker = Memory<intptr_t>(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
+ Address pc = StackFrame::ReadPC(state->pc_address);
if (!iterator->can_access_heap_objects_) {
// TODO(titzer): "can_access_heap_objects" is kind of bogus. It really
// means that we are being called from the profiler, which can interrupt
@@ -535,15 +538,13 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (!StackFrame::IsTypeMarker(marker)) {
if (maybe_function.IsSmi()) {
return NATIVE;
- } else if (IsInterpreterFramePc(iterator->isolate(), *(state->pc_address),
- state)) {
+ } else if (IsInterpreterFramePc(iterator->isolate(), pc, state)) {
return INTERPRETED;
} else {
return OPTIMIZED;
}
}
} else {
- Address pc = *(state->pc_address);
// If the {pc} does not point into WebAssembly code we can rely on the
// returned {wasm_code} to be null and fall back to {GetContainingCode}.
wasm::WasmCodeRefScope code_ref_scope;
@@ -621,6 +622,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case WASM_COMPILED:
case WASM_COMPILE_LAZY:
case WASM_EXIT:
+ case WASM_DEBUG_BREAK:
return candidate;
case JS_TO_WASM:
case OPTIMIZED:
@@ -894,6 +896,7 @@ void StandardFrame::ComputeCallerState(State* state) const {
state->fp = caller_fp();
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(ComputePCAddress(fp())));
+ state->callee_fp = fp();
state->callee_pc_address = pc_address();
state->constant_pool_address =
reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
@@ -967,6 +970,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
case CONSTRUCT:
case JS_TO_WASM:
case C_WASM_ENTRY:
+ case WASM_DEBUG_BREAK:
frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
break;
case WASM_TO_JS:
@@ -1667,6 +1671,7 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
return DeoptimizationData();
}
+#ifndef V8_REVERSE_JSARGS
Object OptimizedFrame::receiver() const {
Code code = LookupCode();
if (code.kind() == Code::BUILTIN) {
@@ -1681,6 +1686,7 @@ Object OptimizedFrame::receiver() const {
return JavaScriptFrame::receiver();
}
}
+#endif
void OptimizedFrame::GetFunctions(
std::vector<SharedFunctionInfo>* functions) const {
@@ -1779,7 +1785,7 @@ void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
DCHECK_EQ(InterpreterFrameConstants::kBytecodeOffsetFromFp,
InterpreterFrameConstants::kExpressionsOffset -
index * kSystemPointerSize);
- int raw_offset = new_offset + BytecodeArray::kHeaderSize - kHeapObjectTag;
+ int raw_offset = BytecodeArray::kHeaderSize - kHeapObjectTag + new_offset;
SetExpression(index, Smi::FromInt(raw_offset));
}
@@ -2028,6 +2034,26 @@ Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
+void WasmDebugBreakFrame::Iterate(RootVisitor* v) const {
+ // Nothing to iterate here. This will change once we support references in
+ // Liftoff.
+}
+
+Code WasmDebugBreakFrame::unchecked_code() const { return Code(); }
+
+void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode,
+ int index) const {
+ PrintIndex(accumulator, mode, index);
+ accumulator->Add("WASM DEBUG BREAK");
+ if (mode != OVERVIEW) accumulator->Add("\n");
+}
+
+Address WasmDebugBreakFrame::GetCallerStackPointer() const {
+ // WasmDebugBreak does not receive any arguments, hence the stack pointer of
+ // the caller is at a fixed offset from the frame pointer.
+ return fp() + WasmDebugBreakFrameConstants::kCallerSPOffset;
+}
+
Code WasmCompileLazyFrame::unchecked_code() const { return Code(); }
WasmInstanceObject WasmCompileLazyFrame::wasm_instance() const {
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 3ffdee4b05..bd50cda8f8 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -70,6 +70,7 @@ class StackHandler {
V(WASM_TO_JS, WasmToJsFrame) \
V(JS_TO_WASM, JsToWasmFrame) \
V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
+ V(WASM_DEBUG_BREAK, WasmDebugBreakFrame) \
V(C_WASM_ENTRY, CWasmEntryFrame) \
V(WASM_EXIT, WasmExitFrame) \
V(WASM_COMPILE_LAZY, WasmCompileLazyFrame) \
@@ -117,6 +118,7 @@ class StackFrame {
Address sp = kNullAddress;
Address fp = kNullAddress;
Address* pc_address = nullptr;
+ Address callee_fp = kNullAddress;
Address* callee_pc_address = nullptr;
Address* constant_pool_address = nullptr;
};
@@ -181,6 +183,7 @@ class StackFrame {
bool is_interpreted() const { return type() == INTERPRETED; }
bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
+ bool is_wasm_debug_break() const { return type() == WASM_DEBUG_BREAK; }
bool is_wasm_interpreter_entry() const {
return type() == WASM_INTERPRETER_ENTRY;
}
@@ -215,9 +218,8 @@ class StackFrame {
// Accessors.
Address sp() const { return state_.sp; }
Address fp() const { return state_.fp; }
- Address callee_pc() const {
- return state_.callee_pc_address ? *state_.callee_pc_address : kNullAddress;
- }
+ Address callee_fp() const { return state_.callee_fp; }
+ inline Address callee_pc() const;
Address caller_sp() const { return GetCallerStackPointer(); }
// If this frame is optimized and was dynamically aligned return its old
@@ -225,8 +227,7 @@ class StackFrame {
// up one word and become unaligned.
Address UnpaddedFP() const;
- Address pc() const { return *pc_address(); }
- void set_pc(Address pc) { *pc_address() = pc; }
+ inline Address pc() const;
Address constant_pool() const { return *constant_pool_address(); }
void set_constant_pool(Address constant_pool) {
@@ -265,6 +266,8 @@ class StackFrame {
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver);
+ static inline Address ReadPC(Address* pc_address);
+
// Resolves pc_address through the resolution address function if one is set.
static inline Address* ResolveReturnAddressLocation(Address* pc_address);
@@ -820,7 +823,11 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationData GetDeoptimizationData(int* deopt_index) const;
+#ifndef V8_REVERSE_JSARGS
+ // When the arguments are reversed in the stack, receiver() is
+ // inherited from JavaScriptFrame.
Object receiver() const override;
+#endif
int ComputeParametersCount() const override;
static int StackSlotOffsetRelativeToFp(int slot_index);
@@ -1027,6 +1034,32 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
WasmModuleObject module_object() const;
};
+class WasmDebugBreakFrame final : public StandardFrame {
+ public:
+ Type type() const override { return WASM_DEBUG_BREAK; }
+
+ // GC support.
+ void Iterate(RootVisitor* v) const override;
+
+ Code unchecked_code() const override;
+
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
+
+ static WasmDebugBreakFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_wasm_debug_break());
+ return static_cast<WasmDebugBreakFrame*>(frame);
+ }
+
+ protected:
+ inline explicit WasmDebugBreakFrame(StackFrameIteratorBase*);
+
+ Address GetCallerStackPointer() const override;
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
class WasmToJsFrame : public StubFrame {
public:
Type type() const override { return WASM_TO_JS; }
diff --git a/deps/v8/src/execution/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index 8c3b54c2a7..9861454d8c 100644
--- a/deps/v8/src/execution/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
+#include "src/execution/vm-state-inl.h"
#include "src/handles/handles-inl.h"
#include "src/numbers/conversions.h"
#include "src/objects/bigint.h"
@@ -105,27 +106,33 @@ Object WaitJsTranslateReturn(Isolate* isolate, Object res) {
Object FutexEmulation::WaitJs32(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
- Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
+ Object res =
+ Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
return WaitJsTranslateReturn(isolate, res);
}
Object FutexEmulation::WaitJs64(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int64_t value, double rel_timeout_ms) {
- Object res = Wait64(isolate, array_buffer, addr, value, rel_timeout_ms);
+ Object res =
+ Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
return WaitJsTranslateReturn(isolate, res);
}
-Object FutexEmulation::Wait32(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer, size_t addr,
- int32_t value, double rel_timeout_ms) {
- return Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
+Object FutexEmulation::WaitWasm32(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int32_t value,
+ int64_t rel_timeout_ns) {
+ return Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ns >= 0,
+ rel_timeout_ns);
}
-Object FutexEmulation::Wait64(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer, size_t addr,
- int64_t value, double rel_timeout_ms) {
- return Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
+Object FutexEmulation::WaitWasm64(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int64_t value,
+ int64_t rel_timeout_ns) {
+ return Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ns >= 0,
+ rel_timeout_ns);
}
template <typename T>
@@ -135,24 +142,43 @@ Object FutexEmulation::Wait(Isolate* isolate,
DCHECK_LT(addr, array_buffer->byte_length());
bool use_timeout = rel_timeout_ms != V8_INFINITY;
+ int64_t rel_timeout_ns = -1;
- base::TimeDelta rel_timeout;
if (use_timeout) {
// Convert to nanoseconds.
- double rel_timeout_ns = rel_timeout_ms *
- base::Time::kNanosecondsPerMicrosecond *
- base::Time::kMicrosecondsPerMillisecond;
- if (rel_timeout_ns >
- static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ double timeout_ns = rel_timeout_ms *
+ base::Time::kNanosecondsPerMicrosecond *
+ base::Time::kMicrosecondsPerMillisecond;
+ if (timeout_ns > static_cast<double>(std::numeric_limits<int64_t>::max())) {
// 2**63 nanoseconds is 292 years. Let's just treat anything greater as
// infinite.
use_timeout = false;
} else {
- rel_timeout = base::TimeDelta::FromNanoseconds(
- static_cast<int64_t>(rel_timeout_ns));
+ rel_timeout_ns = static_cast<int64_t>(timeout_ns);
}
}
+ return Wait(isolate, array_buffer, addr, value, use_timeout, rel_timeout_ns);
+}
+
+namespace {
+double WaitTimeoutInMs(double timeout_ns) {
+ return timeout_ns < 0
+ ? V8_INFINITY
+ : timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
+ base::Time::kMicrosecondsPerMillisecond);
+}
+} // namespace
+
+template <typename T>
+Object FutexEmulation::Wait(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ T value, bool use_timeout, int64_t rel_timeout_ns) {
+ VMState<ATOMICS_WAIT> state(isolate);
+ base::TimeDelta rel_timeout =
+ base::TimeDelta::FromNanoseconds(rel_timeout_ns);
+ // We have to convert the timeout back to double for the AtomicsWaitCallback.
+ double rel_timeout_ms = WaitTimeoutInMs(static_cast<double>(rel_timeout_ns));
AtomicsWaitWakeHandle stop_handle(isolate);
isolate->RunAtomicsWaitCallback(AtomicsWaitEvent::kStartWait, array_buffer,
diff --git a/deps/v8/src/execution/futex-emulation.h b/deps/v8/src/execution/futex-emulation.h
index 052b3c9c17..2d005bcfd1 100644
--- a/deps/v8/src/execution/futex-emulation.h
+++ b/deps/v8/src/execution/futex-emulation.h
@@ -126,13 +126,13 @@ class FutexEmulation : public AllStatic {
// Same as WaitJs above except it returns 0 (ok), 1 (not equal) and 2 (timed
// out) as expected by Wasm.
- static Object Wait32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t addr, int32_t value, double rel_timeout_ms);
+ static Object WaitWasm32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int32_t value, int64_t rel_timeout_ns);
// Same as Wait32 above except it checks for an int64_t value in the
// array_buffer.
- static Object Wait64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t addr, int64_t value, double rel_timeout_ms);
+ static Object WaitWasm64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int64_t value, int64_t rel_timeout_ns);
// Wake |num_waiters_to_wake| threads that are waiting on the given |addr|.
// |num_waiters_to_wake| can be kWakeAll, in which case all waiters are
@@ -154,6 +154,11 @@ class FutexEmulation : public AllStatic {
static Object Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, double rel_timeout_ms);
+ template <typename T>
+ static Object Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, T value, bool use_timeout,
+ int64_t rel_timeout_ns);
+
// `mutex_` protects the composition of `wait_list_` (i.e. no elements may be
// added or removed without holding this mutex), as well as the `waiting_`
// and `interrupted_` fields for each individual list node that is currently
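
The refactored Wait path above normalizes every timeout to a signed nanosecond count, with a negative value meaning "wait forever", before reaching the shared implementation; WaitTimeoutInMs converts back for the AtomicsWaitCallback. A standalone sketch of that normalization and its inverse (a model of the conversion logic, not V8's code):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    constexpr double kNsPerMs = 1000.0 * 1000.0;

    // Returns -1 for "wait forever", else a nanosecond count fitting in int64_t.
    int64_t MsToTimeoutNs(double rel_timeout_ms) {
      if (rel_timeout_ms == std::numeric_limits<double>::infinity()) return -1;
      double ns = rel_timeout_ms * kNsPerMs;
      if (ns > static_cast<double>(std::numeric_limits<int64_t>::max())) {
        return -1;  // 2**63 ns is ~292 years; treat as infinite, as above
      }
      return static_cast<int64_t>(ns);
    }

    double TimeoutNsToMs(int64_t timeout_ns) {  // cf. WaitTimeoutInMs above
      return timeout_ns < 0 ? std::numeric_limits<double>::infinity()
                            : timeout_ns / kNsPerMs;
    }

    int main() {
      assert(MsToTimeoutNs(100.0) == 100'000'000);
      assert(MsToTimeoutNs(std::numeric_limits<double>::infinity()) == -1);
      assert(TimeoutNsToMs(-1) == std::numeric_limits<double>::infinity());
      assert(TimeoutNsToMs(100'000'000) == 100.0);
      return 0;
    }

Keeping the Wasm entry points (WaitWasm32/WaitWasm64) in integer nanoseconds end to end avoids a double round-trip for callers that already have an exact i64 timeout.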
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.h b/deps/v8/src/execution/ia32/frame-constants-ia32.h
index e579fa7352..dde0611f27 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_IA32_FRAME_CONSTANTS_IA32_H_
#define V8_EXECUTION_IA32_FRAME_CONSTANTS_IA32_H_
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/execution/frame-constants.h"
@@ -45,6 +46,42 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedFpParamRegs * kSimd128Size;
};
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {eax, ecx, edx, esi, edi}
+ static constexpr uint32_t kPushedGpRegs = 0b11000111;
+ // {xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6}
+ static constexpr uint32_t kPushedFpRegs = 0b01111111;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kSimd128Size;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSimd128Size;
+ }
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index f402296ab5..2cbc3daa66 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -62,6 +62,14 @@ class IsolateData final {
return kBuiltinsTableOffset - kIsolateRootBias;
}
+ static constexpr int fast_c_call_caller_fp_offset() {
+ return kFastCCallCallerFPOffset - kIsolateRootBias;
+ }
+
+ static constexpr int fast_c_call_caller_pc_offset() {
+ return kFastCCallCallerPCOffset - kIsolateRootBias;
+ }
+
// Root-register-relative offset of the given builtin table entry.
// TODO(ishell): remove in favour of typified id version.
static int builtin_slot_offset(int builtin_index) {
@@ -117,7 +125,7 @@ class IsolateData final {
V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize) \
V(kExternalMemoryOffset, kInt64Size) \
V(kExternalMemoryLlimitOffset, kInt64Size) \
- V(kExternalMemoryAtLastMarkCompactOffset, kInt64Size) \
+ V(kExternalMemoryLowSinceMarkCompactOffset, kInt64Size) \
V(kFastCCallCallerFPOffset, kSystemPointerSize) \
V(kFastCCallCallerPCOffset, kSystemPointerSize) \
V(kStackGuardOffset, StackGuard::kSizeInBytes) \
@@ -151,7 +159,7 @@ class IsolateData final {
int64_t external_memory_limit_ = kExternalAllocationSoftLimit;
// Caches the amount of external memory registered at the last MC.
- int64_t external_memory_at_last_mark_compact_ = 0;
+ int64_t external_memory_low_since_mark_compact_ = 0;
// Stores the state of the caller for TurboAssembler::CallCFunction so that
// the sampling CPU profiler can iterate the stack during such calls. These
@@ -220,8 +228,9 @@ void IsolateData::AssertPredictableLayout() {
kExternalMemoryOffset);
STATIC_ASSERT(offsetof(IsolateData, external_memory_limit_) ==
kExternalMemoryLlimitOffset);
- STATIC_ASSERT(offsetof(IsolateData, external_memory_at_last_mark_compact_) ==
- kExternalMemoryAtLastMarkCompactOffset);
+ STATIC_ASSERT(
+ offsetof(IsolateData, external_memory_low_since_mark_compact_) ==
+ kExternalMemoryLowSinceMarkCompactOffset);
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_fp_) ==
kFastCCallCallerFPOffset);
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index ae786f00d5..163ab4ea8c 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -83,6 +83,18 @@ bool Isolate::is_catchable_by_javascript(Object exception) {
return exception != ReadOnlyRoots(heap()).termination_exception();
}
+bool Isolate::is_catchable_by_wasm(Object exception) {
+ if (!is_catchable_by_javascript(exception)) return false;
+ if (!exception.IsJSObject()) return true;
+ // We don't allocate, but the LookupIterator interface expects a handle.
+ DisallowHeapAllocation no_gc;
+ HandleScope handle_scope(this);
+ LookupIterator it(this, handle(JSReceiver::cast(exception), this),
+ factory()->wasm_uncatchable_symbol(),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ return !JSReceiver::HasProperty(&it).FromJust();
+}
+
void Isolate::FireBeforeCallEnteredCallback() {
for (auto& callback : before_call_entered_callbacks_) {
callback(reinterpret_cast<v8::Isolate*>(this));
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
index 16bc1da8fa..47e03d51a1 100644
--- a/deps/v8/src/execution/isolate-utils-inl.h
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -23,8 +23,15 @@ inline const Isolate* GetIsolateForPtrCompr(HeapObject object) {
}
V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
-#if defined V8_COMPRESS_POINTERS || defined V8_ENABLE_THIRD_PARTY_HEAP
- return GetIsolateFromWritableObject(object)->heap();
+ // Avoid using GetIsolateFromWritableObject (defined below) because we want
+ // to be able to get the heap, but not the isolate, for off-thread objects.
+
+#if defined V8_ENABLE_THIRD_PARTY_HEAP
+ return Heap::GetIsolateFromWritableObject(object)->heap();
+#elif defined V8_COMPRESS_POINTERS
+ Isolate* isolate = Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+ DCHECK_NOT_NULL(isolate);
+ return isolate->heap();
#else
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
@@ -33,6 +40,9 @@ V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
}
V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
+ // We don't want to allow accessing the isolate off-thread.
+ DCHECK(!Heap::InOffThreadSpace(object));
+
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object);
#elif defined V8_COMPRESS_POINTERS
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 3da8963c53..c9c6225eb8 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -10,7 +10,9 @@
#include <fstream> // NOLINT(readability/streams)
#include <memory>
#include <sstream>
+#include <string>
#include <unordered_map>
+#include <utility>
#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
@@ -968,13 +970,13 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
builder.AppendJavaScriptFrame(java_script);
} else if (summary.IsWasmCompiled()) {
//=========================================================
- // Handle a WASM compiled frame.
+ // Handle a Wasm compiled frame.
//=========================================================
auto const& wasm_compiled = summary.AsWasmCompiled();
builder.AppendWasmCompiledFrame(wasm_compiled);
} else if (summary.IsWasmInterpreted()) {
//=========================================================
- // Handle a WASM interpreted frame.
+ // Handle a Wasm interpreted frame.
//=========================================================
auto const& wasm_interpreted = summary.AsWasmInterpreted();
builder.AppendWasmInterpretedFrame(wasm_interpreted);
@@ -1523,7 +1525,10 @@ Object Isolate::Throw(Object raw_exception, MessageLocation* location) {
// Notify debugger of exception.
if (is_catchable_by_javascript(raw_exception)) {
- debug()->OnThrow(exception);
+ base::Optional<Object> maybe_exception = debug()->OnThrow(exception);
+ if (maybe_exception.has_value()) {
+ return *maybe_exception;
+ }
}
// Generate the message if required.
@@ -1609,6 +1614,7 @@ Object Isolate::UnwindAndFindHandler() {
// Special handling of termination exceptions, uncatchable by JavaScript and
// Wasm code, we unwind the handlers until the top ENTRY handler is found.
bool catchable_by_js = is_catchable_by_javascript(exception);
+ bool catchable_by_wasm = is_catchable_by_wasm(exception);
// Compute handler and stack unwinding information by performing a full walk
// over the stack and dispatching according to the frame type.
@@ -1659,8 +1665,9 @@ Object Isolate::UnwindAndFindHandler() {
trap_handler::ClearThreadInWasm();
}
+ if (!catchable_by_wasm) break;
+
// For WebAssembly frames we perform a lookup in the handler table.
- if (!catchable_by_js) break;
// This code ref scope is here to avoid a check failure when looking up
// the code. It's not actually necessary to keep the code alive as it's
// currently being executed.
@@ -2851,6 +2858,10 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
builtins_(this),
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
+ jitless_(FLAG_jitless),
+#if V8_SFI_HAS_UNIQUE_ID
+ next_unique_sfi_id_(0),
+#endif
cancelable_task_manager_(new CancelableTaskManager()) {
TRACE_ISOLATE(constructor);
CheckIsolateLayout();
@@ -2900,10 +2911,10 @@ void Isolate::CheckIsolateLayout() {
CHECK_EQ(static_cast<int>(
OFFSET_OF(Isolate, isolate_data_.external_memory_limit_)),
Internals::kExternalMemoryLimitOffset);
- CHECK_EQ(Internals::kExternalMemoryAtLastMarkCompactOffset % 8, 0);
+ CHECK_EQ(Internals::kExternalMemoryLowSinceMarkCompactOffset % 8, 0);
CHECK_EQ(static_cast<int>(OFFSET_OF(
- Isolate, isolate_data_.external_memory_at_last_mark_compact_)),
- Internals::kExternalMemoryAtLastMarkCompactOffset);
+ Isolate, isolate_data_.external_memory_low_since_mark_compact_)),
+ Internals::kExternalMemoryLowSinceMarkCompactOffset);
}
void Isolate::ClearSerializerData() {
@@ -2929,7 +2940,7 @@ void Isolate::Deinit() {
#if defined(V8_OS_WIN64)
if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- heap()->memory_allocator()) {
+ heap()->memory_allocator() && RequiresCodeRange()) {
const base::AddressRegion& code_range =
heap()->memory_allocator()->code_range();
void* start = reinterpret_cast<void*>(code_range.begin());
@@ -3333,7 +3344,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
has_fatal_error_ = false;
// The initialization process does not handle memory exhaustion.
- AlwaysAllocateScope always_allocate(this);
+ AlwaysAllocateScope always_allocate(heap());
#define ASSIGN_ELEMENT(CamelName, hacker_name) \
isolate_addresses_[IsolateAddressId::k##CamelName##Address] = \
@@ -3420,15 +3431,15 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
setup_delegate_->SetupBuiltins(this);
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
// Store the interpreter entry trampoline on the root list. It is used as a
// template for further copies that may later be created to help profile
// interpreted code.
- // We currently cannot do this on arm due to RELATIVE_CODE_TARGETs
- // assuming that all possible Code targets may be addressed with an int24
- // offset, effectively limiting code space size to 32MB. We can guarantee
- // this at mksnapshot-time, but not at runtime.
- // See also: https://crbug.com/v8/8713.
+ // We currently cannot do this on above architectures due to
+ // RELATIVE_CODE_TARGETs assuming that all possible Code targets may be
+ // addressed with an int24 offset, effectively limiting code space size to
+ // 32MB. We can guarantee this at mksnapshot-time, but not at runtime. See
+ // also: https://crbug.com/v8/8713.
heap_.SetInterpreterEntryTrampolineForProfiling(
heap_.builtin(Builtins::kInterpreterEntryTrampoline));
#endif
@@ -3462,8 +3473,8 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
// If we are deserializing, read the state into the now-empty heap.
{
- AlwaysAllocateScope always_allocate(this);
- CodeSpaceMemoryModificationScope modification_scope(&heap_);
+ AlwaysAllocateScope always_allocate(heap());
+ CodeSpaceMemoryModificationScope modification_scope(heap());
if (create_heap_objects) {
heap_.read_only_space()->ClearStringPaddingIfNeeded();
@@ -3503,11 +3514,11 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
}
#endif // DEBUG
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
// The IET for profiling should always be a full on-heap Code object.
DCHECK(!Code::cast(heap_.interpreter_entry_trampoline_for_profiling())
.is_off_heap_trampoline());
-#endif // V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_S390X
if (FLAG_print_builtin_code) builtins()->PrintBuiltinCode();
if (FLAG_print_builtin_size) builtins()->PrintBuiltinSize();
@@ -3696,6 +3707,11 @@ bool Isolate::use_optimizer() {
!is_precise_count_code_coverage();
}
+void Isolate::IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code) {
+ DCHECK(code->IsCode() || code->IsByteArray());
+ total_regexp_code_generated_ += code->Size();
+}
+
bool Isolate::NeedsDetailedOptimizedCodeLineInfo() const {
return NeedsSourcePositionsForProfiling() ||
detailed_source_positions_for_profiling();
@@ -3752,18 +3768,21 @@ void Isolate::set_date_cache(DateCache* date_cache) {
date_cache_ = date_cache;
}
-bool Isolate::IsArrayOrObjectOrStringPrototype(Object object) {
+Isolate::KnownPrototype Isolate::IsArrayOrObjectOrStringPrototype(
+ Object object) {
Object context = heap()->native_contexts_list();
while (!context.IsUndefined(this)) {
Context current_context = Context::cast(context);
- if (current_context.initial_object_prototype() == object ||
- current_context.initial_array_prototype() == object ||
- current_context.initial_string_prototype() == object) {
- return true;
+ if (current_context.initial_object_prototype() == object) {
+ return KnownPrototype::kObject;
+ } else if (current_context.initial_array_prototype() == object) {
+ return KnownPrototype::kArray;
+ } else if (current_context.initial_string_prototype() == object) {
+ return KnownPrototype::kString;
}
context = current_context.next_context_link();
}
- return false;
+ return KnownPrototype::kNone;
}
bool Isolate::IsInAnyContext(Object object, uint32_t index) {
@@ -3783,7 +3802,13 @@ void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
if (!object->map().is_prototype_map()) return;
if (!Protectors::IsNoElementsIntact(this)) return;
- if (!IsArrayOrObjectOrStringPrototype(*object)) return;
+ KnownPrototype obj_type = IsArrayOrObjectOrStringPrototype(*object);
+ if (obj_type == KnownPrototype::kNone) return;
+ if (obj_type == KnownPrototype::kObject) {
+ this->CountUsage(v8::Isolate::kObjectPrototypeHasElements);
+ } else if (obj_type == KnownPrototype::kArray) {
+ this->CountUsage(v8::Isolate::kArrayPrototypeHasElements);
+ }
Protectors::InvalidateNoElements(this);
}
@@ -3909,18 +3934,15 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
if (!thread_local_top()->CallDepthIsZero()) return;
- bool run_microtasks =
- microtask_queue && microtask_queue->size() &&
- !microtask_queue->HasMicrotasksSuppressions() &&
+ bool perform_checkpoint =
+ microtask_queue &&
microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kAuto;
- if (run_microtasks) {
- microtask_queue->RunMicrotasks(this);
- }
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
+ if (perform_checkpoint) microtask_queue->PerformCheckpoint(isolate);
if (call_completed_callbacks_.empty()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
std::vector<CallCompletedCallback> callbacks(call_completed_callbacks_);
for (auto& callback : callbacks) {
@@ -4002,12 +4024,12 @@ void Isolate::SetHostCleanupFinalizationGroupCallback(
}
void Isolate::RunHostCleanupFinalizationGroupCallback(
- Handle<JSFinalizationGroup> fg) {
+ Handle<JSFinalizationRegistry> fr) {
if (host_cleanup_finalization_group_callback_ != nullptr) {
v8::Local<v8::Context> api_context =
- v8::Utils::ToLocal(handle(Context::cast(fg->native_context()), this));
+ v8::Utils::ToLocal(handle(Context::cast(fr->native_context()), this));
host_cleanup_finalization_group_callback_(api_context,
- v8::Utils::ToLocal(fg));
+ v8::Utils::ToLocal(fr));
}
}
@@ -4233,6 +4255,8 @@ void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
}
}
+int Isolate::GetNextScriptId() { return heap()->NextScriptId(); }
+
// static
std::string Isolate::GetTurboCfgFileName(Isolate* isolate) {
if (FLAG_trace_turbo_cfg_file == nullptr) {
@@ -4348,10 +4372,9 @@ void Isolate::PrintWithTimestamp(const char* format, ...) {
}
void Isolate::SetIdle(bool is_idle) {
- if (!is_profiling()) return;
StateTag state = current_vm_state();
- DCHECK(state == EXTERNAL || state == IDLE);
if (js_entry_sp() != kNullAddress) return;
+ DCHECK(state == EXTERNAL || state == IDLE);
if (is_idle) {
set_current_vm_state(IDLE);
} else if (state == IDLE) {
@@ -4479,6 +4502,10 @@ void Isolate::AddCodeRange(Address begin, size_t length_in_bytes) {
MemoryRange{reinterpret_cast<void*>(begin), length_in_bytes});
}
+bool Isolate::RequiresCodeRange() const {
+ return kPlatformRequiresCodeRange && !jitless_;
+}
+
// |chunk| is either a Page or an executable LargePage.
void Isolate::RemoveCodeMemoryChunk(MemoryChunk* chunk) {
// We only keep track of individual code pages/allocations if we are on arm32,
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 913249dfe5..037e6ea7f5 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -204,7 +204,7 @@ class BuiltinUnwindInfo;
#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
do { \
- Isolate* __isolate__ = (isolate); \
+ auto* __isolate__ = (isolate); \
ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call, \
ReadOnlyRoots(__isolate__).exception()); \
} while (false)
@@ -212,21 +212,21 @@ class BuiltinUnwindInfo;
#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
-#define THROW_NEW_ERROR(isolate, call, T) \
- do { \
- Isolate* __isolate__ = (isolate); \
- return __isolate__->Throw<T>(__isolate__->factory()->call); \
+#define THROW_NEW_ERROR(isolate, call, T) \
+ do { \
+ auto* __isolate__ = (isolate); \
+ return __isolate__->template Throw<T>(__isolate__->factory()->call); \
} while (false)
#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
do { \
- Isolate* __isolate__ = (isolate); \
+ auto* __isolate__ = (isolate); \
return __isolate__->Throw(*__isolate__->factory()->call); \
} while (false)
#define THROW_NEW_ERROR_RETURN_VALUE(isolate, call, value) \
do { \
- Isolate* __isolate__ = (isolate); \
+ auto* __isolate__ = (isolate); \
__isolate__->Throw(*__isolate__->factory()->call); \
return value; \
} while (false)
@@ -437,7 +437,9 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(v8_inspector::V8Inspector*, inspector, nullptr) \
V(bool, next_v8_call_is_safe_for_termination, false) \
V(bool, only_terminate_in_safe_scope, false) \
- V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info)
+ V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info) \
+ V(int, embedder_wrapper_type_index, -1) \
+ V(int, embedder_wrapper_object_index, -1)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
inline void set_##name(type v) { thread_local_top()->name##_ = v; } \
@@ -450,13 +452,15 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
// Factory's members available to Isolate directly.
class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};
-class Isolate final : private HiddenFactory {
+class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// These forward declarations are required to make the friend declarations in
// PerIsolateThreadData work on some older versions of gcc.
class ThreadDataTable;
class EntryStackItem;
public:
+ using HandleScopeType = HandleScope;
+
// A thread has a PerIsolateThreadData instance for each isolate that it has
// entered. That instance is allocated when the isolate is initially entered
// and reused on subsequent entries.
@@ -509,7 +513,7 @@ class Isolate final : private HiddenFactory {
// Creates Isolate object. Must be used instead of constructing Isolate with
// new operator.
- static V8_EXPORT_PRIVATE Isolate* New(
+ static Isolate* New(
IsolateAllocationMode mode = IsolateAllocationMode::kDefault);
// Deletes Isolate object. Must be used instead of delete operator.
@@ -613,7 +617,7 @@ class Isolate final : private HiddenFactory {
inline void set_pending_exception(Object exception_obj);
inline void clear_pending_exception();
- V8_EXPORT_PRIVATE bool AreWasmThreadsEnabled(Handle<Context> context);
+ bool AreWasmThreadsEnabled(Handle<Context> context);
THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)
@@ -649,6 +653,7 @@ class Isolate final : private HiddenFactory {
bool IsExternalHandlerOnTop(Object exception);
inline bool is_catchable_by_javascript(Object exception);
+ inline bool is_catchable_by_wasm(Object exception);
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
@@ -676,9 +681,9 @@ class Isolate final : private HiddenFactory {
return &thread_local_top()->js_entry_sp_;
}
- V8_EXPORT_PRIVATE std::vector<MemoryRange>* GetCodePages() const;
+ std::vector<MemoryRange>* GetCodePages() const;
- V8_EXPORT_PRIVATE void SetCodePages(std::vector<MemoryRange>* new_code_pages);
+ void SetCodePages(std::vector<MemoryRange>* new_code_pages);
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
@@ -694,7 +699,7 @@ class Isolate final : private HiddenFactory {
// exceptions. If an exception was thrown and not handled by an external
// handler the exception is scheduled to be rethrown when we return to running
// JavaScript code. If an exception is scheduled true is returned.
- V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool clear_exception);
+ bool OptionalRescheduleException(bool clear_exception);
// Push and pop a promise and the current try-catch handler.
void PushPromise(Handle<JSObject> promise);
@@ -719,7 +724,7 @@ class Isolate final : private HiddenFactory {
Handle<Object> pending_exception_;
};
- V8_EXPORT_PRIVATE void SetCaptureStackTraceForUncaughtExceptions(
+ void SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit, StackTrace::StackTraceOptions options);
void SetAbortOnUncaughtExceptionCallback(
@@ -729,8 +734,7 @@ class Isolate final : private HiddenFactory {
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator,
PrintStackMode mode = kPrintStackVerbose);
- V8_EXPORT_PRIVATE void PrintStack(FILE* out,
- PrintStackMode mode = kPrintStackVerbose);
+ void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
Handle<String> StackTraceString();
// Stores a stack trace in a stack-allocated temporary buffer which will
// end up in the minidump for debugging purposes.
@@ -774,6 +778,10 @@ class Isolate final : private HiddenFactory {
void ThrowAt(Handle<JSObject> exception, MessageLocation* location);
+ void FatalProcessOutOfHeapMemory(const char* location) {
+ heap()->FatalProcessOutOfMemory(location);
+ }
+
void set_console_delegate(debug::ConsoleDelegate* delegate) {
console_delegate_ = delegate;
}
@@ -807,7 +815,7 @@ class Isolate final : private HiddenFactory {
};
CatchType PredictExceptionCatcher();
- V8_EXPORT_PRIVATE void ScheduleThrow(Object exception);
+ void ScheduleThrow(Object exception);
// Re-set pending message, script and positions reported to the TryCatch
// back to the TLS for re-use when rethrowing.
void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
@@ -829,19 +837,18 @@ class Isolate final : private HiddenFactory {
bool ComputeLocation(MessageLocation* target);
bool ComputeLocationFromException(MessageLocation* target,
Handle<Object> exception);
- V8_EXPORT_PRIVATE bool ComputeLocationFromStackTrace(
- MessageLocation* target, Handle<Object> exception);
+ bool ComputeLocationFromStackTrace(MessageLocation* target,
+ Handle<Object> exception);
- V8_EXPORT_PRIVATE Handle<JSMessageObject> CreateMessage(
- Handle<Object> exception, MessageLocation* location);
+ Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
+ MessageLocation* location);
// Out of resource exception helpers.
Object StackOverflow();
Object TerminateExecution();
void CancelTerminateExecution();
- V8_EXPORT_PRIVATE void RequestInterrupt(InterruptCallback callback,
- void* data);
+ void RequestInterrupt(InterruptCallback callback, void* data);
void InvokeApiInterruptCallbacks();
// Administration
@@ -1034,9 +1041,7 @@ class Isolate final : private HiddenFactory {
RegExpStack* regexp_stack() { return regexp_stack_; }
size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
- void IncreaseTotalRegexpCodeGenerated(int size) {
- total_regexp_code_generated_ += size;
- }
+ void IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code);
std::vector<int>* regexp_indices() { return &regexp_indices_; }
@@ -1085,13 +1090,13 @@ class Isolate final : private HiddenFactory {
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
- V8_EXPORT_PRIVATE bool use_optimizer();
+ bool use_optimizer();
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
bool NeedsSourcePositionsForProfiling() const;
- V8_EXPORT_PRIVATE bool NeedsDetailedOptimizedCodeLineInfo() const;
+ bool NeedsDetailedOptimizedCodeLineInfo() const;
bool is_best_effort_code_coverage() const {
return code_coverage_mode() == debug::CoverageMode::kBestEffort;
@@ -1144,7 +1149,7 @@ class Isolate final : private HiddenFactory {
DateCache* date_cache() { return date_cache_; }
- V8_EXPORT_PRIVATE void set_date_cache(DateCache* date_cache);
+ void set_date_cache(DateCache* date_cache);
#ifdef V8_INTL_SUPPORT
@@ -1169,7 +1174,9 @@ class Isolate final : private HiddenFactory {
#endif // V8_INTL_SUPPORT
- bool IsArrayOrObjectOrStringPrototype(Object object);
+ enum class KnownPrototype { kNone, kObject, kArray, kString };
+
+ KnownPrototype IsArrayOrObjectOrStringPrototype(Object object);
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
@@ -1214,7 +1221,7 @@ class Isolate final : private HiddenFactory {
int id() const { return id_; }
CompilationStatistics* GetTurboStatistics();
- V8_EXPORT_PRIVATE CodeTracer* GetCodeTracer();
+ CodeTracer* GetCodeTracer();
void DumpAndResetStats();
@@ -1228,16 +1235,16 @@ class Isolate final : private HiddenFactory {
return &debug_execution_mode_;
}
- V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
+ base::RandomNumberGenerator* random_number_generator();
- V8_EXPORT_PRIVATE base::RandomNumberGenerator* fuzzer_rng();
+ base::RandomNumberGenerator* fuzzer_rng();
// Generates a random number that is non-zero when masked
// with the provided mask.
int GenerateIdentityHash(uint32_t mask);
// Given an address occupied by a live code object, return that object.
- V8_EXPORT_PRIVATE Code FindCodeObject(Address a);
+ Code FindCodeObject(Address a);
int NextOptimizationId() {
int id = next_optimization_id_++;
@@ -1267,14 +1274,27 @@ class Isolate final : private HiddenFactory {
Handle<Symbol> SymbolFor(RootIndex dictionary_index, Handle<String> name,
bool private_symbol);
- V8_EXPORT_PRIVATE void SetUseCounterCallback(
- v8::Isolate::UseCounterCallback callback);
+ void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
void CountUsage(v8::Isolate::UseCounterFeature feature);
static std::string GetTurboCfgFileName(Isolate* isolate);
+ int GetNextScriptId();
+
#if V8_SFI_HAS_UNIQUE_ID
- int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
+ int GetNextUniqueSharedFunctionInfoId() {
+ int current_id = next_unique_sfi_id_.load(std::memory_order_relaxed);
+ int next_id;
+ do {
+ if (current_id >= Smi::kMaxValue) {
+ next_id = 0;
+ } else {
+ next_id = current_id + 1;
+ }
+ } while (!next_unique_sfi_id_.compare_exchange_weak(
+ current_id, next_id, std::memory_order_relaxed));
+ return current_id;
+ }
#endif
Address promise_hook_address() {
@@ -1306,10 +1326,9 @@ class Isolate final : private HiddenFactory {
double timeout_in_ms,
AtomicsWaitWakeHandle* stop_handle);
- V8_EXPORT_PRIVATE void SetPromiseHook(PromiseHook hook);
- V8_EXPORT_PRIVATE void RunPromiseHook(PromiseHookType type,
- Handle<JSPromise> promise,
- Handle<Object> parent);
+ void SetPromiseHook(PromiseHook hook);
+ void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+ Handle<Object> parent);
void PromiseHookStateUpdated();
void AddDetachedContext(Handle<Context> context);
@@ -1334,14 +1353,14 @@ class Isolate final : private HiddenFactory {
// builtins constants table to remain unchanged from build-time.
size_t HashIsolateForEmbeddedBlob();
- V8_EXPORT_PRIVATE static const uint8_t* CurrentEmbeddedBlob();
- V8_EXPORT_PRIVATE static uint32_t CurrentEmbeddedBlobSize();
+ static const uint8_t* CurrentEmbeddedBlob();
+ static uint32_t CurrentEmbeddedBlobSize();
static bool CurrentEmbeddedBlobIsBinaryEmbedded();
// These always return the same result as static methods above, but don't
// access the global atomic variable (and thus *might be* slightly faster).
- V8_EXPORT_PRIVATE const uint8_t* embedded_blob() const;
- V8_EXPORT_PRIVATE uint32_t embedded_blob_size() const;
+ const uint8_t* embedded_blob() const;
+ uint32_t embedded_blob_size() const;
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
@@ -1391,17 +1410,21 @@ class Isolate final : private HiddenFactory {
void ClearKeptObjects();
void SetHostCleanupFinalizationGroupCallback(
HostCleanupFinalizationGroupCallback callback);
- void RunHostCleanupFinalizationGroupCallback(Handle<JSFinalizationGroup> fg);
+ HostCleanupFinalizationGroupCallback
+ host_cleanup_finalization_group_callback() const {
+ return host_cleanup_finalization_group_callback_;
+ }
+ void RunHostCleanupFinalizationGroupCallback(
+ Handle<JSFinalizationRegistry> fr);
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback);
- V8_EXPORT_PRIVATE MaybeHandle<JSPromise>
- RunHostImportModuleDynamicallyCallback(Handle<Script> referrer,
- Handle<Object> specifier);
+ MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
+ Handle<Script> referrer, Handle<Object> specifier);
void SetHostInitializeImportMetaObjectCallback(
HostInitializeImportMetaObjectCallback callback);
- V8_EXPORT_PRIVATE Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
+ Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
Handle<SourceTextModule> module);
void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
@@ -1460,8 +1483,7 @@ class Isolate final : private HiddenFactory {
bool allow_atomics_wait() { return allow_atomics_wait_; }
// Register a finalizer to be called at isolate teardown.
- V8_EXPORT_PRIVATE void RegisterManagedPtrDestructor(
- ManagedPtrDestructor* finalizer);
+ void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);
// Removes a previously-registered shared object finalizer.
void UnregisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);
@@ -1472,8 +1494,7 @@ class Isolate final : private HiddenFactory {
}
wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
- V8_EXPORT_PRIVATE void SetWasmEngine(
- std::shared_ptr<wasm::WasmEngine> engine);
+ void SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine);
const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
return top_backup_incumbent_scope_;
@@ -1483,23 +1504,25 @@ class Isolate final : private HiddenFactory {
top_backup_incumbent_scope_ = top_backup_incumbent_scope;
}
- V8_EXPORT_PRIVATE void SetIdle(bool is_idle);
+ void SetIdle(bool is_idle);
// Changing various modes can cause differences in generated bytecode which
// interferes with lazy source positions, so this should be called immediately
// before such a mode change to ensure that this cannot happen.
- V8_EXPORT_PRIVATE void CollectSourcePositionsForAllBytecodeArrays();
+ void CollectSourcePositionsForAllBytecodeArrays();
void AddCodeMemoryChunk(MemoryChunk* chunk);
void RemoveCodeMemoryChunk(MemoryChunk* chunk);
- V8_EXPORT_PRIVATE void AddCodeRange(Address begin, size_t length_in_bytes);
+ void AddCodeRange(Address begin, size_t length_in_bytes);
+
+ bool RequiresCodeRange() const;
private:
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
~Isolate();
- V8_EXPORT_PRIVATE bool Init(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer);
+ bool Init(ReadOnlyDeserializer* read_only_deserializer,
+ StartupDeserializer* startup_deserializer);
void CheckIsolateLayout();
@@ -1696,7 +1719,7 @@ class Isolate final : private HiddenFactory {
double time_millis_at_init_ = 0;
#ifdef DEBUG
- V8_EXPORT_PRIVATE static std::atomic<size_t> non_disposed_isolates_;
+ static std::atomic<size_t> non_disposed_isolates_;
JSObject::SpillInformation js_spill_information_;
#endif
@@ -1732,7 +1755,7 @@ class Isolate final : private HiddenFactory {
// preprocessor defines. Make sure the offsets of these fields agree
// between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
- V8_EXPORT_PRIVATE static const intptr_t name##_debug_offset_;
+ static const intptr_t name##_debug_offset_;
ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
@@ -1746,10 +1769,12 @@ class Isolate final : private HiddenFactory {
bool force_slow_path_ = false;
+ bool jitless_ = false;
+
int next_optimization_id_ = 0;
#if V8_SFI_HAS_UNIQUE_ID
- int next_unique_sfi_id_ = 0;
+ std::atomic<int> next_unique_sfi_id_;
#endif
// Vector of callbacks before a Call starts execution.
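A note on the GetNextUniqueSharedFunctionInfoId() change above: the counter moves from a plain int to std::atomic<int> so IDs can be handed out from background threads, and the compare-exchange loop wraps back to zero instead of overflowing past Smi::kMaxValue. A minimal standalone sketch of the same pattern (illustrative only; kMaxValue stands in for Smi::kMaxValue):

#include <atomic>
#include <cstdio>

constexpr int kMaxValue = 1073741823;  // stand-in for Smi::kMaxValue
std::atomic<int> next_id{0};

int GetNextId() {
  int current = next_id.load(std::memory_order_relaxed);
  int next;
  do {
    // Wrap to 0 rather than overflowing past the representable range.
    next = (current >= kMaxValue) ? 0 : current + 1;
    // On failure, compare_exchange_weak reloads `current` with the value
    // another thread installed, and the loop recomputes `next` from it.
  } while (!next_id.compare_exchange_weak(current, next,
                                          std::memory_order_relaxed));
  return current;  // the ID this thread reserved
}

int main() {
  std::printf("%d\n", GetNextId());  // 0
  std::printf("%d\n", GetNextId());  // 1
}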
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index f11ab23848..d1a76f654c 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -581,6 +581,12 @@ Handle<Object> WasmStackFrame::GetFunctionName() {
return name;
}
+Handle<Object> WasmStackFrame::GetScriptNameOrSourceUrl() {
+ Handle<Script> script = GetScript();
+ DCHECK_EQ(Script::TYPE_WASM, script->type());
+ return ScriptNameOrSourceUrl(script, isolate_);
+}
+
Handle<Object> WasmStackFrame::GetWasmModuleName() {
Handle<Object> module_name;
Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
@@ -994,9 +1000,20 @@ MaybeHandle<JSObject> ErrorUtils::Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
StackTraceCollection stack_trace_collection) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ // Abort on range errors in correctness fuzzing, as their causes differ
+ // across correctness-fuzzing scenarios.
+ if (target.is_identical_to(isolate->range_error_function())) {
+ FATAL("Aborting on range error");
+ }
+ // Ignore error messages in correctness fuzzing, because the spec leaves
+ // room for undefined behavior.
+ message = isolate->factory()->InternalizeUtf8String(
+ "Message suppressed for fuzzers (--correctness-fuzzer-suppressions)");
+ }
+
// 1. If NewTarget is undefined, let newTarget be the active function object,
// else let newTarget be NewTarget.
-
Handle<JSReceiver> new_target_recv =
new_target->IsJSReceiver() ? Handle<JSReceiver>::cast(new_target)
: Handle<JSReceiver>::cast(target);
@@ -1154,15 +1171,7 @@ Handle<JSObject> ErrorUtils::MakeGenericError(
// pending exceptions would be cleared. Preserve this behavior.
isolate->clear_pending_exception();
}
- Handle<String> msg;
- if (FLAG_correctness_fuzzer_suppressions) {
- // Ignore error messages in correctness fuzzing, because the spec leaves
- // room for undefined behavior.
- msg = isolate->factory()->InternalizeUtf8String(
- "Message suppressed for fuzzers (--correctness-fuzzer-suppressions)");
- } else {
- msg = DoFormatMessage(isolate, index, arg0, arg1, arg2);
- }
+ Handle<String> msg = DoFormatMessage(isolate, index, arg0, arg1, arg2);
DCHECK(mode != SKIP_UNTIL_SEEN);
@@ -1230,7 +1239,7 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
if (ComputeLocation(isolate, location)) {
ParseInfo info(isolate, *location->shared());
if (parsing::ParseAny(&info, location->shared(), isolate)) {
- info.ast_value_factory()->Internalize(isolate->factory());
+ info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location->shared()->IsUserJavaScript());
Handle<String> str = printer.Print(info.literal(), location->start_pos());
*hint = printer.GetErrorHint();
@@ -1281,6 +1290,38 @@ Handle<Object> ErrorUtils::NewIteratorError(Isolate* isolate,
return isolate->factory()->NewTypeError(id, callsite);
}
+Object ErrorUtils::ThrowSpreadArgIsNullOrUndefinedError(Isolate* isolate,
+ Handle<Object> object) {
+ MessageLocation location;
+ Handle<String> callsite;
+ if (ComputeLocation(isolate, &location)) {
+ ParseInfo info(isolate, *location.shared());
+ if (parsing::ParseAny(&info, location.shared(), isolate)) {
+ info.ast_value_factory()->Internalize(isolate);
+ CallPrinter printer(isolate, location.shared()->IsUserJavaScript(),
+ CallPrinter::SpreadErrorInArgsHint::kErrorInArgs);
+ Handle<String> str = printer.Print(info.literal(), location.start_pos());
+ callsite =
+ str->length() > 0 ? str : BuildDefaultCallSite(isolate, object);
+
+ if (printer.spread_arg() != nullptr) {
+ // Change the message location to point at the property name.
+ int pos = printer.spread_arg()->position();
+ location =
+ MessageLocation(location.script(), pos, pos + 1, location.shared());
+ }
+ } else {
+ isolate->clear_pending_exception();
+ callsite = BuildDefaultCallSite(isolate, object);
+ }
+ }
+
+ MessageTemplate id = MessageTemplate::kNotIterableNoSymbolLoad;
+ Handle<Object> exception =
+ isolate->factory()->NewTypeError(id, callsite, object);
+ return isolate->Throw(*exception, &location);
+}
+
Handle<Object> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
Handle<Object> source) {
MessageLocation location;
@@ -1331,7 +1372,7 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
ParseInfo info(isolate, *location.shared());
if (parsing::ParseAny(&info, location.shared(), isolate)) {
- info.ast_value_factory()->Internalize(isolate->factory());
+ info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
Handle<String> str = printer.Print(info.literal(), location.start_pos());
@@ -1348,8 +1389,7 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
maybe_property_name = destructuring_prop->key()
->AsLiteral()
->AsRawPropertyName()
- ->string()
- .get<Factory>();
+ ->string();
// Change the message location to point at the property name.
pos = destructuring_prop->key()->position();
}
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index cc6a233138..cf54cac852 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -169,7 +169,7 @@ class WasmStackFrame : public StackFrameBase {
Handle<Object> GetFileName() override { return Null(); }
Handle<Object> GetFunctionName() override;
- Handle<Object> GetScriptNameOrSourceUrl() override { return Null(); }
+ Handle<Object> GetScriptNameOrSourceUrl() override;
Handle<Object> GetMethodName() override { return Null(); }
Handle<Object> GetTypeName() override { return Null(); }
Handle<Object> GetWasmModuleName() override;
@@ -293,6 +293,8 @@ class ErrorUtils : public AllStatic {
Handle<Object> source);
static Handle<Object> NewConstructedNonConstructable(Isolate* isolate,
Handle<Object> source);
+ static Object ThrowSpreadArgIsNullOrUndefinedError(Isolate* isolate,
+ Handle<Object> object);
static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
Handle<Object> object);
static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index fca9d2d3f4..496737b03a 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -115,6 +115,7 @@ void MicrotaskQueue::PerformCheckpoint(v8::Isolate* v8_isolate) {
!HasMicrotasksSuppressions()) {
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
RunMicrotasks(isolate);
+ isolate->ClearKeptObjects();
}
}
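The one-line change above ties WeakRef semantics to the microtask checkpoint: when a checkpoint actually runs microtasks, the isolate also drops the objects it had kept alive to make WeakRef.prototype.deref() stable within the current job, so a later GC may clear those targets. A hedged embedder-level sketch of where this takes effect (v8::Isolate::PerformMicrotaskCheckpoint() is the public entry point that reaches MicrotaskQueue::PerformCheckpoint for the default queue):

#include "include/v8.h"

void RunOneEmbedderTask(v8::Isolate* isolate) {
  // ... execute one task that may enqueue microtasks and create WeakRefs ...
  // Runs pending microtasks; after this patch, a checkpoint that runs them
  // also clears the isolate's kept-objects list.
  isolate->PerformMicrotaskCheckpoint();
}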
diff --git a/deps/v8/src/execution/mips/frame-constants-mips.h b/deps/v8/src/execution/mips/frame-constants-mips.h
index 08c9c789ac..6287203f95 100644
--- a/deps/v8/src/execution/mips/frame-constants-mips.h
+++ b/deps/v8/src/execution/mips/frame-constants-mips.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_
#define V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/execution/frame-constants.h"
@@ -36,6 +37,42 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedFpParamRegs * kDoubleSize;
};
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7}
+ static constexpr uint32_t kPushedGpRegs = 0b111111111111100 + (1 << 23);
+ // {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24}
+ static constexpr uint32_t kPushedFpRegs = 0b1010101010101010101010101;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
} // namespace internal
} // namespace v8
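The offset helpers above rely on a compact trick worth spelling out: the set of pushed registers is a bitmask, and a register's slot index is the number of pushed registers with a smaller code, i.e. the population count of the mask bits below reg_code. A standalone sketch (illustrative constants; __builtin_popcount is the GCC/Clang builtin standing in for base::bits::CountPopulation):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Same mask as WasmDebugBreakFrameConstants::kPushedGpRegs on mips.
constexpr uint32_t kPushedGpRegs = 0b111111111111100 + (1 << 23);
constexpr int kSystemPointerSize = 4;             // mips32, illustrative
constexpr int kLastPushedGpRegisterOffset = -64;  // illustrative base

int GetPushedGpRegisterOffset(int reg_code) {
  assert((kPushedGpRegs & (uint32_t{1} << reg_code)) != 0);
  // Count how many pushed registers have a lower code than reg_code.
  uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
  return kLastPushedGpRegisterOffset +
         __builtin_popcount(lower_regs) * kSystemPointerSize;
}

int main() {
  // v0 (code 2) is the lowest pushed register, so it sits at the base offset.
  std::printf("%d\n", GetPushedGpRegisterOffset(2));  // -64
  std::printf("%d\n", GetPushedGpRegisterOffset(3));  // -60
}

The same computation recurs below for mips64, ppc, and s390, each with its own port-specific register masks.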
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index 2d9a924c14..edb734d37c 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -465,7 +465,8 @@ void MipsDebugger::Debug() {
} else {
PrintF("printobject <value>\n");
}
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
int32_t* cur = nullptr;
int32_t* end = nullptr;
int next_arg = 1;
@@ -505,20 +506,23 @@ void MipsDebugger::Debug() {
end = cur + words;
}
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
while (cur < end) {
PrintF(" 0x%08" PRIxPTR ": 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi() ||
- IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
- PrintF(" (");
- if (obj.IsSmi()) {
- PrintF("smi %d", Smi::ToInt(obj));
- } else {
- obj.ShortPrint();
+ if (!skip_obj_print) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
}
- PrintF(")");
}
PrintF("\n");
cur++;
@@ -702,6 +706,10 @@ void MipsDebugger::Debug() {
PrintF(" dump stack content, default dump 10 words)\n");
PrintF("mem <address> [<words>]\n");
PrintF(" dump memory content, default dump 10 words)\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects, default "
+ "dump 10 words)\n");
PrintF("flags\n");
PrintF(" print flags\n");
PrintF("disasm [<instructions>]\n");
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.h b/deps/v8/src/execution/mips64/frame-constants-mips64.h
index 58bb82a153..7a49ef6d19 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_MIPS64_FRAME_CONSTANTS_MIPS64_H_
#define V8_EXECUTION_MIPS64_FRAME_CONSTANTS_MIPS64_H_
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/execution/frame-constants.h"
@@ -32,6 +33,42 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedFpParamRegs * kDoubleSize;
};
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7}
+ static constexpr uint32_t kPushedGpRegs = 0b111111111111100 + (1 << 23);
+ // {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26}
+ static constexpr uint32_t kPushedFpRegs = 0b101010101010101010101010101;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index 78dbc29a0b..72f2836329 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -420,7 +420,8 @@ void MipsDebugger::Debug() {
} else {
PrintF("printobject <value>\n");
}
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
int64_t* cur = nullptr;
int64_t* end = nullptr;
int next_arg = 1;
@@ -447,20 +448,23 @@ void MipsDebugger::Debug() {
}
end = cur + words;
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
while (cur < end) {
PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi() ||
- IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
- PrintF(" (");
- if (obj.IsSmi()) {
- PrintF("smi %d", Smi::ToInt(obj));
- } else {
- obj.ShortPrint();
+ if (!skip_obj_print) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
}
- PrintF(")");
}
PrintF("\n");
cur++;
@@ -644,6 +648,10 @@ void MipsDebugger::Debug() {
PrintF(" dump stack content, default dump 10 words)\n");
PrintF("mem <address> [<words>]\n");
PrintF(" dump memory content, default dump 10 words)\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects, default "
+ "dump 10 words)\n");
PrintF("flags\n");
PrintF(" print flags\n");
PrintF("disasm [<instructions>]\n");
diff --git a/deps/v8/src/execution/off-thread-isolate.cc b/deps/v8/src/execution/off-thread-isolate.cc
new file mode 100644
index 0000000000..c08d51d7b4
--- /dev/null
+++ b/deps/v8/src/execution/off-thread-isolate.cc
@@ -0,0 +1,45 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/off-thread-isolate.h"
+
+#include "src/execution/isolate.h"
+#include "src/execution/thread-id.h"
+#include "src/logging/off-thread-logger.h"
+
+namespace v8 {
+namespace internal {
+
+OffThreadIsolate::OffThreadIsolate(Isolate* isolate, Zone* zone)
+ : HiddenOffThreadFactory(isolate),
+ isolate_(isolate),
+ logger_(new OffThreadLogger()),
+ handle_zone_(zone) {}
+OffThreadIsolate::~OffThreadIsolate() { delete logger_; }
+
+int OffThreadIsolate::GetNextScriptId() { return isolate_->GetNextScriptId(); }
+
+#if V8_SFI_HAS_UNIQUE_ID
+int OffThreadIsolate::GetNextUniqueSharedFunctionInfoId() {
+ return isolate_->GetNextUniqueSharedFunctionInfoId();
+}
+#endif // V8_SFI_HAS_UNIQUE_ID
+
+bool OffThreadIsolate::NeedsSourcePositionsForProfiling() {
+ // TODO(leszeks): Figure out if it makes sense to check this asynchronously.
+ return isolate_->NeedsSourcePositionsForProfiling();
+}
+
+bool OffThreadIsolate::is_collecting_type_profile() {
+ // TODO(leszeks): Figure out if it makes sense to check this asynchronously.
+ return isolate_->is_collecting_type_profile();
+}
+
+void OffThreadIsolate::PinToCurrentThread() {
+ DCHECK(!thread_id_.IsValid());
+ thread_id_ = ThreadId::Current();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution/off-thread-isolate.h b/deps/v8/src/execution/off-thread-isolate.h
new file mode 100644
index 0000000000..9a75c32859
--- /dev/null
+++ b/deps/v8/src/execution/off-thread-isolate.h
@@ -0,0 +1,99 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_OFF_THREAD_ISOLATE_H_
+#define V8_EXECUTION_OFF_THREAD_ISOLATE_H_
+
+#include "src/base/logging.h"
+#include "src/execution/thread-id.h"
+#include "src/handles/handles.h"
+#include "src/heap/off-thread-factory.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class OffThreadLogger;
+
+// HiddenOffThreadFactory parallels Isolate's HiddenFactory
+class V8_EXPORT_PRIVATE HiddenOffThreadFactory : private OffThreadFactory {
+ public:
+ // Forward constructors.
+ using OffThreadFactory::OffThreadFactory;
+};
+
+// An Isolate-like class that can be passed in to templated methods that need
+// an isolate syntactically, but are usable off-thread.
+//
+// This class holds an OffThreadFactory, but is otherwise effectively a stub
+// implementation of an Isolate. In particular, it doesn't allow throwing
+// exceptions, and hard crashes if you try.
+class V8_EXPORT_PRIVATE OffThreadIsolate final
+ : private HiddenOffThreadFactory {
+ public:
+ using HandleScopeType = OffThreadHandleScope;
+
+ explicit OffThreadIsolate(Isolate* isolate, Zone* zone);
+ ~OffThreadIsolate();
+
+ v8::internal::OffThreadFactory* factory() {
+ // Upcast to the privately inherited base-class using c-style casts to avoid
+ // undefined behavior (as static_cast cannot cast across private bases).
+ // NOLINTNEXTLINE (google-readability-casting)
+ return (
+ v8::internal::OffThreadFactory*)this; // NOLINT(readability/casting)
+ }
+
+ // This method finishes the use of the off-thread Isolate, and can be safely
+ // called off-thread.
+ void FinishOffThread() {
+ factory()->FinishOffThread();
+ handle_zone_ = nullptr;
+ }
+
+ template <typename T>
+ Handle<T> Throw(Handle<Object> exception) {
+ UNREACHABLE();
+ }
+ [[noreturn]] void FatalProcessOutOfHeapMemory(const char* location) {
+ UNREACHABLE();
+ }
+
+ Address* NewHandle(Address object) {
+ DCHECK_NOT_NULL(handle_zone_);
+ Address* location =
+ static_cast<Address*>(handle_zone_->New(sizeof(Address)));
+ *location = object;
+ return location;
+ }
+
+ int GetNextScriptId();
+#if V8_SFI_HAS_UNIQUE_ID
+ int GetNextUniqueSharedFunctionInfoId();
+#endif // V8_SFI_HAS_UNIQUE_ID
+
+ bool NeedsSourcePositionsForProfiling();
+ bool is_collecting_type_profile();
+
+ OffThreadLogger* logger() { return logger_; }
+
+ void PinToCurrentThread();
+ ThreadId thread_id() { return thread_id_; }
+
+ private:
+ friend class v8::internal::OffThreadFactory;
+
+ // TODO(leszeks): Extract out the fields of the Isolate we want and store
+ // those instead of the whole thing.
+ Isolate* isolate_;
+
+ OffThreadLogger* logger_;
+ ThreadId thread_id_;
+ Zone* handle_zone_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_OFF_THREAD_ISOLATE_H_
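The new class exists so that code can be templated over the isolate type: the same function runs on the main thread with Isolate or on a background thread with OffThreadIsolate, which is also why the THROW_NEW_ERROR macros in isolate.h above switched to `auto*` and `->template Throw<T>` (OffThreadIsolate::Throw is a template that hard-crashes). A hedged sketch of the intended pattern, not actual V8 source; whether OffThreadHandleScope is constructible exactly like this is an assumption:

// Works with LocalIsolate = Isolate or OffThreadIsolate.
template <typename LocalIsolate>
int AllocateScriptId(LocalIsolate* isolate) {
  // Resolves to HandleScope or OffThreadHandleScope via the nested alias
  // both classes now define.
  typename LocalIsolate::HandleScopeType scope(isolate);
  // Off-thread, this forwards to the owning Isolate's counter.
  return isolate->GetNextScriptId();
}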
diff --git a/deps/v8/src/execution/pointer-authentication-dummy.h b/deps/v8/src/execution/pointer-authentication-dummy.h
new file mode 100644
index 0000000000..32a10dc0dd
--- /dev/null
+++ b/deps/v8/src/execution/pointer-authentication-dummy.h
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_POINTER_AUTHENTICATION_DUMMY_H_
+#define V8_EXECUTION_POINTER_AUTHENTICATION_DUMMY_H_
+
+#include "src/execution/pointer-authentication.h"
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Dummy implementation of the PointerAuthentication class methods, to be used
+// when CFI is not enabled.
+
+// Load return address from {pc_address} and return it.
+V8_INLINE Address PointerAuthentication::AuthenticatePC(
+ Address* pc_address, unsigned offset_from_sp) {
+ USE(offset_from_sp);
+ return *pc_address;
+}
+
+// Return {pc} unmodified.
+V8_INLINE Address PointerAuthentication::StripPAC(Address pc) { return pc; }
+
+// Return {pc} unmodified.
+V8_INLINE Address PointerAuthentication::SignPCWithSP(Address pc, Address sp) {
+ USE(sp);
+ return pc;
+}
+
+// Store {new_pc} to {pc_address} without signing.
+V8_INLINE void PointerAuthentication::ReplacePC(Address* pc_address,
+ Address new_pc,
+ int offset_from_sp) {
+ USE(offset_from_sp);
+ *pc_address = new_pc;
+}
+
+// Do nothing.
+V8_INLINE void PointerAuthentication::ReplaceContext(Address* pc_address,
+ Address old_context,
+ Address new_context) {
+ USE(pc_address);
+ USE(old_context);
+ USE(new_context);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_POINTER_AUTHENTICATION_DUMMY_H_
diff --git a/deps/v8/src/execution/pointer-authentication.h b/deps/v8/src/execution/pointer-authentication.h
new file mode 100644
index 0000000000..f2d63773f4
--- /dev/null
+++ b/deps/v8/src/execution/pointer-authentication.h
@@ -0,0 +1,65 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_POINTER_AUTHENTICATION_H_
+#define V8_EXECUTION_POINTER_AUTHENTICATION_H_
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class PointerAuthentication : public AllStatic {
+ public:
+ // When CFI is enabled, authenticate the address stored in {pc_address} and
+ // return the authenticated address. {offset_from_sp} is the offset between
+ // {pc_address} and the pointer used as a context for signing.
+ // When CFI is not enabled, simply load return address from {pc_address} and
+ // return it.
+ V8_INLINE static Address AuthenticatePC(Address* pc_address,
+ unsigned offset_from_sp);
+
+ // When CFI is enabled, strip Pointer Authentication Code (PAC) from {pc} and
+ // return the raw value.
+ // When CFI is not enabled, return {pc} unmodified.
+ V8_INLINE static Address StripPAC(Address pc);
+
+ // When CFI is enabled, sign {pc} using {sp} and return the signed value.
+ // When CFI is not enabled, return {pc} unmodified.
+ V8_INLINE static Address SignPCWithSP(Address pc, Address sp);
+
+ // When CFI is enabled, authenticate the address stored in {pc_address} and
+ // replace it with {new_pc}, after signing it. {offset_from_sp} is the offset
+ // between {pc_address} and the pointer used as a context for signing.
+ // When CFI is not enabled, store {new_pc} to {pc_address} without signing.
+ V8_INLINE static void ReplacePC(Address* pc_address, Address new_pc,
+ int offset_from_sp);
+
+ // When CFI is enabled, authenticate the address stored in {pc_address} based
+ // on {old_context} and replace it with the same address signed with
+ // {new_context} instead.
+ // When CFI is not enabled, do nothing.
+ V8_INLINE static void ReplaceContext(Address* pc_address, Address old_context,
+ Address new_context);
+};
+
+} // namespace internal
+} // namespace v8
+
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+
+#ifndef V8_TARGET_ARCH_ARM64
+#error "V8_ENABLE_CONTROL_FLOW_INTEGRITY should imply V8_TARGET_ARCH_ARM64"
+#endif
+#include "src/execution/arm64/pointer-authentication-arm64.h"
+
+#else
+
+#include "src/execution/pointer-authentication-dummy.h"
+
+#endif
+
+#endif // V8_EXECUTION_POINTER_AUTHENTICATION_H_
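The interface above centralizes return-address handling so that, on arm64 with V8_ENABLE_CONTROL_FLOW_INTEGRITY, stored PCs carry a pointer authentication code, while every other configuration falls back to the dummy loads and stores. A hedged caller-side sketch (assumed helper names, not V8 source) of how a stack walker is meant to use it:

// Reading a return address out of a frame slot: authenticate first, so a
// corrupted value faults on arm64 instead of being followed.
Address ReadReturnAddress(Address* pc_slot, unsigned offset_from_sp) {
  return PointerAuthentication::AuthenticatePC(pc_slot, offset_from_sp);
}

// Patching a return address: ReplacePC re-signs new_pc before storing it;
// with the dummy implementation it is a plain store.
void PatchReturnAddress(Address* pc_slot, Address new_pc, int offset_from_sp) {
  PointerAuthentication::ReplacePC(pc_slot, new_pc, offset_from_sp);
}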
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.cc b/deps/v8/src/execution/ppc/frame-constants-ppc.cc
index 05cde9c8ee..97bef56a56 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/execution/ppc/frame-constants-ppc.h"
@@ -32,4 +32,4 @@ int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index c987d48c27..47f3b9e410 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_PPC_FRAME_CONSTANTS_PPC_H_
#define V8_EXECUTION_PPC_FRAME_CONSTANTS_PPC_H_
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/execution/frame-constants.h"
@@ -30,6 +31,43 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedFpParamRegs * kDoubleSize;
};
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {r3, r4, r5, r6, r7, r8, r9, r10, r11}
+ static constexpr uint32_t kPushedGpRegs = 0b111111111000;
+ // {d0 .. d12}
+ static constexpr uint32_t kPushedFpRegs = 0b1111111111111;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -TypedFrameConstants::kFixedFrameSizeFromFp -
+ kSystemPointerSize * kNumPushedGpRegisters;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kDoubleSize * kNumPushedFpRegisters;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index ab8786713b..2a9aa6486b 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -19,6 +19,7 @@
#include "src/diagnostics/disasm.h"
#include "src/execution/ppc/frame-constants-ppc.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/utils/ostreams.h"
@@ -41,8 +42,6 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
class PPCDebugger {
public:
explicit PPCDebugger(Simulator* sim) : sim_(sim) {}
-
- void Stop(Instruction* instr);
void Debug();
private:
@@ -57,34 +56,20 @@ class PPCDebugger {
bool GetValue(const char* desc, intptr_t* value);
bool GetFPDoubleValue(const char* desc, double* value);
- // Set or delete a breakpoint. Returns true if successful.
+ // Set or delete breakpoint (there can be only one).
bool SetBreakpoint(Instruction* break_pc);
- bool DeleteBreakpoint(Instruction* break_pc);
+ void DeleteBreakpoint();
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
+ // Undo and redo the breakpoint. This is needed to bracket disassembly and
+ // execution to skip past the breakpoint when run from the debugger.
+ void UndoBreakpoint();
+ void RedoBreakpoint();
};
-void PPCDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- // use of kStopCodeMask not right on PowerPC
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc() + kInstrSize);
- // Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
- sim_->watched_stops_[code].desc = msg;
- }
- // Print the stop message and code if it is not the default code.
- if (code != kMaxStopCode) {
- PrintF("Simulator hit stop %u: %s\n", code, msg);
- } else {
- PrintF("Simulator hit %s\n", msg);
- }
- sim_->set_pc(sim_->get_pc() + kInstrSize + kPointerSize);
- Debug();
+void Simulator::DebugAtNextPC() {
+ PrintF("Starting debugger on the next instruction:\n");
+ set_pc(get_pc() + kInstrSize);
+ PPCDebugger(this).Debug();
}
intptr_t PPCDebugger::GetRegisterValue(int regnum) {
@@ -139,25 +124,33 @@ bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
return true;
}
-bool PPCDebugger::DeleteBreakpoint(Instruction* break_pc) {
- if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
+namespace {
+// This function is dangerous, but it's only available in non-production
+// (simulator) builds.
+void SetInstructionBitsInCodeSpace(Instruction* instr, Instr value,
+ Heap* heap) {
+ CodeSpaceMemoryModificationScope scope(heap);
+ instr->SetInstructionBits(value);
+}
+} // namespace
+void PPCDebugger::DeleteBreakpoint() {
+ UndoBreakpoint();
sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
- return true;
}
-void PPCDebugger::UndoBreakpoints() {
+void PPCDebugger::UndoBreakpoint() {
if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ SetInstructionBitsInCodeSpace(sim_->break_pc_, sim_->break_instr_,
+ sim_->isolate_->heap());
}
}
-void PPCDebugger::RedoBreakpoints() {
+void PPCDebugger::RedoBreakpoint() {
if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ SetInstructionBitsInCodeSpace(sim_->break_pc_, kBreakpointInstr,
+ sim_->isolate_->heap());
}
}
@@ -181,9 +174,9 @@ void PPCDebugger::Debug() {
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
+ // Unset breakpoint while running in the debugger shell, making it invisible
+ // to all commands.
+ UndoBreakpoint();
// Disable tracing while simulating
bool trace = ::v8::internal::FLAG_trace_sim;
::v8::internal::FLAG_trace_sim = false;
@@ -361,7 +354,8 @@ void PPCDebugger::Debug() {
continue;
}
sim_->set_pc(value);
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
intptr_t* cur = nullptr;
intptr_t* end = nullptr;
int next_arg = 1;
@@ -388,20 +382,23 @@ void PPCDebugger::Debug() {
}
end = cur + words;
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
while (cur < end) {
PrintF(" 0x%08" V8PRIxPTR ": 0x%08" V8PRIxPTR " %10" V8PRIdPTR,
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi() ||
- IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
- PrintF(" (");
- if (obj.IsSmi()) {
- PrintF("smi %d", Smi::ToInt(obj));
- } else {
- obj.ShortPrint();
+ if (!skip_obj_print) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
}
- PrintF(")");
}
PrintF("\n");
cur++;
@@ -471,9 +468,7 @@ void PPCDebugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(nullptr)) {
- PrintF("deleting breakpoint failed\n");
- }
+ DeleteBreakpoint();
} else if (strcmp(cmd, "cr") == 0) {
PrintF("Condition reg: %08x\n", sim_->condition_reg_);
} else if (strcmp(cmd, "lr") == 0) {
@@ -493,7 +488,8 @@ void PPCDebugger::Debug() {
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->isStopInstruction(stop_instr)) {
- stop_instr->SetInstructionBits(kNopInstr);
+ SetInstructionBitsInCodeSpace(stop_instr, kNopInstr,
+ sim_->isolate_->heap());
msg_address->SetInstructionBits(kNopInstr);
} else {
PrintF("Not at debugger stop.\n");
@@ -573,6 +569,10 @@ void PPCDebugger::Debug() {
PrintF(" dump stack content, default dump 10 words)\n");
PrintF("mem <address> [<num words>]\n");
PrintF(" dump memory content, default dump 10 words)\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects, default "
+ "dump 10 words)\n");
PrintF("disasm [<instructions>]\n");
PrintF("disasm [<address/register>]\n");
PrintF("disasm [[<address/register>] <instructions>]\n");
@@ -613,9 +613,9 @@ void PPCDebugger::Debug() {
}
}
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
+ // Reinstall breakpoint to stop execution and enter the debugger shell when
+ // hit.
+ RedoBreakpoint();
// Restore tracing
::v8::internal::FLAG_trace_sim = trace;
@@ -1210,13 +1210,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
set_pc(saved_lr);
break;
}
- case kBreakpoint: {
- PPCDebugger dbg(this);
- dbg.Debug();
+ case kBreakpoint:
+ PPCDebugger(this).Debug();
break;
- }
// stop uses all codes greater than 1 << 23.
- default: {
+ default:
if (svc >= (1 << 23)) {
uint32_t code = svc & kStopCodeMask;
if (isWatchedStop(code)) {
@@ -1225,17 +1223,19 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
if (isEnabledStop(code)) {
- PPCDebugger dbg(this);
- dbg.Stop(instr);
+ if (code != kMaxStopCode) {
+ PrintF("Simulator hit stop %u. ", code);
+ } else {
+ PrintF("Simulator hit stop. ");
+ }
+ DebugAtNextPC();
} else {
set_pc(get_pc() + kInstrSize + kPointerSize);
}
} else {
// This is not a valid svc code.
UNREACHABLE();
- break;
}
- }
}
}
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.h b/deps/v8/src/execution/ppc/simulator-ppc.h
index 34a39f608b..76f836b196 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.h
+++ b/deps/v8/src/execution/ppc/simulator-ppc.h
@@ -238,6 +238,7 @@ class Simulator : public SimulatorBase {
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
+ void DebugAtNextPC();
// Stop helper functions.
inline bool isStopInstruction(Instruction* instr);
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index c1bdf021eb..7459f59ae1 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -9,6 +9,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/codegen/pending-optimization-table.h"
+#include "src/diagnostics/code-tracer.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/handles/global-handles.h"
@@ -69,18 +70,20 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate), any_ic_changed_(false) {}
static void TraceRecompile(JSFunction function, const char* reason,
- const char* type) {
+ const char* type, Isolate* isolate) {
if (FLAG_trace_opt) {
- PrintF("[marking ");
- function.ShortPrint();
- PrintF(" for %s recompilation, reason: %s", type, reason);
- PrintF("]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[marking ");
+ function.ShortPrint(scope.file());
+ PrintF(scope.file(), " for %s recompilation, reason: %s", type, reason);
+ PrintF(scope.file(), "]\n");
}
}
void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
- TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
+ TraceRecompile(function, OptimizationReasonToString(reason), "optimized",
+ isolate_);
function.MarkForOptimization(ConcurrencyMode::kConcurrent);
}
@@ -99,9 +102,10 @@ void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
// BytecodeArray header so that certain back edges in any interpreter frame
// for this bytecode will trigger on-stack replacement for that frame.
if (FLAG_trace_osr) {
- PrintF("[OSR - arming back edges in ");
- function.PrintName();
- PrintF("]\n");
+ CodeTracer::Scope scope(isolate_->GetCodeTracer());
+ PrintF(scope.file(), "[OSR - arming back edges in ");
+ function.PrintName(scope.file());
+ PrintF(scope.file(), "]\n");
}
DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
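The pattern in this file's change, isolated: --trace-opt and --trace-osr output now goes through the isolate's CodeTracer instead of raw PrintF to stdout, so it lands in the same, possibly redirected, stream as other code tracing. A minimal sketch of the idiom, assuming only an Isolate* in scope:

void TraceDecision(Isolate* isolate, const char* reason) {
  // The scope opens the tracer's FILE* for the duration of the writes.
  CodeTracer::Scope scope(isolate->GetCodeTracer());
  PrintF(scope.file(), "[marking for recompilation, reason: %s]\n", reason);
}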
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.h b/deps/v8/src/execution/s390/frame-constants-s390.h
index efa03f6e01..0893fffedf 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.h
+++ b/deps/v8/src/execution/s390/frame-constants-s390.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_S390_FRAME_CONSTANTS_S390_H_
#define V8_EXECUTION_S390_FRAME_CONSTANTS_S390_H_
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/execution/frame-constants.h"
@@ -36,6 +37,43 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedFpParamRegs * kDoubleSize;
};
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {r2, r3, r4, r5, r6, r7, r8}
+ static constexpr uint32_t kPushedGpRegs = 0b111111100;
+ // {d0 .. d12}
+ static constexpr uint32_t kPushedFpRegs = 0b1111111111111;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -TypedFrameConstants::kFixedFrameSizeFromFp -
+ kSystemPointerSize * kNumPushedGpRegisters;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kDoubleSize * kNumPushedFpRegisters;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 985a941874..f41288f6a9 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -19,6 +19,7 @@
#include "src/codegen/s390/constants-s390.h"
#include "src/diagnostics/disasm.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/utils/ostreams.h"
@@ -39,8 +40,6 @@ const Simulator::fpr_t Simulator::fp_zero;
class S390Debugger {
public:
explicit S390Debugger(Simulator* sim) : sim_(sim) {}
-
- void Stop(Instruction* instr);
void Debug();
private:
@@ -61,34 +60,20 @@ class S390Debugger {
bool GetValue(const char* desc, intptr_t* value);
bool GetFPDoubleValue(const char* desc, double* value);
- // Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instruction* break_pc);
- bool DeleteBreakpoint(Instruction* break_pc);
+ // Set or delete breakpoint (there can be only one).
+ bool SetBreakpoint(Instruction* breakpc);
+ void DeleteBreakpoint();
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
+ // Undo and redo the breakpoint. This is needed to bracket disassembly and
+ // execution to skip past the breakpoint when run from the debugger.
+ void UndoBreakpoint();
+ void RedoBreakpoint();
};
-void S390Debugger::Stop(Instruction* instr) {
- // Get the stop code.
- // use of kStopCodeMask not right on PowerPC
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc() + sizeof(FourByteInstr));
- // Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
- sim_->watched_stops_[code].desc = msg;
- }
- // Print the stop message and code if it is not the default code.
- if (code != kMaxStopCode) {
- PrintF("Simulator hit stop %u: %s\n", code, msg);
- } else {
- PrintF("Simulator hit %s\n", msg);
- }
- sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr) + kPointerSize);
- Debug();
+void Simulator::DebugAtNextPC() {
+ PrintF("Starting debugger on the next instruction:\n");
+ set_pc(get_pc() + sizeof(FourByteInstr));
+ S390Debugger(this).Debug();
}
intptr_t S390Debugger::GetRegisterValue(int regnum) {
@@ -147,25 +132,33 @@ bool S390Debugger::SetBreakpoint(Instruction* break_pc) {
return true;
}
-bool S390Debugger::DeleteBreakpoint(Instruction* break_pc) {
- if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
+namespace {
+// This function is dangerous, but it's only available in non-production
+// (simulator) builds.
+void SetInstructionBitsInCodeSpace(Instruction* instr, Instr value,
+ Heap* heap) {
+ CodeSpaceMemoryModificationScope scope(heap);
+ instr->SetInstructionBits(value);
+}
+} // namespace
+void S390Debugger::DeleteBreakpoint() {
+ UndoBreakpoint();
sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
- return true;
}
-void S390Debugger::UndoBreakpoints() {
+void S390Debugger::UndoBreakpoint() {
if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ SetInstructionBitsInCodeSpace(sim_->break_pc_, sim_->break_instr_,
+ sim_->isolate_->heap());
}
}
-void S390Debugger::RedoBreakpoints() {
+void S390Debugger::RedoBreakpoint() {
if (sim_->break_pc_ != nullptr) {
- sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ SetInstructionBitsInCodeSpace(sim_->break_pc_, kBreakpointInstr,
+ sim_->isolate_->heap());
}
}
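
The single-breakpoint model above works by patching instruction memory: SetBreakpoint saves the original bits, UndoBreakpoint restores them so the debugger shell can disassemble and step through the real instruction, and RedoBreakpoint re-installs the trap before execution resumes. A toy model of that bracket, assuming a flat writable buffer rather than V8's write-protected code space (which is why the patch routes writes through SetInstructionBitsInCodeSpace):

    #include <cstdint>

    struct ToyDebugger {
      static constexpr uint32_t kTrap = 0x0000FFFF;  // stand-in trap encoding
      uint32_t* break_pc = nullptr;  // patched location, if any
      uint32_t saved_instr = 0;      // original instruction bits

      bool SetBreakpoint(uint32_t* pc) {
        if (break_pc != nullptr) return false;  // there can be only one
        break_pc = pc;
        saved_instr = *pc;
        return true;
      }
      void UndoBreakpoint() { if (break_pc) *break_pc = saved_instr; }
      void RedoBreakpoint() { if (break_pc) *break_pc = kTrap; }

      void Debug() {
        UndoBreakpoint();  // hide the trap from disassembly and stepping
        // ... interactive shell runs here ...
        RedoBreakpoint();  // trap again the next time execution arrives
      }
    };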
@@ -189,9 +182,9 @@ void S390Debugger::Debug() {
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
+ // Unset breakpoint while running in the debugger shell, making it invisible
+ // to all commands.
+ UndoBreakpoint();
// Disable tracing while simulating
bool trace = ::v8::internal::FLAG_trace_sim;
::v8::internal::FLAG_trace_sim = false;
@@ -391,7 +384,8 @@ void S390Debugger::Debug() {
continue;
}
sim_->set_pc(value);
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
intptr_t* cur = nullptr;
intptr_t* end = nullptr;
int next_arg = 1;
@@ -418,19 +412,22 @@ void S390Debugger::Debug() {
}
end = cur + words;
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
while (cur < end) {
PrintF(" 0x%08" V8PRIxPTR ": 0x%08" V8PRIxPTR " %10" V8PRIdPTR,
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi()) {
- PrintF(" (smi %d)", Smi::ToInt(obj));
- } else if (IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
- PrintF(" (");
- obj.ShortPrint();
- PrintF(")");
+ if (!skip_obj_print) {
+ if (obj.IsSmi()) {
+ PrintF(" (smi %d)", Smi::ToInt(obj));
+ } else if (IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ obj.ShortPrint();
+ PrintF(")");
+ }
 }
 PrintF("\n");
cur++;
}
} else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
@@ -498,22 +495,21 @@ void S390Debugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(nullptr)) {
- PrintF("deleting breakpoint failed\n");
- }
+ DeleteBreakpoint();
} else if (strcmp(cmd, "cr") == 0) {
PrintF("Condition reg: %08x\n", sim_->condition_reg_);
} else if (strcmp(cmd, "stop") == 0) {
intptr_t value;
intptr_t stop_pc =
- sim_->get_pc() - (sizeof(FourByteInstr) + kPointerSize);
+ sim_->get_pc() - (sizeof(FourByteInstr) + kSystemPointerSize);
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
Instruction* msg_address =
reinterpret_cast<Instruction*>(stop_pc + sizeof(FourByteInstr));
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->isStopInstruction(stop_instr)) {
- stop_instr->SetInstructionBits(kNopInstr);
+ SetInstructionBitsInCodeSpace(stop_instr, kNopInstr,
+ sim_->isolate_->heap());
msg_address->SetInstructionBits(kNopInstr);
} else {
PrintF("Not at debugger stop.\n");
@@ -587,6 +583,10 @@ void S390Debugger::Debug() {
PrintF(" dump stack content, default dump 10 words)\n");
PrintF("mem <address> [<num words>]\n");
PrintF(" dump memory content, default dump 10 words)\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects, default "
+ "dump 10 words)\n");
PrintF("disasm [<instructions>]\n");
PrintF("disasm [<address/register>]\n");
PrintF("disasm [[<address/register>] <instructions>]\n");
@@ -627,9 +627,9 @@ void S390Debugger::Debug() {
}
}
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
+ // Reinstall breakpoint to stop execution and enter the debugger shell when
+ // hit.
+ RedoBreakpoint();
// Restore tracing
::v8::internal::FLAG_trace_sim = trace;
@@ -747,17 +747,21 @@ void Simulator::EvalTableInit() {
#define S390_SUPPORTED_VECTOR_OPCODE_LIST(V) \
V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */ \
V(vl, VL, 0xE706) /* type = VRX VECTOR LOAD */ \
+ V(vlp, VLP, 0xE7DF) /* type = VRR_A VECTOR LOAD POSITIVE */ \
V(vlgv, VLGV, 0xE721) /* type = VRS_C VECTOR LOAD GR FROM VR ELEMENT */ \
V(vlvg, VLVG, 0xE722) /* type = VRS_B VECTOR LOAD VR ELEMENT FROM GR */ \
+ V(vlvgp, VLVGP, 0xE762) /* type = VRR_F VECTOR LOAD VR FROM GRS DISJOINT */ \
V(vrep, VREP, 0xE74D) /* type = VRI_C VECTOR REPLICATE */ \
V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
V(vrepi, VREPI, 0xE745) /* type = VRI_A VECTOR REPLICATE IMMEDIATE */ \
V(vlr, VLR, 0xE756) /* type = VRR_A VECTOR LOAD */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
V(vlef, VLEF, 0xE703) /* type = VRX VECTOR LOAD ELEMENT (32) */ \
+ V(vavgl, VAVGL, 0xE7F0) /* type = VRR_C VECTOR AVERAGE LOGICAL */ \
V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
V(vml, VML, 0xE7A2) /* type = VRR_C VECTOR MULTIPLY LOW */ \
+ V(vnc, VNC, 0xE769) /* type = VRR_C VECTOR AND WITH COMPLEMENT */ \
V(vsum, VSUM, 0xE764) /* type = VRR_C VECTOR SUM ACROSS WORD */ \
V(vsumg, VSUMG, 0xE765) /* type = VRR_C VECTOR SUM ACROSS DOUBLEWORD */ \
V(vpk, VPK, 0xE794) /* type = VRR_C VECTOR PACK */ \
@@ -777,14 +781,21 @@ void Simulator::EvalTableInit() {
V(vch, VCH, 0xE7FB) /* type = VRR_B VECTOR COMPARE HIGH */ \
V(vo, VO, 0xE76A) /* type = VRR_C VECTOR OR */ \
V(vn, VN, 0xE768) /* type = VRR_C VECTOR AND */ \
+ V(vno, VNO, 0xE76B) /* type = VRR_C VECTOR NOR */ \
V(vlc, VLC, 0xE7DE) /* type = VRR_A VECTOR LOAD COMPLEMENT */ \
V(vsel, VSEL, 0xE78D) /* type = VRR_E VECTOR SELECT */ \
+ V(vperm, VPERM, 0xE78C) /* type = VRR_E VECTOR PERMUTE */ \
V(vtm, VTM, 0xE7D8) /* type = VRR_A VECTOR TEST UNDER MASK */ \
V(vesl, VESL, 0xE730) /* type = VRS_A VECTOR ELEMENT SHIFT LEFT */ \
+ V(veslv, VESLV, 0xE770) /* type = VRR_C VECTOR ELEMENT SHIFT LEFT */ \
V(vesrl, VESRL, \
0xE738) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT LOGICAL */ \
+ V(vesrlv, VESRLV, \
+ 0xE778) /* type = VRR_C VECTOR ELEMENT SHIFT RIGHT LOGICAL */ \
V(vesra, VESRA, \
0xE73A) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ \
+ V(vesrav, VESRAV, \
+ 0xE77A) /* type = VRR_C VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ \
V(vfsq, VFSQ, 0xE7CE) /* type = VRR_A VECTOR FP SQUARE ROOT */ \
V(vfmax, VFMAX, 0xE7EF) /* type = VRR_C VECTOR FP MAXIMUM */ \
V(vfmin, VFMIN, 0xE7EE) /* type = VRR_C VECTOR FP MINIMUM */ \
@@ -796,7 +807,10 @@ void Simulator::EvalTableInit() {
V(vfs, VFS, 0xE7E2) /* type = VRR_C VECTOR FP SUBTRACT */ \
V(vfa, VFA, 0xE7E3) /* type = VRR_C VECTOR FP ADD */ \
V(vfd, VFD, 0xE7E5) /* type = VRR_C VECTOR FP DIVIDE */ \
- V(vfm, VFM, 0xE7E7) /* type = VRR_C VECTOR FP MULTIPLY */
+ V(vfm, VFM, 0xE7E7) /* type = VRR_C VECTOR FP MULTIPLY */ \
+ V(vfma, VFMA, 0xE78F) /* type = VRR_E VECTOR FP MULTIPLY AND ADD */ \
+ V(vfnms, VFNMS, \
+ 0xE79E) /* type = VRR_E VECTOR FP NEGATIVE MULTIPLY AND SUBTRACT */
#define CREATE_EVALUATE_TABLE(name, op_name, op_value) \
EvalTable[op_name] = &Simulator::Evaluate_##op_name;
@@ -1912,8 +1926,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// Remaining arguments on stack
intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
for (int i = kRegisterArgCount; i < kArgCount; i++) {
- arg[i] = stack_pointer[(kCalleeRegisterSaveAreaSize / kPointerSize) +
- (i - kRegisterArgCount)];
+ arg[i] =
+ stack_pointer[(kCalleeRegisterSaveAreaSize / kSystemPointerSize) +
+ (i - kRegisterArgCount)];
}
STATIC_ASSERT(kArgCount == kRegisterArgCount + 5);
STATIC_ASSERT(kMaxCParameters == kArgCount);
@@ -1925,7 +1940,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// Place the return address on the stack, making the call GC safe.
*reinterpret_cast<intptr_t*>(get_register(sp) +
- kStackFrameRASlot * kPointerSize) =
+ kStackFrameRASlot * kSystemPointerSize) =
get_register(r14);
intptr_t external =
@@ -2193,7 +2208,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// #endif
}
int64_t saved_lr = *reinterpret_cast<intptr_t*>(
- get_register(sp) + kStackFrameRASlot * kPointerSize);
+ get_register(sp) + kStackFrameRASlot * kSystemPointerSize);
#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
// On zLinux-31, the saved_lr might be tagged with a high bit of 1.
// Cleanse it before proceeding with simulation.
@@ -2202,13 +2217,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
set_pc(saved_lr);
break;
}
- case kBreakpoint: {
- S390Debugger dbg(this);
- dbg.Debug();
+ case kBreakpoint:
+ S390Debugger(this).Debug();
break;
- }
 // stop uses all codes greater than or equal to 1 << 23.
- default: {
+ default:
if (svc >= (1 << 23)) {
uint32_t code = svc & kStopCodeMask;
if (isWatchedStop(code)) {
@@ -2217,17 +2230,19 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
if (isEnabledStop(code)) {
- S390Debugger dbg(this);
- dbg.Stop(instr);
+ if (code != kMaxStopCode) {
+ PrintF("Simulator hit stop %u. ", code);
+ } else {
+ PrintF("Simulator hit stop. ");
+ }
+ DebugAtNextPC();
} else {
- set_pc(get_pc() + sizeof(FourByteInstr) + kPointerSize);
+ set_pc(get_pc() + sizeof(FourByteInstr) + kSystemPointerSize);
}
} else {
// This is not a valid svc code.
UNREACHABLE();
- break;
}
- }
}
}
@@ -2870,6 +2885,12 @@ uintptr_t Simulator::PopAddress() {
int m5 = AS(VRR_E_Instruction)->M5Value(); \
int length = 6;
+#define DECODE_VRR_F_INSTRUCTION(r1, r2, r3) \
+ int r1 = AS(VRR_F_Instruction)->R1Value(); \
+ int r2 = AS(VRR_F_Instruction)->R2Value(); \
+ int r3 = AS(VRR_F_Instruction)->R3Value(); \
+ int length = 6;
+
#define DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3) \
int r1 = AS(VRX_Instruction)->R1Value(); \
int x2 = AS(VRX_Instruction)->X2Value(); \
@@ -2927,6 +2948,78 @@ EVALUATE(VL) {
return length;
}
+#define VECTOR_LOAD_POSITIVE(r1, r2, type) \
+ for (size_t i = 0, j = 0; j < kSimd128Size; i++, j += sizeof(type)) { \
+ set_simd_register_by_lane<type>( \
+ r1, i, abs(get_simd_register_by_lane<type>(r2, i))); \
+ }
+EVALUATE(VLP) {
+ DCHECK_OPCODE(VLP);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ USE(m4);
+ switch (m3) {
+ case 0: {
+ VECTOR_LOAD_POSITIVE(r1, r2, int8_t)
+ break;
+ }
+ case 1: {
+ VECTOR_LOAD_POSITIVE(r1, r2, int16_t)
+ break;
+ }
+ case 2: {
+ VECTOR_LOAD_POSITIVE(r1, r2, int32_t)
+ break;
+ }
+ case 3: {
+ VECTOR_LOAD_POSITIVE(r1, r2, int64_t)
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ return length;
+}
+#undef VECTOR_LOAD_POSITIVE
+
+#define VECTOR_AVERAGE_U(r1, r2, r3, type) \
+ for (size_t i = 0, j = 0; j < kSimd128Size; i++, j += sizeof(type)) { \
+ type src0 = get_simd_register_by_lane<type>(r2, i); \
+ type src1 = get_simd_register_by_lane<type>(r3, i); \
+ set_simd_register_by_lane<type>( \
+ r1, i, (static_cast<type>(src0) + static_cast<type>(src1) + 1) >> 1); \
+ }
+EVALUATE(VAVGL) {
+ DCHECK_OPCODE(VAVGL);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ switch (m4) {
+ case 0: {
+ VECTOR_AVERAGE_U(r1, r2, r3, uint8_t)
+ break;
+ }
+ case 1: {
+ VECTOR_AVERAGE_U(r1, r2, r3, uint16_t)
+ break;
+ }
+ case 2: {
+ VECTOR_AVERAGE_U(r1, r2, r3, uint32_t)
+ break;
+ }
+ case 3: {
+ VECTOR_AVERAGE_U(r1, r2, r3, uint64_t)
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ return length;
+}
+#undef VECTOR_AVERAGE_U
+
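+
+// VECTOR_AVERAGE_U implements a rounding average: the + 1 before the shift
+// rounds halves upward, matching VAVGL rather than a truncating mean. For 8-
+// and 16-bit lanes the operands are promoted to int before the addition, so
+// the intermediate sum cannot wrap. A standalone check of one uint8_t lane
+// (illustrative only, not part of the patch):
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   // Same arithmetic as one VECTOR_AVERAGE_U lane with type = uint8_t.
+//   uint8_t RoundingAverage(uint8_t a, uint8_t b) {
+//     return static_cast<uint8_t>((a + b + 1) >> 1);  // a, b promoted to int
+//   }
+//
+//   int main() {
+//     assert(RoundingAverage(5, 6) == 6);        // 11 / 2 rounds up to 6
+//     assert(RoundingAverage(255, 255) == 255);  // no wrap: sum is an int
+//     return 0;
+//   }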
EVALUATE(VLGV) {
DCHECK_OPCODE(VLGV);
DECODE_VRS_INSTRUCTION(r1, r3, b2, d2, m4);
@@ -2950,6 +3043,14 @@ EVALUATE(VLVG) {
return length;
}
+EVALUATE(VLVGP) {
+ DCHECK_OPCODE(VLVGP);
+ DECODE_VRR_F_INSTRUCTION(r1, r2, r3);
+ set_simd_register_by_lane<int64_t>(r1, 0, get_register(r2));
+ set_simd_register_by_lane<int64_t>(r1, 1, get_register(r3));
+ return length;
+}
+
EVALUATE(VREP) {
DCHECK_OPCODE(VREP);
DECODE_VRI_C_INSTRUCTION(r1, r3, i2, m4);
@@ -3084,6 +3185,20 @@ EVALUATE(VML) {
return length;
}
+EVALUATE(VNC) {
+ DCHECK_OPCODE(VNC);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ USE(m4);
+ for (int i = 0; i < 2; i++) {
+ uint64_t lane_1 = get_simd_register_by_lane<uint64_t>(r2, i);
+ uint64_t lane_2 = get_simd_register_by_lane<uint64_t>(r3, i);
+ set_simd_register_by_lane<uint64_t>(r1, i, lane_1 & ~lane_2);
+ }
+ return length;
+}
+
template <class S, class D>
void VectorSum(void* dst, void* src1, void* src2) {
D value = 0;
@@ -3490,6 +3605,42 @@ EVALUATE(VX) {
return length;
}
+#define VECTOR_NOR(r1, r2, r3, type) \
+ for (size_t i = 0, j = 0; j < kSimd128Size; i++, j += sizeof(type)) { \
+ type src0 = get_simd_register_by_lane<type>(r2, i); \
+ type src1 = get_simd_register_by_lane<type>(r3, i); \
+ set_simd_register_by_lane<type>(r1, i, ~(src0 | src1)); \
+ }
+EVALUATE(VNO) {
+ DCHECK_OPCODE(VNO);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ switch (m4) {
+ case 0: {
+ VECTOR_NOR(r1, r2, r3, int8_t)
+ break;
+ }
+ case 1: {
+ VECTOR_NOR(r1, r2, r3, int16_t)
+ break;
+ }
+ case 2: {
+ VECTOR_NOR(r1, r2, r3, int32_t)
+ break;
+ }
+ case 3: {
+ VECTOR_NOR(r1, r2, r3, int64_t)
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ return length;
+}
+#undef VECTOR_NOR
+
template <class T>
void VectorLoadComplement(void* dst, void* src) {
int8_t* src_ptr = reinterpret_cast<int8_t*>(src);
@@ -3530,6 +3681,27 @@ EVALUATE(VLC) {
return length;
}
+EVALUATE(VPERM) {
+ DCHECK_OPCODE(VPERM);
+ DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
+ USE(m5);
+ USE(m6);
+ for (int i = 0; i < kSimd128Size; i++) {
+ int8_t lane_num = get_simd_register_by_lane<int8_t>(r4, i);
+ int reg = r2;
+ if (lane_num >= kSimd128Size) {
+ lane_num = lane_num - kSimd128Size;
+ reg = r3;
+ }
+ int8_t result = 0;
+ if (lane_num >= 0 && lane_num < kSimd128Size) {
+ result = get_simd_register_by_lane<int8_t>(reg, lane_num);
+ }
+ set_simd_register_by_lane<int8_t>(r1, i, result);
+ }
+ return length;
+}
+
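+
+// VPERM builds each result byte by indexing the 32-byte concatenation of the
+// two source registers with the corresponding selector byte: selectors 0-15
+// pick from the first source, 16-31 from the second, and anything else yields
+// 0 here. A simplified model with plain arrays standing in for the SIMD
+// registers (illustrative only, not part of the patch):
+//
+//   #include <array>
+//   #include <cstdint>
+//
+//   constexpr int kSimd128Size = 16;
+//   using Bytes = std::array<int8_t, kSimd128Size>;
+//
+//   // One VPERM step per result byte: the selector indexes src_a ++ src_b.
+//   Bytes Permute(const Bytes& src_a, const Bytes& src_b, const Bytes& sel) {
+//     Bytes result{};
+//     for (int i = 0; i < kSimd128Size; i++) {
+//       int lane = sel[i];
+//       if (lane >= 0 && lane < kSimd128Size) {
+//         result[i] = src_a[lane];
+//       } else if (lane >= kSimd128Size && lane < 2 * kSimd128Size) {
+//         result[i] = src_b[lane - kSimd128Size];
+//       }  // out-of-range selectors leave the zero-initialized byte
+//     }
+//     return result;
+//   }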
EVALUATE(VSEL) {
DCHECK_OPCODE(VSEL);
DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
@@ -3606,6 +3778,65 @@ EVALUATE(VESRL) {
return length;
}
+#define VECTOR_SHIFT_WITH_OPERAND_TYPE(r1, r2, r3, type, op) \
+ for (size_t i = 0, j = 0; j < kSimd128Size; i++, j += sizeof(type)) { \
+ type src0 = get_simd_register_by_lane<type>(r2, i); \
+ type src1 = get_simd_register_by_lane<type>(r3, i); \
+ set_simd_register_by_lane<type>(r1, i, \
+ src0 op(src1 % (sizeof(type) * 8))); \
+ }
+
+#define VECTOR_SHIFT_WITH_OPERAND(r1, r2, r3, op, sign) \
+ switch (m4) { \
+ case 0: { \
+ VECTOR_SHIFT_WITH_OPERAND_TYPE(r1, r2, r3, sign##int8_t, op) \
+ break; \
+ } \
+ case 1: { \
+ VECTOR_SHIFT_WITH_OPERAND_TYPE(r1, r2, r3, sign##int16_t, op) \
+ break; \
+ } \
+ case 2: { \
+ VECTOR_SHIFT_WITH_OPERAND_TYPE(r1, r2, r3, sign##int32_t, op) \
+ break; \
+ } \
+ case 3: { \
+ VECTOR_SHIFT_WITH_OPERAND_TYPE(r1, r2, r3, sign##int64_t, op) \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ }
+
+EVALUATE(VESLV) {
+ DCHECK_OPCODE(VESLV);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ VECTOR_SHIFT_WITH_OPERAND(r1, r2, r3, <<, )
+ return length;
+}
+
+EVALUATE(VESRAV) {
+ DCHECK_OPCODE(VESRAV);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ VECTOR_SHIFT_WITH_OPERAND(r1, r2, r3, >>, )
+ return length;
+}
+
+EVALUATE(VESRLV) {
+ DCHECK_OPCODE(VESRLV);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ VECTOR_SHIFT_WITH_OPERAND(r1, r2, r3, >>, u)
+ return length;
+}
+#undef VECTOR_SHIFT_WITH_OPERAND
+#undef VECTOR_SHIFT_WITH_OPERAND_TYPE
+
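+
+// Unlike VESL/VESRL/VESRA, which shift every lane by one common amount, the
+// *V forms take a per-lane shift count from the second source register, and
+// VECTOR_SHIFT_WITH_OPERAND_TYPE reduces each count modulo the lane width so
+// that a count at or above the lane width wraps instead of invoking undefined
+// behavior. A one-lane check for the VESRLV case (illustrative only):
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   // Per-lane logical right shift with the count taken modulo the lane
+//   // width, as one 32-bit lane of VESRLV behaves above.
+//   uint32_t ShiftRightLogical(uint32_t value, uint32_t count) {
+//     return value >> (count % 32);
+//   }
+//
+//   int main() {
+//     assert(ShiftRightLogical(0x80000000u, 3) == 0x10000000u);
+//     assert(ShiftRightLogical(0x80000000u, 35) == 0x10000000u);  // 35 % 32
+//     return 0;
+//   }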
EVALUATE(VTM) {
DCHECK_OPCODE(VTM);
DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
@@ -3688,6 +3919,63 @@ EVALUATE(VFD) {
return length;
}
+#define VECTOR_FP_MULTIPLY_QFMS_OPERATION(type, op, sign, first_lane_only) \
+ for (size_t i = 0, j = 0; j < kSimd128Size; i++, j += sizeof(type)) { \
+ type src0 = get_simd_register_by_lane<type>(r2, i); \
+ type src1 = get_simd_register_by_lane<type>(r3, i); \
+ type src2 = get_simd_register_by_lane<type>(r4, i); \
+ type result = sign * (src0 * src1 op src2); \
+ if (isinf(src0)) result = src0; \
+ if (isinf(src1)) result = src1; \
+ if (isinf(src2)) result = src2; \
+ set_simd_register_by_lane<type>(r1, i, result); \
+ if (first_lane_only) break; \
+ }
+
+#define VECTOR_FP_MULTIPLY_QFMS(op, sign) \
+ switch (m6) { \
+ case 2: \
+ DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)); \
+ if (m5 == 8) { \
+ VECTOR_FP_MULTIPLY_QFMS_OPERATION(float, op, sign, true) \
+ } else { \
+ DCHECK_EQ(m5, 0); \
+ VECTOR_FP_MULTIPLY_QFMS_OPERATION(float, op, sign, false) \
+ } \
+ break; \
+ case 3: \
+ if (m5 == 8) { \
+ VECTOR_FP_MULTIPLY_QFMS_OPERATION(double, op, sign, true) \
+ } else { \
+ DCHECK_EQ(m5, 0); \
+ VECTOR_FP_MULTIPLY_QFMS_OPERATION(double, op, sign, false) \
+ } \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ }
+
+EVALUATE(VFMA) {
+ DCHECK_OPCODE(VFMA);
+ DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
+ USE(m5);
+ USE(m6);
+ VECTOR_FP_MULTIPLY_QFMS(+, 1)
+ return length;
+}
+
+EVALUATE(VFNMS) {
+ DCHECK_OPCODE(VFNMS);
+ DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
+ USE(m5);
+ USE(m6);
+ VECTOR_FP_MULTIPLY_QFMS(-, -1)
+ return length;
+}
+#undef VECTOR_FP_MULTIPLY_QFMS
+#undef VECTOR_FP_MULTIPLY_QFMS_OPERATION
+
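+
+// The two QFMS users expand to the two fused shapes: VFMA computes
+// r2 * r3 + r4 per lane (op is +, sign is 1) and VFNMS computes
+// -(r2 * r3 - r4) (op is -, sign is -1), with m5 == 8 restricting the
+// operation to the first lane and the isinf checks passing an infinite input
+// through unchanged. A scalar illustration of the two shapes, ignoring those
+// special cases (illustrative only):
+//
+//   #include <cassert>
+//
+//   double Vfma(double a, double b, double c) { return a * b + c; }
+//   double Vfnms(double a, double b, double c) { return -(a * b - c); }
+//
+//   int main() {
+//     assert(Vfma(2.0, 3.0, 4.0) == 10.0);   // 2*3 + 4
+//     assert(Vfnms(2.0, 3.0, 4.0) == -2.0);  // -(2*3 - 4)
+//     return 0;
+//   }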
template <class T, class Operation>
void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
T* dst_ptr = reinterpret_cast<T*>(dst);
@@ -3707,8 +3995,13 @@ void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
#define VECTOR_FP_MAX_MIN_FOR_TYPE(type, op) \
VectorFPMaxMin<type>(&get_simd_register(r1), &get_simd_register(r2), \
- &get_simd_register(r3), \
- [](type a, type b) { return (a op b) ? a : b; });
+ &get_simd_register(r3), [](type a, type b) { \
+ if (signbit(b) op signbit(a)) \
+ return a; \
+ else if (signbit(b) != signbit(a)) \
+ return b; \
+ return (a op b) ? a : b; \
+ });
#define VECTOR_FP_MAX_MIN(op) \
switch (m4) { \
@@ -4388,13 +4681,14 @@ EVALUATE(LPR) {
// Load Positive (32)
DECODE_RR_INSTRUCTION(r1, r2);
int32_t r2_val = get_low_register<int32_t>(r2);
- // If negative, then negate it.
- r2_val = (r2_val < 0) ? -r2_val : r2_val;
- set_low_register(r1, r2_val);
SetS390ConditionCode<int32_t>(r2_val, 0);
if (r2_val == (static_cast<int32_t>(1) << 31)) {
SetS390OverflowCode(true);
+ } else {
+ // If negative and not overflowing, then negate it.
+ r2_val = (r2_val < 0) ? -r2_val : r2_val;
}
+ set_low_register(r1, r2_val);
return length;
}
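
The reordering above matters only for the single unrepresentable input: negating INT32_MIN overflows a 32-bit register, so the patched LPR now flags overflow and leaves the value unchanged instead of storing the result of the overflowing negation. LPGR below gets the same treatment for the 64-bit case. A scalar model with the condition-code bookkeeping reduced to a bool (illustrative only):

    #include <cassert>
    #include <cstdint>

    int32_t LoadPositive32(int32_t v, bool* overflow) {
      *overflow = (v == INT32_MIN);  // -INT32_MIN is not representable
      return *overflow ? v : (v < 0 ? -v : v);
    }

    int main() {
      bool of = false;
      assert(LoadPositive32(-5, &of) == 5 && !of);
      assert(LoadPositive32(INT32_MIN, &of) == INT32_MIN && of);
      return 0;
    }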
@@ -6430,7 +6724,7 @@ EVALUATE(TRAP4) {
int length = 4;
// whack the space of the caller allocated stack
int64_t sp_addr = get_register(sp);
- for (int i = 0; i < kCalleeRegisterSaveAreaSize / kPointerSize; ++i) {
+ for (int i = 0; i < kCalleeRegisterSaveAreaSize / kSystemPointerSize; ++i) {
 // we don't want to whack the RA (r14)
if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xDEADBABE;
}
@@ -7089,14 +7383,19 @@ EVALUATE(CFEBRA) {
break;
}
case ROUND_TOWARD_0: {
- // check for overflow, cast r2_fval to 64bit integer
+ // check for overflow, cast r2_fval to double
// then check value within the range of INT_MIN and INT_MAX
// and set condition code accordingly
- int64_t temp = static_cast<int64_t>(r2_fval);
- if (temp < INT_MIN || temp > INT_MAX) {
+ double temp = static_cast<double>(r2_fval);
+ if (temp < INT_MIN) {
+ r1_val = kMinInt;
+ condition_reg_ = CC_OF;
+ } else if (temp > INT_MAX) {
+ r1_val = kMaxInt;
condition_reg_ = CC_OF;
+ } else {
+ r1_val = static_cast<int32_t>(r2_fval);
}
- r1_val = static_cast<int32_t>(r2_fval);
break;
}
case ROUND_TOWARD_PLUS_INFINITE: {
@@ -7216,8 +7515,11 @@ EVALUATE(CLFEBR) {
DECODE_RRE_INSTRUCTION(r1, r2);
float r2_val = get_float32_from_d_register(r2);
uint32_t r1_val = static_cast<uint32_t>(r2_val);
- set_low_register(r1, r1_val);
SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+ double temp = static_cast<double>(r2_val);
+ if (temp < 0) r1_val = 0;
+ if (temp > kMaxUInt32) r1_val = kMaxUInt32;
+ set_low_register(r1, r1_val);
return length;
}
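
The added clamp saturates the float-to-uint32 conversion: negative inputs now store 0 and inputs above UINT32_MAX store UINT32_MAX, with the comparisons done in double so every float value is ordered correctly against the bounds. A scalar sketch of the resulting behavior (condition-code handling omitted; the sketch clamps before the narrowing cast):

    #include <cassert>
    #include <cstdint>

    uint32_t SaturatingFloatToUint32(float f) {
      double d = static_cast<double>(f);  // exact: double covers all floats
      if (d < 0.0) return 0;
      if (d > static_cast<double>(UINT32_MAX)) return UINT32_MAX;
      return static_cast<uint32_t>(f);
    }

    int main() {
      assert(SaturatingFloatToUint32(-1.5f) == 0);
      assert(SaturatingFloatToUint32(5e12f) == UINT32_MAX);
      assert(SaturatingFloatToUint32(42.0f) == 42u);
      return 0;
    }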
@@ -7743,12 +8045,14 @@ EVALUATE(LPGR) {
 // Load Positive (64)
DECODE_RRE_INSTRUCTION(r1, r2);
int64_t r2_val = get_register(r2);
- r2_val = (r2_val < 0) ? -r2_val : r2_val; // If negative, then negate it.
- set_register(r1, r2_val);
SetS390ConditionCode<int64_t>(r2_val, 0);
if (r2_val == (static_cast<int64_t>(1) << 63)) {
SetS390OverflowCode(true);
+ } else {
+ // If negative and not overflowing, then negate it.
+ r2_val = (r2_val < 0) ? -r2_val : r2_val;
}
+ set_register(r1, r2_val);
return length;
}
@@ -10897,6 +11201,7 @@ EVALUATE(CXZT) {
#undef DECODE_VRR_B_INSTRUCTION
#undef DECODE_VRR_C_INSTRUCTION
#undef DECODE_VRR_E_INSTRUCTION
+#undef DECODE_VRR_F_INSTRUCTION
#undef DECODE_VRX_INSTRUCTION
#undef DECODE_VRS_INSTRUCTION
#undef DECODE_VRI_A_INSTRUCTION
diff --git a/deps/v8/src/execution/s390/simulator-s390.h b/deps/v8/src/execution/s390/simulator-s390.h
index 0921ac839e..57dbb1e3de 100644
--- a/deps/v8/src/execution/s390/simulator-s390.h
+++ b/deps/v8/src/execution/s390/simulator-s390.h
@@ -230,6 +230,7 @@ class Simulator : public SimulatorBase {
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
+ void DebugAtNextPC();
// Stop helper functions.
inline bool isStopInstruction(Instruction* instr);
@@ -457,7 +458,7 @@ class Simulator : public SimulatorBase {
// Simulator support.
char* stack_;
- static const size_t stack_protection_size_ = 256 * kPointerSize;
+ static const size_t stack_protection_size_ = 256 * kSystemPointerSize;
bool pc_modified_;
int64_t icount_;
@@ -502,6 +503,7 @@ class Simulator : public SimulatorBase {
S390_VRR_A_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
S390_VRR_C_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
S390_VRR_E_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRR_F_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
S390_VRX_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
S390_VRS_A_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
S390_VRS_B_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h
index 6eca3f2b47..58aa753a33 100644
--- a/deps/v8/src/execution/simulator-base.h
+++ b/deps/v8/src/execution/simulator-base.h
@@ -121,6 +121,7 @@ class SimulatorBase {
// - V8_TARGET_ARCH_MIPS: swi (software-interrupt)
// - V8_TARGET_ARCH_MIPS64: swi (software-interrupt)
// - V8_TARGET_ARCH_PPC: svc (Supervisor Call)
+// - V8_TARGET_ARCH_PPC64: svc (Supervisor Call)
// - V8_TARGET_ARCH_S390: svc (Supervisor Call)
class Redirection {
public:
diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h
index 4000973a24..a4e07b235b 100644
--- a/deps/v8/src/execution/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -18,7 +18,7 @@
#include "src/execution/arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/execution/arm/simulator-arm.h"
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/execution/ppc/simulator-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/execution/mips/simulator-mips.h"
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
index febd1ecb0a..b445158557 100644
--- a/deps/v8/src/execution/stack-guard.h
+++ b/deps/v8/src/execution/stack-guard.h
@@ -130,7 +130,7 @@ class V8_EXPORT_PRIVATE StackGuard final {
// The stack limit is split into a JavaScript and a C++ stack limit. These
// two are the same except when running on a simulator where the C++ and
// JavaScript stacks are separate. Each of the two stack limits have two
- // values. The one eith the real_ prefix is the actual stack limit
+ // values. The one with the real_ prefix is the actual stack limit
// set for the VM. The one without the real_ prefix has the same value as
// the actual stack limit except when there is an interruption (e.g. debug
// break or preemption) in which case it is lowered to make stack checks
diff --git a/deps/v8/src/execution/vm-state-inl.h b/deps/v8/src/execution/vm-state-inl.h
index 87dc185f2d..1781917764 100644
--- a/deps/v8/src/execution/vm-state-inl.h
+++ b/deps/v8/src/execution/vm-state-inl.h
@@ -14,11 +14,9 @@
namespace v8 {
namespace internal {
-//
-// VMState class implementation. A simple stack of VM states held by the
-// logger and partially threaded through the call stack. States are pushed by
-// VMState construction and popped by destruction.
-//
+// VMState class implementation. A simple stack of VM states held by the logger
+// and partially threaded through the call stack. States are pushed by VMState
+// construction and popped by destruction.
inline const char* StateToString(StateTag state) {
switch (state) {
case JS:
@@ -35,6 +33,8 @@ inline const char* StateToString(StateTag state) {
return "OTHER";
case EXTERNAL:
return "EXTERNAL";
+ case ATOMICS_WAIT:
+ return "ATOMICS_WAIT";
case IDLE:
return "IDLE";
}
diff --git a/deps/v8/src/execution/vm-state.h b/deps/v8/src/execution/vm-state.h
index 38b70f5a95..2ccde290d5 100644
--- a/deps/v8/src/execution/vm-state.h
+++ b/deps/v8/src/execution/vm-state.h
@@ -11,11 +11,10 @@
namespace v8 {
namespace internal {
-// Logging and profiling. A StateTag represents a possible state of
-// the VM. The logger maintains a stack of these. Creating a VMState
-// object enters a state by pushing on the stack, and destroying a
-// VMState object leaves a state by popping the current state from the
-// stack.
+// Logging and profiling. A StateTag represents a possible state of the VM. The
+// logger maintains a stack of these. Creating a VMState object enters a state
+// by pushing on the stack, and destroying a VMState object leaves a state by
+// popping the current state from the stack.
template <StateTag Tag>
class VMState {
public:
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.h b/deps/v8/src/execution/x64/frame-constants-x64.h
index 409880cc7b..6ce0a02e14 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.h
+++ b/deps/v8/src/execution/x64/frame-constants-x64.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_X64_FRAME_CONSTANTS_X64_H_
#define V8_EXECUTION_X64_FRAME_CONSTANTS_X64_H_
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/execution/frame-constants.h"
@@ -54,6 +55,42 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
kNumberOfSavedFpParamRegs * kSimd128Size;
};
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {rax, rcx, rdx, rbx, rsi, rdi, r9}
+ static constexpr uint32_t kPushedGpRegs = 0b1011001111;
+ // {xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7}
+ static constexpr uint32_t kPushedFpRegs = 0b11111111;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kSimd128Size;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSimd128Size;
+ }
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/extensions/cputracemark-extension.cc b/deps/v8/src/extensions/cputracemark-extension.cc
index 6162afad5f..9dfa9761fd 100644
--- a/deps/v8/src/extensions/cputracemark-extension.cc
+++ b/deps/v8/src/extensions/cputracemark-extension.cc
@@ -16,12 +16,10 @@ CpuTraceMarkExtension::GetNativeFunctionTemplate(v8::Isolate* isolate,
void CpuTraceMarkExtension::Mark(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsUint32()) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(
- args.GetIsolate(),
- "First parameter to cputracemark() must be a unsigned int32.",
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "First parameter to cputracemark() must be an unsigned int32."));
+ return;
}
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 00a946b6ff..b153ebd075 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -59,11 +59,9 @@ ExternalizeStringExtension::GetNativeFunctionTemplate(
void ExternalizeStringExtension::Externalize(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(
- args.GetIsolate(),
- "First parameter to externalizeString() must be a string.",
- NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "First parameter to externalizeString() must be a string."));
return;
}
bool force_two_byte = false;
@@ -71,22 +69,17 @@ void ExternalizeStringExtension::Externalize(
if (args[1]->IsBoolean()) {
force_two_byte = args[1]->BooleanValue(args.GetIsolate());
} else {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(
- args.GetIsolate(),
- "Second parameter to externalizeString() must be a boolean.",
- NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "Second parameter to externalizeString() must be a boolean."));
return;
}
}
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (!string->SupportsExternalization()) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(),
- "string does not support externalization.",
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(), "string does not support externalization."));
return;
}
if (string->IsOneByteRepresentation() && !force_two_byte) {
@@ -105,10 +98,8 @@ void ExternalizeStringExtension::Externalize(
if (!result) delete resource;
}
if (!result) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(),
- "externalizeString() failed.",
- NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(), "externalizeString() failed."));
return;
}
}
@@ -117,11 +108,9 @@ void ExternalizeStringExtension::Externalize(
void ExternalizeStringExtension::IsOneByte(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(
- args.GetIsolate(),
- "isOneByteString() requires a single string argument.",
- NewStringType::kNormal).ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "isOneByteString() requires a single string argument."));
return;
}
bool is_one_byte =
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 8f897ae97e..9d63d760ec 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -27,10 +27,10 @@ static void AddCounter(v8::Isolate* isolate,
StatsCounter* counter,
const char* name) {
if (counter->Enabled()) {
- object->Set(isolate->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(isolate, *counter->GetInternalPointer()))
+ object
+ ->Set(isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate, name).ToLocalChecked(),
+ v8::Number::New(isolate, *counter->GetInternalPointer()))
.FromJust();
}
}
@@ -39,8 +39,7 @@ static void AddNumber(v8::Isolate* isolate, v8::Local<v8::Object> object,
double value, const char* name) {
object
->Set(isolate->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
- .ToLocalChecked(),
+ v8::String::NewFromUtf8(isolate, name).ToLocalChecked(),
v8::Number::New(isolate, value))
.FromJust();
}
@@ -50,10 +49,11 @@ static void AddNumber64(v8::Isolate* isolate,
v8::Local<v8::Object> object,
int64_t value,
const char* name) {
- object->Set(isolate->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(isolate, static_cast<double>(value))).FromJust();
+ object
+ ->Set(isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate, name).ToLocalChecked(),
+ v8::Number::New(isolate, static_cast<double>(value)))
+ .FromJust();
}
diff --git a/deps/v8/src/extensions/vtunedomain-support-extension.cc b/deps/v8/src/extensions/vtunedomain-support-extension.cc
index e5b68ff63d..793ffec716 100644
--- a/deps/v8/src/extensions/vtunedomain-support-extension.cc
+++ b/deps/v8/src/extensions/vtunedomain-support-extension.cc
@@ -109,13 +109,10 @@ void VTuneDomainSupportExtension::Mark(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsString() ||
!args[2]->IsString()) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(
- args.GetIsolate(),
- "Parameter number should be exactly three, first domain name"
- "second task name, third start/end",
- NewStringType::kNormal)
- .ToLocalChecked());
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
+ args.GetIsolate(),
+ "Parameter number should be exactly three, first domain name, "
+ "second task name, third start/end"));
return;
}
@@ -134,8 +131,7 @@ void VTuneDomainSupportExtension::Mark(
int r = 0;
if ((r = libvtune::invoke(params.str().c_str())) != 0) {
args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8(args.GetIsolate(), std::to_string(r).c_str(),
- NewStringType::kNormal)
+ v8::String::NewFromUtf8(args.GetIsolate(), std::to_string(r).c_str())
.ToLocalChecked());
}
}
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 2eb2bbc80e..c3f360cdf0 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -134,6 +134,12 @@ struct MaybeBoolFlag {
#define COMPRESS_POINTERS_BOOL false
#endif
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL true
+#else
+#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
+#endif
+
// Supported ARM configurations are:
// "armv6": ARMv6 + VFPv2
// "armv7": ARMv7 + VFPv3-D32 + NEON
@@ -167,6 +173,8 @@ struct MaybeBoolFlag {
FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, {false COMMA false}, cmt)
#define DEFINE_INT(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_UINT(nam, def, cmt) FLAG(UINT, unsigned int, nam, def, cmt)
+#define DEFINE_UINT_READONLY(nam, def, cmt) \
+ FLAG_READONLY(UINT, unsigned int, nam, def, cmt)
#define DEFINE_UINT64(nam, def, cmt) FLAG(UINT64, uint64_t, nam, def, cmt)
#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_SIZE_T(nam, def, cmt) FLAG(SIZE_T, size_t, nam, def, cmt)
@@ -368,7 +376,7 @@ DEFINE_BOOL(assert_types, false,
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
-DEFINE_BOOL(always_promote_young_mc, false,
+DEFINE_BOOL(always_promote_young_mc, true,
"always promote young objects during mark-compact")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")
@@ -397,7 +405,7 @@ DEFINE_BOOL_READONLY(internalize_on_the_fly, true,
"internalize string keys for generic keyed ICs on the fly")
 // Flag for one-shot optimizations.
-DEFINE_BOOL(enable_one_shot_optimization, true,
+DEFINE_BOOL(enable_one_shot_optimization, false,
"Enable size optimizations for the code that will "
"only be executed once")
@@ -486,7 +494,7 @@ DEFINE_BOOL(turboprop, false,
"enable experimental turboprop mid-tier compiler.")
DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
DEFINE_IMPLICATION(turboprop, concurrent_inlining)
-DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 10 * KB)
+DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
// Flags for concurrent recompilation.
DEFINE_BOOL(concurrent_recompilation, true,
@@ -636,6 +644,7 @@ DEFINE_BOOL(turbo_rewrite_far_jumps, true,
DEFINE_BOOL(
stress_gc_during_compilation, false,
"simulate GC/compiler thread race related to https://crbug.com/v8/8520")
+DEFINE_BOOL(turbo_fast_api_calls, false, "enable fast API calls from TurboFan")
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,
@@ -668,9 +677,12 @@ DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
DEFINE_BOOL(wasm_test_streaming, false,
"use streaming compilation instead of async compilation for tests")
-// TODO(4153): Set this back to v8::internal::wasm::kV8MaxWasmMemoryPages
-DEFINE_UINT(wasm_max_mem_pages, 32767,
- "maximum number of 64KiB memory pages of a wasm instance")
+DEFINE_UINT(wasm_max_mem_pages,
+ v8::internal::wasm::kSpecMaxWasmInitialMemoryPages,
+ "maximum initial number of 64KiB memory pages of a wasm instance")
+DEFINE_UINT(wasm_max_mem_pages_growth,
+ v8::internal::wasm::kSpecMaxWasmMaximumMemoryPages,
+ "maximum number of 64KiB pages a Wasm memory can grow to")
DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
DEFINE_UINT(wasm_max_code_space, v8::internal::kMaxWasmCodeMB,
@@ -709,6 +721,7 @@ DEFINE_INT(wasm_tier_mask_for_testing, 0,
DEFINE_BOOL(debug_in_liftoff, false,
"use Liftoff instead of the C++ interpreter for debugging "
"WebAssembly (experimental)")
+DEFINE_IMPLICATION(future, debug_in_liftoff)
DEFINE_BOOL(validate_asm, true, "validate asm.js modules before compiling")
DEFINE_BOOL(suppress_asm_messages, false,
@@ -723,7 +736,7 @@ DEFINE_DEBUG_BOOL(dump_wasm_module, false, "dump wasm module bytes")
DEFINE_STRING(dump_wasm_module_path, nullptr,
"directory to dump wasm modules to")
-// Declare command-line flags for WASM features. Warning: avoid using these
+// Declare command-line flags for Wasm features. Warning: avoid using these
// flags directly in the implementation. Instead accept wasm::WasmFeatures
// for configurability.
#include "src/wasm/wasm-feature-flags.h"
@@ -767,12 +780,20 @@ DEFINE_BOOL(wasm_lazy_compilation, false,
"enable lazy compilation for all wasm modules")
DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
"trace lazy compilation of wasm functions")
-DEFINE_BOOL(wasm_grow_shared_memory, true,
- "allow growing shared WebAssembly memory objects")
-DEFINE_BOOL(wasm_atomics_on_non_shared_memory, false,
- "allow atomic operations on non-shared WebAssembly memory")
DEFINE_BOOL(wasm_lazy_validation, false,
"enable lazy validation for lazily compiled wasm functions")
+
+// Flags for wasm prototyping that are not strictly features i.e., part of
+// an existing proposal that may be conditionally enabled.
+DEFINE_BOOL(wasm_atomics_on_non_shared_memory, false,
+ "allow atomic operations on non-shared WebAssembly memory")
+DEFINE_BOOL(wasm_grow_shared_memory, true,
+ "allow growing shared WebAssembly memory objects")
+DEFINE_BOOL(wasm_simd_post_mvp, false,
+ "allow experimental SIMD operations for prototyping that are not "
+ "included in the current proposal")
+DEFINE_IMPLICATION(wasm_simd_post_mvp, experimental_wasm_simd)
+
// wasm-interpret-all resets {asm-,}wasm-lazy-compilation.
DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
@@ -781,9 +802,6 @@ DEFINE_BOOL(wasm_code_gc, true, "enable garbage collection of wasm code")
DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code")
DEFINE_BOOL(stress_wasm_code_gc, false,
"stress test garbage collection of wasm code")
-DEFINE_BOOL(wasm_far_jump_table, true,
- "use multiple separate code spaces that might require far jumps "
- "between them")
DEFINE_INT(wasm_max_initial_code_space_reservation, 0,
"maximum size of the initial wasm code space reservation (in MB)")
@@ -801,9 +819,6 @@ DEFINE_SIZE_T(max_semi_space_size, 0,
"max size of a semi-space (in MBytes), the new space consists of "
"two semi-spaces")
DEFINE_INT(semi_space_growth_factor, 2, "factor by which to grow the new space")
-DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
- "Grow the new space based on the percentage of survivors instead "
- "of their absolute value.")
DEFINE_SIZE_T(max_old_space_size, 0, "max size of the old space (in Mbytes)")
DEFINE_SIZE_T(
max_heap_size, 0,
@@ -811,7 +826,7 @@ DEFINE_SIZE_T(
"both max_semi_space_size and max_old_space_size take precedence. "
"All three flags cannot be specified at the same time.")
DEFINE_SIZE_T(initial_heap_size, 0, "initial size of the heap (in Mbytes)")
-DEFINE_BOOL(huge_max_old_generation_size, false,
+DEFINE_BOOL(huge_max_old_generation_size, true,
 "Increase max size of the old space to 4 GB for x64 systems with "
 "physical memory bigger than 16 GB")
DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
@@ -895,6 +910,7 @@ DEFINE_BOOL_READONLY(array_buffer_extension, V8_ARRAY_BUFFER_EXTENSION_BOOL,
DEFINE_IMPLICATION(array_buffer_extension, always_promote_young_mc)
DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
"concurrently sweep array buffers")
+DEFINE_BOOL(local_heaps, false, "allow heap access from background tasks")
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
DEFINE_INT(ephemeron_fixpoint_iterations, 10,
"number of fixpoint iterations it takes to switch to linear "
@@ -1014,8 +1030,6 @@ DEFINE_BOOL(young_generation_large_objects, true,
"allocates large objects by default in the young generation large "
"object space")
-DEFINE_BOOL(idle_time_scavenge, true, "Perform scavenges in idle time.")
-
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")
@@ -1064,6 +1078,9 @@ DEFINE_BOOL(enable_regexp_unaligned_accesses, true,
// api.cc
DEFINE_BOOL(script_streaming, true, "enable parsing on background")
+DEFINE_BOOL(
+ finalize_streaming_on_background, false,
+ "perform the script streaming finalization on the background thread")
DEFINE_BOOL(disable_old_api_accessors, false,
"Disable old-style API accessors whose setters trigger through the "
"prototype chain")
@@ -1211,6 +1228,15 @@ DEFINE_IMPLICATION(trace_maps, log_code)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_BOOL(allow_natives_for_fuzzing, false,
+ "allow only natives explicitly whitelisted for fuzzers")
+DEFINE_BOOL(allow_natives_for_differential_fuzzing, false,
+ "allow only natives explicitly whitelisted for differential "
+ "fuzzers")
+DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing, allow_natives_syntax)
+DEFINE_IMPLICATION(allow_natives_for_fuzzing, allow_natives_syntax)
+DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing,
+ allow_natives_for_fuzzing)
DEFINE_BOOL(parse_only, false, "only parse the sources")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
@@ -1233,8 +1259,6 @@ DEFINE_INT(sim_stack_size, 2 * MB / KB,
"in kBytes (default is 2 MB)")
DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
"When logging, try to use coloured output.")
-DEFINE_BOOL(ignore_asm_unimplemented_break, false,
- "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
DEFINE_BOOL(trace_sim_messages, false,
"Trace simulator debug messages. Implied by --trace-sim.")
@@ -1279,15 +1303,25 @@ DEFINE_GENERIC_IMPLICATION(
runtime_call_stats,
TracingFlags::runtime_stats.store(
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE))
+DEFINE_BOOL(rcs, false, "report runtime call counts and times")
+DEFINE_IMPLICATION(rcs, runtime_call_stats)
+
+DEFINE_BOOL(rcs_cpu_time, false,
+ "report runtime times in cpu time (the default is wall time)")
+DEFINE_IMPLICATION(rcs_cpu_time, rcs)
// snapshot-common.cc
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
DEFINE_BOOL(serialization_statistics, false,
"Collect statistics on serialized objects.")
+#ifdef V8_ENABLE_THIRD_PARTY_HEAP
+DEFINE_UINT_READONLY(serialization_chunk_size, 1,
+ "Custom size for serialization chunks")
+#else
DEFINE_UINT(serialization_chunk_size, 4096,
"Custom size for serialization chunks")
-
+#endif
// Regexp
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
@@ -1380,6 +1414,18 @@ DEFINE_BOOL(multi_mapped_mock_allocator, false,
"Use a multi-mapped mock ArrayBuffer allocator for testing.")
#endif
+// Flags for Wasm GDB remote debugging.
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+#define DEFAULT_WASM_GDB_REMOTE_PORT 8765
+DEFINE_BOOL(wasm_gdb_remote, false,
+ "enable GDB-remote for WebAssembly debugging")
+DEFINE_INT(wasm_gdb_remote_port, DEFAULT_WASM_GDB_REMOTE_PORT,
+ "default port for WebAssembly debugging with LLDB.")
+DEFINE_BOOL(wasm_pause_waiting_for_debugger, false,
+ "pause at the first WebAssembly instruction waiting for a debugger "
+ "to attach")
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
//
// GDB JIT integration flags.
//
@@ -1465,6 +1511,10 @@ DEFINE_BOOL(print_break_location, false, "print source location on debug break")
DEFINE_DEBUG_BOOL(trace_wasm_instances, false,
"trace creation and collection of wasm instances")
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+DEFINE_BOOL(trace_wasm_gdb_remote, false, "trace WebAssembly GDB-remote server")
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
//
// Logging and profiling flags
//
@@ -1575,8 +1625,8 @@ DEFINE_BOOL(vtune_prof_annotate_wasm, false,
DEFINE_BOOL(win64_unwinding_info, true, "Enable unwinding info for Windows/x64")
-#ifdef V8_TARGET_ARCH_ARM
-// Unsupported on arm. See https://crbug.com/v8/8713.
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_S390X)
+// Unsupported on above architectures. See https://crbug.com/v8/8713.
DEFINE_BOOL_READONLY(
interpreted_frames_native_stack, false,
"Show interpreted frames on the native stack (useful for external "
@@ -1651,7 +1701,6 @@ DEFINE_BOOL(predictable_gc_schedule, false,
DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, min_semi_space_size, 4)
DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, max_semi_space_size, 4)
DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, heap_growing_percent, 30)
-DEFINE_NEG_IMPLICATION(predictable_gc_schedule, idle_time_scavenge)
DEFINE_NEG_IMPLICATION(predictable_gc_schedule, memory_reducer)
//
diff --git a/deps/v8/src/flags/flags.cc b/deps/v8/src/flags/flags.cc
index 15f87b0b74..e3564d0f6b 100644
--- a/deps/v8/src/flags/flags.cc
+++ b/deps/v8/src/flags/flags.cc
@@ -594,9 +594,12 @@ static uint32_t flag_hash = 0;
void ComputeFlagListHash() {
std::ostringstream modified_args_as_string;
-#ifdef DEBUG
- modified_args_as_string << "debug";
-#endif // DEBUG
+ if (COMPRESS_POINTERS_BOOL) {
+ modified_args_as_string << "ptr-compr";
+ }
+ if (DEBUG_BOOL) {
+ modified_args_as_string << "debug";
+ }
for (size_t i = 0; i < num_flags; ++i) {
Flag* current = &flags[i];
if (current->type() == Flag::TYPE_BOOL &&
diff --git a/deps/v8/src/handles/factory-handles.h b/deps/v8/src/handles/factory-handles.h
deleted file mode 100644
index 312ced8b7c..0000000000
--- a/deps/v8/src/handles/factory-handles.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HANDLES_FACTORY_HANDLES_H_
-#define V8_HANDLES_FACTORY_HANDLES_H_
-
-namespace v8 {
-namespace internal {
-
-template <typename Impl>
-struct FactoryTraits;
-
-template <typename Impl, typename T>
-using FactoryHandle = typename FactoryTraits<Impl>::template HandleType<T>;
-template <typename Impl, typename T>
-using FactoryMaybeHandle =
- typename FactoryTraits<Impl>::template MaybeHandleType<T>;
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HANDLES_FACTORY_HANDLES_H_
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index fcec0ca761..df4042e8eb 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -186,6 +186,9 @@ class GlobalHandles::NodeSpace final {
iterator begin() { return iterator(first_used_block_); }
iterator end() { return iterator(nullptr); }
+ size_t TotalSize() const { return blocks_ * sizeof(NodeType) * kBlockSize; }
+ size_t handles_count() const { return handles_count_; }
+
private:
void PutNodesOnFreeList(BlockType* block);
V8_INLINE void Free(NodeType* node);
@@ -194,6 +197,8 @@ class GlobalHandles::NodeSpace final {
BlockType* first_block_ = nullptr;
BlockType* first_used_block_ = nullptr;
NodeType* first_free_ = nullptr;
+ size_t blocks_ = 0;
+ size_t handles_count_ = 0;
};
template <class NodeType>
@@ -210,6 +215,7 @@ template <class NodeType>
NodeType* GlobalHandles::NodeSpace<NodeType>::Acquire(Object object) {
if (first_free_ == nullptr) {
first_block_ = new BlockType(global_handles_, this, first_block_);
+ blocks_++;
PutNodesOnFreeList(first_block_);
}
DCHECK_NOT_NULL(first_free_);
@@ -221,7 +227,7 @@ NodeType* GlobalHandles::NodeSpace<NodeType>::Acquire(Object object) {
block->ListAdd(&first_used_block_);
}
global_handles_->isolate()->counters()->global_handles()->Increment();
- global_handles_->handles_count_++;
+ handles_count_++;
DCHECK(node->IsInUse());
return node;
}
@@ -253,7 +259,7 @@ void GlobalHandles::NodeSpace<NodeType>::Free(NodeType* node) {
block->ListRemove(&first_used_block_);
}
global_handles_->isolate()->counters()->global_handles()->Decrement();
- global_handles_->handles_count_--;
+ handles_count_--;
}
template <class Child>
@@ -875,6 +881,19 @@ size_t GlobalHandles::NumberOfOnStackHandlesForTesting() {
return on_stack_nodes_->NumberOfHandlesForTesting();
}
+size_t GlobalHandles::TotalSize() const {
+ return regular_nodes_->TotalSize() + traced_nodes_->TotalSize();
+}
+
+size_t GlobalHandles::UsedSize() const {
+ return regular_nodes_->handles_count() * sizeof(Node) +
+ traced_nodes_->handles_count() * sizeof(TracedNode);
+}
+
+size_t GlobalHandles::handles_count() const {
+ return regular_nodes_->handles_count() + traced_nodes_->handles_count();
+}
+
void GlobalHandles::SetStackStart(void* stack_start) {
on_stack_nodes_->SetStackStart(stack_start);
}
@@ -1232,7 +1251,6 @@ void GlobalHandles::IterateYoungStrongAndDependentRoots(RootVisitor* v) {
v->VisitRootPointer(Root::kGlobalHandles, nullptr, node->location());
}
}
- on_stack_nodes_->Iterate(v);
}
void GlobalHandles::MarkYoungWeakUnmodifiedObjectsPending(
@@ -1517,6 +1535,9 @@ void GlobalHandles::IterateStrongRoots(RootVisitor* v) {
node->location());
}
}
+}
+
+void GlobalHandles::IterateStrongStackRoots(RootVisitor* v) {
on_stack_nodes_->Iterate(v);
}
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index e2e733d8f2..de08007d22 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -121,6 +121,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags);
void IterateStrongRoots(RootVisitor* v);
+ void IterateStrongStackRoots(RootVisitor* v);
void IterateWeakRoots(RootVisitor* v);
void IterateAllRoots(RootVisitor* v);
void IterateAllYoungRoots(RootVisitor* v);
@@ -175,8 +176,11 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
Isolate* isolate() const { return isolate_; }
+ size_t TotalSize() const;
+ size_t UsedSize() const;
+
// Number of global handles.
- size_t handles_count() const { return handles_count_; }
+ size_t handles_count() const;
size_t GetAndResetGlobalHandleResetCount() {
size_t old = number_of_phantom_handle_resets_;
@@ -236,8 +240,6 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
std::vector<TracedNode*> traced_young_nodes_;
std::unique_ptr<OnStackTracedNodeSpace> on_stack_nodes_;
- // Field always containing the number of handles to global objects.
- size_t handles_count_ = 0;
size_t number_of_phantom_handle_resets_ = 0;
std::vector<std::pair<Node*, PendingPhantomCallback>>
diff --git a/deps/v8/src/handles/handles-inl.h b/deps/v8/src/handles/handles-inl.h
index 0f53edf5d6..c8b4b4556b 100644
--- a/deps/v8/src/handles/handles-inl.h
+++ b/deps/v8/src/handles/handles-inl.h
@@ -6,15 +6,25 @@
#define V8_HANDLES_HANDLES_INL_H_
#include "src/execution/isolate.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles.h"
+#include "src/handles/local-handles-inl.h"
#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
+class LocalHeap;
+
HandleBase::HandleBase(Address object, Isolate* isolate)
: location_(HandleScope::GetHandle(isolate, object)) {}
+HandleBase::HandleBase(Address object, OffThreadIsolate* isolate)
+ : location_(isolate->NewHandle(object)) {}
+
+HandleBase::HandleBase(Address object, LocalHeap* local_heap)
+ : location_(LocalHandleScope::GetHandle(local_heap, object)) {}
+
// Allocate a new handle for the object, do not canonicalize.
template <typename T>
@@ -34,33 +44,41 @@ Handle<T>::Handle(T object, Isolate* isolate)
: HandleBase(object.ptr(), isolate) {}
template <typename T>
+Handle<T>::Handle(T object, OffThreadIsolate* isolate)
+ : HandleBase(object.ptr(), isolate) {}
+
+template <typename T>
+Handle<T>::Handle(T object, LocalHeap* local_heap)
+ : HandleBase(object.ptr(), local_heap) {}
+
+template <typename T>
V8_INLINE Handle<T> handle(T object, Isolate* isolate) {
return Handle<T>(object, isolate);
}
-// Convenience overloads for cases where we want to either create a Handle or an
-// OffThreadHandle, depending on whether we have a Factory or an
-// OffThreadFactory.
template <typename T>
-V8_INLINE Handle<T> handle(T object, Factory* factory) {
- return factory->MakeHandle<T>(object);
+V8_INLINE Handle<T> handle(T object, OffThreadIsolate* isolate) {
+ return Handle<T>(object, isolate);
}
+
template <typename T>
-V8_INLINE OffThreadHandle<T> handle(T object, OffThreadFactory* factory) {
- // Convienently, we don't actually need the factory to create this handle.
- return OffThreadHandle<T>(object);
+V8_INLINE Handle<T> handle(T object, LocalHeap* local_heap) {
+ return Handle<T>(object, local_heap);
}
-// Similar convenience overloads for when we already have a Handle, but want
-// either a Handle or an OffThreadHandle.
+// Convenience overloads for when we already have a Handle, regardless of
+// which isolate or local heap the callsite provides.
template <typename T>
-V8_INLINE Handle<T> handle(Handle<T> handle, Factory* factory) {
+V8_INLINE Handle<T> handle(Handle<T> handle, Isolate* isolate) {
return handle;
}
template <typename T>
-V8_INLINE OffThreadHandle<T> handle(Handle<T> handle,
- OffThreadFactory* factory) {
- return OffThreadHandle<T>(*handle);
+V8_INLINE Handle<T> handle(Handle<T> handle, OffThreadIsolate* isolate) {
+ return Handle<T>(*handle);
+}
+template <typename T>
+V8_INLINE Handle<T> handle(Handle<T> handle, LocalHeap* local_heap) {
+ return Handle<T>(*handle, local_heap);
}
template <typename T>
@@ -197,6 +215,13 @@ inline SealHandleScope::~SealHandleScope() {
#endif
+template <typename T>
+Handle<T> OffThreadHandleScope::CloseAndEscape(Handle<T> handle_value) {
+ // At the moment, off-thread handle scopes do nothing on close, so we can
+ // safely return the same handle value.
+ return handle_value;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index 87c435061e..87cbc6cc40 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -34,6 +34,8 @@ bool HandleBase::IsDereferenceAllowed() const {
if (object.IsSmi()) return true;
HeapObject heap_object = HeapObject::cast(object);
if (IsReadOnlyHeapObject(heap_object)) return true;
+ if (Heap::InOffThreadSpace(heap_object)) return true;
+
Isolate* isolate = GetIsolateFromWritableObject(heap_object);
RootIndex root_index;
if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index 0c724ad2d9..b90a942df1 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -12,7 +12,6 @@
#include "src/base/macros.h"
#include "src/common/checks.h"
#include "src/common/globals.h"
-#include "src/handles/factory-handles.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -22,12 +21,15 @@ namespace internal {
class DeferredHandles;
class HandleScopeImplementer;
class Isolate;
+class LocalHeap;
+class OffThreadIsolate;
template <typename T>
class MaybeHandle;
class Object;
class OrderedHashMap;
class OrderedHashSet;
class OrderedNameDictionary;
+class RootVisitor;
class SmallOrderedHashMap;
class SmallOrderedHashSet;
class SmallOrderedNameDictionary;
@@ -39,6 +41,8 @@ class HandleBase {
public:
V8_INLINE explicit HandleBase(Address* location) : location_(location) {}
V8_INLINE explicit HandleBase(Address object, Isolate* isolate);
+ V8_INLINE explicit HandleBase(Address object, OffThreadIsolate* isolate);
+ V8_INLINE explicit HandleBase(Address object, LocalHeap* local_heap);
// Check if this handle refers to the exact same object as the other handle.
V8_INLINE bool is_identical_to(const HandleBase that) const {
@@ -99,7 +103,7 @@ class Handle final : public HandleBase {
T* operator->() { return &object_; }
private:
- friend class Handle;
+ friend class Handle<T>;
explicit ObjectRef(T object) : object_(object) {}
T object_;
@@ -119,6 +123,8 @@ class Handle final : public HandleBase {
}
V8_INLINE Handle(T object, Isolate* isolate);
+ V8_INLINE Handle(T object, OffThreadIsolate* isolate);
+ V8_INLINE Handle(T object, LocalHeap* local_heap);
// Allocate a new handle for the object, do not canonicalize.
V8_INLINE static Handle<T> New(T object, Isolate* isolate);
@@ -176,119 +182,6 @@ template <typename T>
inline std::ostream& operator<<(std::ostream& os, Handle<T> handle);
// ----------------------------------------------------------------------------
-// A fake Handle that simply wraps an object reference. This is used for
-// off-thread Objects, where we want a class that behaves like Handle for the
-// purposes of operator->, casting, etc., but isn't a GC root and doesn't
-// require access to the Isolate.
-template <typename T>
-class OffThreadHandle {
- public:
- OffThreadHandle() = default;
-
- template <typename U>
- explicit OffThreadHandle(U obj) : obj_(obj) {}
-
- // Constructor for handling automatic up casting. We rely on the compiler
- // making sure that the assignment to obj_ is legitimate.
- template <typename U>
- // NOLINTNEXTLINE
- OffThreadHandle<T>(OffThreadHandle<U> other) : obj_(*other) {}
-
- T operator*() const { return obj_; }
- T* operator->() { return &obj_; }
- const T* operator->() const { return &obj_; }
-
- template <typename U>
- static OffThreadHandle<T> cast(OffThreadHandle<U> other) {
- return OffThreadHandle<T>(T::cast(*other));
- }
-
- bool is_null() const {
- // TODO(leszeks): This will only work for HeapObjects, figure out a way to
- // make is_null work for Object and Smi too.
- return obj_.is_null();
- }
-
- bool ToHandle(OffThreadHandle<T>* out) {
- if (is_null()) return false;
-
- *out = *this;
- return true;
- }
- OffThreadHandle<T> ToHandleChecked() {
- DCHECK(!is_null());
- return *this;
- }
-
- private:
- T obj_;
-};
-
-// A helper class which wraps an normal or off-thread handle, and returns one
-// or the other depending on the factory type.
-template <typename T>
-class HandleOrOffThreadHandle {
- public:
- HandleOrOffThreadHandle() = default;
-
- template <typename U>
- HandleOrOffThreadHandle(Handle<U> handle) // NOLINT
- : value_(bit_cast<Address>(static_cast<Handle<T>>(handle).location())) {
-#ifdef DEBUG
- which_ = kHandle;
-#endif
- }
-
- template <typename U>
- HandleOrOffThreadHandle(OffThreadHandle<U> handle) // NOLINT
- : value_(static_cast<OffThreadHandle<T>>(handle)->ptr()) {
-#ifdef DEBUG
- which_ = kOffThreadHandle;
-#endif
- }
-
- // To minimize the impact of these handles on main-thread callers, we allow
- // them to implicitly convert to Handles.
- template <typename U>
- operator Handle<U>() {
- return get<class Factory>();
- }
-
- template <typename FactoryType>
- inline FactoryHandle<FactoryType, T> get() {
- return get_for(Tag<FactoryType>());
- }
-
- inline bool is_null() const { return value_ == 0; }
-
-#ifdef DEBUG
- inline bool is_initialized() { return which_ != kUninitialized; }
-#endif
-
- private:
- // Tagged overloads because we can't specialize the above getter
- // without also specializing the class.
- template <typename FactoryType>
- struct Tag {};
-
- V8_INLINE Handle<T> get_for(Tag<class Factory>) {
- DCHECK_NE(which_, kOffThreadHandle);
- return Handle<T>(reinterpret_cast<Address*>(value_));
- }
- V8_INLINE OffThreadHandle<T> get_for(Tag<class OffThreadFactory>) {
- DCHECK_NE(which_, kHandle);
- return OffThreadHandle<T>(T::unchecked_cast(Object(value_)));
- }
-
- // Either handle.location() or off_thread_handle->ptr().
- Address value_;
-
-#ifdef DEBUG
- enum { kUninitialized, kHandle, kOffThreadHandle } which_;
-#endif
-};
-
-// ----------------------------------------------------------------------------
// A stack-allocated class that governs a number of local handles.
// After a handle scope has been created, all local handles will be
// allocated within that handle scope until either the handle scope is
@@ -474,6 +367,16 @@ struct HandleScopeData final {
}
};
+class OffThreadHandleScope {
+ public:
+ // Off-thread Handles are allocated in the parse/compile zone and are not
+ // cleared out, so the scope doesn't have to do anything.
+ explicit OffThreadHandleScope(OffThreadIsolate* isolate) {}
+
+ template <typename T>
+ inline Handle<T> CloseAndEscape(Handle<T> handle_value);
+};
+
} // namespace internal
} // namespace v8
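
OffThreadHandleScope exists so generic code can keep the usual
scope-and-escape shape even though off-thread handles live in the
parse/compile zone and need no per-scope cleanup. A sketch of that shape,
with BuildString as a hypothetical allocating helper:

    Handle<String> BuildOffThread(OffThreadIsolate* isolate) {
      OffThreadHandleScope scope(isolate);
      Handle<String> result = BuildString(isolate);  // hypothetical allocation
      // On the main thread, CloseAndEscape would relocate the handle out of
      // the dying scope; off-thread it returns the same location unchanged.
      return scope.CloseAndEscape(result);
    }
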
diff --git a/deps/v8/src/handles/local-handles-inl.h b/deps/v8/src/handles/local-handles-inl.h
new file mode 100644
index 0000000000..8f24385621
--- /dev/null
+++ b/deps/v8/src/handles/local-handles-inl.h
@@ -0,0 +1,61 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLES_LOCAL_HANDLES_INL_H_
+#define V8_HANDLES_LOCAL_HANDLES_INL_H_
+
+#include "src/handles/local-handles.h"
+
+#include "src/sanitizer/msan.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+V8_INLINE Address* LocalHandleScope::GetHandle(LocalHeap* local_heap,
+ Address value) {
+ LocalHandles* handles = local_heap->handles();
+ Address* result = handles->scope_.next;
+ if (result == handles->scope_.limit) {
+ result = handles->AddBlock();
+ }
+ DCHECK_LT(result, handles->scope_.limit);
+ handles->scope_.next++;
+ *result = value;
+ return result;
+}
+
+LocalHandleScope::LocalHandleScope(LocalHeap* local_heap) {
+ LocalHandles* handles = local_heap->handles();
+ local_heap_ = local_heap;
+ prev_next_ = handles->scope_.next;
+ prev_limit_ = handles->scope_.limit;
+ handles->scope_.level++;
+}
+
+LocalHandleScope::~LocalHandleScope() {
+ LocalHandles* handles = local_heap_->handles();
+ Address* old_limit = handles->scope_.limit;
+
+ handles->scope_.next = prev_next_;
+ handles->scope_.limit = prev_limit_;
+ handles->scope_.level--;
+
+ if (old_limit != handles->scope_.limit) {
+ handles->RemoveBlocks();
+ old_limit = handles->scope_.limit;
+ }
+
+ // TODO(dinfuehr): Zap handles
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(
+ handles->scope_.next,
+ static_cast<size_t>(reinterpret_cast<Address>(old_limit) -
+ reinterpret_cast<Address>(handles->scope_.next)));
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLES_LOCAL_HANDLES_INL_H_
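
GetHandle is a bump allocator over fixed-size blocks: hand out the next slot,
and grab a fresh block only when next reaches limit. The same shape, reduced
to a self-contained sketch (the class name and kBlockSize are ours, not V8's):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    class BumpSlots {
     public:
      uintptr_t* Get(uintptr_t value) {
        if (next_ == limit_) AddBlock();  // cf. handles->AddBlock()
        uintptr_t* result = next_++;
        *result = value;
        return result;
      }

     private:
      static constexpr size_t kBlockSize = 256;  // cf. kHandleBlockSize
      void AddBlock() {
        blocks_.push_back(std::make_unique<uintptr_t[]>(kBlockSize));
        next_ = blocks_.back().get();
        limit_ = next_ + kBlockSize;
      }
      std::vector<std::unique_ptr<uintptr_t[]>> blocks_;
      uintptr_t* next_ = nullptr;
      uintptr_t* limit_ = nullptr;
    };

What the real scope adds on top is the unwind bookkeeping: the destructor
restores next/limit, trims surplus blocks, and re-poisons the freed range
for MSAN.
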
diff --git a/deps/v8/src/handles/local-handles.cc b/deps/v8/src/handles/local-handles.cc
new file mode 100644
index 0000000000..26e4fb596a
--- /dev/null
+++ b/deps/v8/src/handles/local-handles.cc
@@ -0,0 +1,58 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/handles/local-handles.h"
+
+#include "src/api/api.h"
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+LocalHandles::LocalHandles() { scope_.Initialize(); }
+
+void LocalHandles::Iterate(RootVisitor* visitor) {
+ for (int i = 0; i < static_cast<int>(blocks_.size()) - 1; i++) {
+ Address* block = blocks_[i];
+ visitor->VisitRootPointers(Root::kHandleScope, nullptr,
+ FullObjectSlot(block),
+ FullObjectSlot(&block[kHandleBlockSize]));
+ }
+
+ if (!blocks_.empty()) {
+ Address* block = blocks_.back();
+ visitor->VisitRootPointers(Root::kHandleScope, nullptr,
+ FullObjectSlot(block),
+ FullObjectSlot(scope_.next));
+ }
+}
+
+Address* LocalHandles::AddBlock() {
+ DCHECK_EQ(scope_.next, scope_.limit);
+ Address* block = NewArray<Address>(kHandleBlockSize);
+ blocks_.push_back(block);
+ scope_.next = block;
+ scope_.limit = block + kHandleBlockSize;
+ return block;
+}
+
+void LocalHandles::RemoveBlocks() {
+ while (!blocks_.empty()) {
+ Address* block_start = blocks_.back();
+ Address* block_limit = block_start + kHandleBlockSize;
+
+ if (block_limit == scope_.limit) {
+ break;
+ }
+
+ blocks_.pop_back();
+
+ // TODO(dinfuehr): Zap handles in block
+
+ DeleteArray(block_start);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/handles/local-handles.h b/deps/v8/src/handles/local-handles.h
new file mode 100644
index 0000000000..561ac65765
--- /dev/null
+++ b/deps/v8/src/handles/local-handles.h
@@ -0,0 +1,57 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLES_LOCAL_HANDLES_H_
+#define V8_HANDLES_LOCAL_HANDLES_H_
+
+#include "include/v8-internal.h"
+#include "src/base/functional.h"
+#include "src/base/macros.h"
+#include "src/handles/handles.h"
+#include "src/heap/local-heap.h"
+
+namespace v8 {
+namespace internal {
+
+class RootVisitor;
+
+class LocalHandles {
+ public:
+ LocalHandles();
+
+ void Iterate(RootVisitor* visitor);
+
+ private:
+ HandleScopeData scope_;
+ std::vector<Address*> blocks_;
+
+ V8_EXPORT_PRIVATE Address* AddBlock();
+ V8_EXPORT_PRIVATE void RemoveBlocks();
+
+ friend class LocalHandleScope;
+};
+
+class LocalHandleScope {
+ public:
+ explicit inline LocalHandleScope(LocalHeap* local_heap);
+ inline ~LocalHandleScope();
+
+ V8_INLINE static Address* GetHandle(LocalHeap* local_heap, Address value);
+
+ private:
+ // Prevent heap allocation or illegal handle scopes.
+ void* operator new(size_t size);
+ void operator delete(void* ptr);
+
+ LocalHeap* local_heap_;
+ Address* prev_limit_;
+ Address* prev_next_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocalHandleScope);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLES_LOCAL_HANDLES_H_
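
Usage mirrors HandleScope, just keyed on a LocalHeap. A sketch of
background-thread code, assuming local_heap belongs to the current thread
(GetSomeMap and Process are hypothetical):

    void BackgroundWork(LocalHeap* local_heap) {
      LocalHandleScope scope(local_heap);
      // Handles created here land in local_heap's blocks and are released
      // when the scope unwinds; heap-allocating the scope itself is forbidden
      // by the private operator new.
      Handle<Map> map = handle(GetSomeMap(), local_heap);
      Process(map);
    }
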
diff --git a/deps/v8/src/handles/maybe-handles.h b/deps/v8/src/handles/maybe-handles.h
index 0b93bf82ea..d804374088 100644
--- a/deps/v8/src/handles/maybe-handles.h
+++ b/deps/v8/src/handles/maybe-handles.h
@@ -12,6 +12,10 @@
namespace v8 {
namespace internal {
+struct NullMaybeHandleType {};
+
+constexpr NullMaybeHandleType kNullMaybeHandle;
+
// ----------------------------------------------------------------------------
// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
// into a Handle requires checking that it does not point to nullptr. This
@@ -25,6 +29,8 @@ class MaybeHandle final {
public:
V8_INLINE MaybeHandle() = default;
+ V8_INLINE MaybeHandle(NullMaybeHandleType) {}
+
// Constructor for handling automatic up casting from Handle.
// Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
template <typename S, typename = typename std::enable_if<
@@ -50,7 +56,7 @@ class MaybeHandle final {
// Convert to a Handle with a type that can be upcasted to.
template <typename S>
- V8_INLINE bool ToHandle(Handle<S>* out) const {
+ V8_WARN_UNUSED_RESULT V8_INLINE bool ToHandle(Handle<S>* out) const {
if (location_ == nullptr) {
*out = Handle<T>::null();
return false;
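
Together these two changes tighten call sites: kNullMaybeHandle names the
empty return explicitly, and V8_WARN_UNUSED_RESULT makes ignoring a failed
ToHandle a compiler warning. The intended shape, with TryBuild as a
hypothetical producer:

    MaybeHandle<String> TryBuild(Isolate* isolate);  // hypothetical

    Handle<String> BuildOrFallback(Isolate* isolate, Handle<String> fallback) {
      Handle<String> result;
      if (!TryBuild(isolate).ToHandle(&result)) {
        return fallback;  // the bool can no longer be silently dropped
      }
      return result;
    }

    MaybeHandle<String> NothingToDo() { return kNullMaybeHandle; }
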
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index c5ceee075f..5a02732930 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -1,6 +1,10 @@
+bikineev@chromium.org
dinfuehr@chromium.org
hpayer@chromium.org
mlippautz@chromium.org
+omerkatz@chromium.org
ulan@chromium.org
+per-file *factory*=leszeks@chromium.org
+
# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index d11b08932c..9401db11d5 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -21,6 +21,7 @@ void ArrayBufferList::Append(ArrayBufferExtension* extension) {
tail_ = extension;
}
+ bytes_ += extension->accounting_length();
extension->set_next(nullptr);
}
@@ -37,6 +38,7 @@ void ArrayBufferList::Append(ArrayBufferList* list) {
DCHECK_NULL(list->tail_);
}
+ bytes_ += list->Bytes();
list->Reset();
}
@@ -51,22 +53,35 @@ bool ArrayBufferList::Contains(ArrayBufferExtension* extension) {
return false;
}
+size_t ArrayBufferList::BytesSlow() {
+ ArrayBufferExtension* current = head_;
+ size_t sum = 0;
+
+ while (current) {
+ sum += current->accounting_length();
+ current = current->next();
+ }
+
+ return sum;
+}
+
void ArrayBufferSweeper::EnsureFinished() {
if (!sweeping_in_progress_) return;
+ CHECK(V8_ARRAY_BUFFER_EXTENSION_BOOL);
TryAbortResult abort_result =
heap_->isolate()->cancelable_task_manager()->TryAbort(job_.id);
switch (abort_result) {
case TryAbortResult::kTaskAborted: {
- job_.Sweep();
+ Sweep();
Merge();
break;
}
case TryAbortResult::kTaskRemoved: {
CHECK_NE(job_.state, SweepingState::Uninitialized);
- if (job_.state == SweepingState::Prepared) job_.Sweep();
+ if (job_.state == SweepingState::Prepared) Sweep();
Merge();
break;
}
@@ -86,9 +101,25 @@ void ArrayBufferSweeper::EnsureFinished() {
UNREACHABLE();
}
+ DecrementExternalMemoryCounters();
sweeping_in_progress_ = false;
}
+void ArrayBufferSweeper::DecrementExternalMemoryCounters() {
+ size_t bytes = freed_bytes_.load(std::memory_order_relaxed);
+ if (bytes == 0) return;
+
+ while (!freed_bytes_.compare_exchange_weak(bytes, 0)) {
+ // empty body
+ }
+
+ if (bytes == 0) return;
+
+ heap_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, bytes);
+ heap_->update_external_memory(-static_cast<int64_t>(bytes));
+}
+
void ArrayBufferSweeper::RequestSweepYoung() {
RequestSweep(SweepingScope::Young);
}
@@ -97,12 +128,18 @@ void ArrayBufferSweeper::RequestSweepFull() {
RequestSweep(SweepingScope::Full);
}
+size_t ArrayBufferSweeper::YoungBytes() { return young_bytes_; }
+
+size_t ArrayBufferSweeper::OldBytes() { return old_bytes_; }
+
void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
DCHECK(!sweeping_in_progress_);
if (young_.IsEmpty() && (old_.IsEmpty() || scope == SweepingScope::Young))
return;
+ CHECK(V8_ARRAY_BUFFER_EXTENSION_BOOL);
+
if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
FLAG_concurrent_array_buffer_sweeping) {
Prepare(scope);
@@ -112,7 +149,7 @@ void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
heap_->tracer(),
GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_SWEEP);
base::MutexGuard guard(&sweeping_mutex_);
- job_.Sweep();
+ Sweep();
job_finished_.NotifyAll();
});
job_.id = task->id();
@@ -120,8 +157,9 @@ void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
sweeping_in_progress_ = true;
} else {
Prepare(scope);
- job_.Sweep();
+ Sweep();
Merge();
+ DecrementExternalMemoryCounters();
}
}
@@ -144,6 +182,8 @@ void ArrayBufferSweeper::Merge() {
CHECK_EQ(job_.state, SweepingState::Swept);
young_.Append(&job_.young);
old_.Append(&job_.old);
+ young_bytes_ = young_.Bytes();
+ old_bytes_ = old_.Bytes();
job_.state = SweepingState::Uninitialized;
}
@@ -151,6 +191,7 @@ void ArrayBufferSweeper::ReleaseAll() {
EnsureFinished();
ReleaseAll(&old_);
ReleaseAll(&young_);
+ old_bytes_ = young_bytes_ = 0;
}
void ArrayBufferSweeper::ReleaseAll(ArrayBufferList* list) {
@@ -167,11 +208,26 @@ void ArrayBufferSweeper::ReleaseAll(ArrayBufferList* list) {
void ArrayBufferSweeper::Append(JSArrayBuffer object,
ArrayBufferExtension* extension) {
+ CHECK(V8_ARRAY_BUFFER_EXTENSION_BOOL);
+ size_t bytes = extension->accounting_length();
+
if (Heap::InYoungGeneration(object)) {
young_.Append(extension);
+ young_bytes_ += bytes;
} else {
old_.Append(extension);
+ old_bytes_ += bytes;
}
+
+ DecrementExternalMemoryCounters();
+ IncrementExternalMemoryCounters(bytes);
+}
+
+void ArrayBufferSweeper::IncrementExternalMemoryCounters(size_t bytes) {
+ heap_->IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, bytes);
+ reinterpret_cast<v8::Isolate*>(heap_->isolate())
+ ->AdjustAmountOfExternalAllocatedMemory(static_cast<int64_t>(bytes));
}
ArrayBufferSweeper::SweepingJob::SweepingJob()
@@ -188,52 +244,53 @@ ArrayBufferSweeper::SweepingJob ArrayBufferSweeper::SweepingJob::Prepare(
return job;
}
-void ArrayBufferSweeper::SweepingJob::Sweep() {
- CHECK_EQ(state, SweepingState::Prepared);
+void ArrayBufferSweeper::Sweep() {
+ CHECK_EQ(job_.state, SweepingState::Prepared);
- if (scope == SweepingScope::Young) {
+ if (job_.scope == SweepingScope::Young) {
SweepYoung();
} else {
- CHECK_EQ(scope, SweepingScope::Full);
+ CHECK_EQ(job_.scope, SweepingScope::Full);
SweepFull();
}
- state = SweepingState::Swept;
+ job_.state = SweepingState::Swept;
}
-void ArrayBufferSweeper::SweepingJob::SweepFull() {
- CHECK_EQ(scope, SweepingScope::Full);
- ArrayBufferList promoted = SweepListFull(&young);
- ArrayBufferList survived = SweepListFull(&old);
+void ArrayBufferSweeper::SweepFull() {
+ CHECK_EQ(job_.scope, SweepingScope::Full);
+ ArrayBufferList promoted = SweepListFull(&job_.young);
+ ArrayBufferList survived = SweepListFull(&job_.old);
- old = promoted;
- old.Append(&survived);
+ job_.old = promoted;
+ job_.old.Append(&survived);
}
-ArrayBufferList ArrayBufferSweeper::SweepingJob::SweepListFull(
- ArrayBufferList* list) {
+ArrayBufferList ArrayBufferSweeper::SweepListFull(ArrayBufferList* list) {
ArrayBufferExtension* current = list->head_;
- ArrayBufferList survived;
+ ArrayBufferList survivor_list;
while (current) {
ArrayBufferExtension* next = current->next();
if (!current->IsMarked()) {
+ size_t bytes = current->accounting_length();
delete current;
+ IncrementFreedBytes(bytes);
} else {
current->Unmark();
- survived.Append(current);
+ survivor_list.Append(current);
}
current = next;
}
list->Reset();
- return survived;
+ return survivor_list;
}
-void ArrayBufferSweeper::SweepingJob::SweepYoung() {
- CHECK_EQ(scope, SweepingScope::Young);
- ArrayBufferExtension* current = young.head_;
+void ArrayBufferSweeper::SweepYoung() {
+ CHECK_EQ(job_.scope, SweepingScope::Young);
+ ArrayBufferExtension* current = job_.young.head_;
ArrayBufferList new_young;
ArrayBufferList new_old;
@@ -242,7 +299,9 @@ void ArrayBufferSweeper::SweepingJob::SweepYoung() {
ArrayBufferExtension* next = current->next();
if (!current->IsYoungMarked()) {
+ size_t bytes = current->accounting_length();
delete current;
+ IncrementFreedBytes(bytes);
} else if (current->IsYoungPromoted()) {
current->YoungUnmark();
new_old.Append(current);
@@ -254,8 +313,13 @@ void ArrayBufferSweeper::SweepingJob::SweepYoung() {
current = next;
}
- old = new_old;
- young = new_young;
+ job_.old = new_old;
+ job_.young = new_young;
+}
+
+void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
+ if (bytes == 0) return;
+ freed_bytes_.fetch_add(bytes);
}
} // namespace internal
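
The freed_bytes_ drain in DecrementExternalMemoryCounters is an
exchange-to-zero idiom: reload on CAS failure, swap the whole amount out
exactly once, and never lose a concurrent increment from the sweeping task.
The same idiom in isolation:

    #include <atomic>
    #include <cstddef>

    // Atomically take everything accumulated so far, leaving zero behind.
    size_t DrainCounter(std::atomic<size_t>& counter) {
      size_t bytes = counter.load(std::memory_order_relaxed);
      while (bytes != 0 && !counter.compare_exchange_weak(bytes, 0)) {
        // compare_exchange_weak reloads 'bytes' on failure; just retry.
      }
      return bytes;  // the caller applies this delta to the memory counters
    }

counter.exchange(0) would behave the same; the CAS form keeps the common
zero case a plain load.
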
diff --git a/deps/v8/src/heap/array-buffer-sweeper.h b/deps/v8/src/heap/array-buffer-sweeper.h
index a2605a43d2..5cedb2b8f8 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.h
+++ b/deps/v8/src/heap/array-buffer-sweeper.h
@@ -18,17 +18,24 @@ class Heap;
// Singly linked-list of ArrayBufferExtensions that stores head and tail of the
// list to allow for concatenation of lists.
struct ArrayBufferList {
- ArrayBufferList() : head_(nullptr), tail_(nullptr) {}
+ ArrayBufferList() : head_(nullptr), tail_(nullptr), bytes_(0) {}
ArrayBufferExtension* head_;
ArrayBufferExtension* tail_;
+ size_t bytes_;
bool IsEmpty() {
DCHECK_IMPLIES(head_, tail_);
return head_ == nullptr;
}
- void Reset() { head_ = tail_ = nullptr; }
+ size_t Bytes() { return bytes_; }
+ size_t BytesSlow();
+
+ void Reset() {
+ head_ = tail_ = nullptr;
+ bytes_ = 0;
+ }
void Append(ArrayBufferExtension* extension);
void Append(ArrayBufferList* list);
@@ -41,7 +48,11 @@ struct ArrayBufferList {
class ArrayBufferSweeper {
public:
explicit ArrayBufferSweeper(Heap* heap)
- : heap_(heap), sweeping_in_progress_(false) {}
+ : heap_(heap),
+ sweeping_in_progress_(false),
+ freed_bytes_(0),
+ young_bytes_(0),
+ old_bytes_(0) {}
~ArrayBufferSweeper() { ReleaseAll(); }
void EnsureFinished();
@@ -53,6 +64,9 @@ class ArrayBufferSweeper {
ArrayBufferList young() { return young_; }
ArrayBufferList old() { return old_; }
+ size_t YoungBytes();
+ size_t OldBytes();
+
private:
enum class SweepingScope { Young, Full };
@@ -67,20 +81,24 @@ class ArrayBufferSweeper {
SweepingJob();
- void Sweep();
- void SweepYoung();
- void SweepFull();
- ArrayBufferList SweepListFull(ArrayBufferList* list);
static SweepingJob Prepare(ArrayBufferList young, ArrayBufferList old,
SweepingScope scope);
} job_;
void Merge();
+ void DecrementExternalMemoryCounters();
+ void IncrementExternalMemoryCounters(size_t bytes);
+ void IncrementFreedBytes(size_t bytes);
+
void RequestSweep(SweepingScope sweeping_task);
void Prepare(SweepingScope sweeping_task);
+ void Sweep();
+ void SweepYoung();
+ void SweepFull();
ArrayBufferList SweepListFull(ArrayBufferList* list);
+
ArrayBufferList SweepYoungGen();
void SweepOldGen(ArrayBufferExtension* extension);
@@ -91,9 +109,13 @@ class ArrayBufferSweeper {
bool sweeping_in_progress_;
base::Mutex sweeping_mutex_;
base::ConditionVariable job_finished_;
+ std::atomic<size_t> freed_bytes_;
ArrayBufferList young_;
ArrayBufferList old_;
+
+ size_t young_bytes_;
+ size_t old_bytes_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 63d4147d41..b2d1115cba 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -20,14 +20,6 @@
namespace v8 {
namespace internal {
-inline size_t PerIsolateAccountingLength(JSArrayBuffer buffer) {
- // TODO(titzer): SharedArrayBuffers and shared WasmMemorys cause problems with
- // accounting for per-isolate external memory. In particular, sharing the same
- // array buffer or memory multiple times, which happens in stress tests, can
- // cause overcounting, leading to GC thrashing. Fix with global accounting?
- return buffer.is_shared() ? 0 : buffer.byte_length();
-}
-
void ArrayBufferTracker::RegisterNew(
Heap* heap, JSArrayBuffer buffer,
std::shared_ptr<BackingStore> backing_store) {
@@ -38,6 +30,7 @@ void ArrayBufferTracker::RegisterNew(
// ArrayBuffer tracking works only for small objects.
DCHECK(!heap->IsLargeObject(buffer));
DCHECK_EQ(backing_store->buffer_start(), buffer.backing_store());
+ const size_t length = backing_store->PerIsolateAccountingLength();
Page* page = Page::FromHeapObject(buffer);
{
@@ -57,7 +50,6 @@ void ArrayBufferTracker::RegisterNew(
// TODO(wez): Remove backing-store from external memory accounting.
// We may go over the limit of externally allocated memory here. We call the
// api function to trigger a GC in this case.
- const size_t length = PerIsolateAccountingLength(buffer);
reinterpret_cast<v8::Isolate*>(heap->isolate())
->AdjustAmountOfExternalAllocatedMemory(length);
}
@@ -66,7 +58,6 @@ std::shared_ptr<BackingStore> ArrayBufferTracker::Unregister(
Heap* heap, JSArrayBuffer buffer) {
std::shared_ptr<BackingStore> backing_store;
- const size_t length = PerIsolateAccountingLength(buffer);
Page* page = Page::FromHeapObject(buffer);
{
base::MutexGuard guard(page->mutex());
@@ -76,6 +67,7 @@ std::shared_ptr<BackingStore> ArrayBufferTracker::Unregister(
}
// TODO(wez): Remove backing-store from external memory accounting.
+ const size_t length = backing_store->PerIsolateAccountingLength();
heap->update_external_memory(-static_cast<intptr_t>(length));
return backing_store;
}
@@ -98,7 +90,7 @@ void LocalArrayBufferTracker::Free(Callback should_free) {
it != array_buffers_.end();) {
// Unchecked cast because the map might already be dead at this point.
JSArrayBuffer buffer = JSArrayBuffer::unchecked_cast(it->first);
- const size_t length = PerIsolateAccountingLength(buffer);
+ const size_t length = it->second->PerIsolateAccountingLength();
if (should_free(buffer)) {
// Destroy the shared pointer, (perhaps) freeing the backing store.
@@ -135,7 +127,7 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
void LocalArrayBufferTracker::Add(JSArrayBuffer buffer,
std::shared_ptr<BackingStore> backing_store) {
- auto length = PerIsolateAccountingLength(buffer);
+ auto length = backing_store->PerIsolateAccountingLength();
page_->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, length);
@@ -169,7 +161,7 @@ std::shared_ptr<BackingStore> LocalArrayBufferTracker::Remove(
array_buffers_.erase(it);
// Update accounting.
- auto length = PerIsolateAccountingLength(buffer);
+ auto length = backing_store->PerIsolateAccountingLength();
page_->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, length);
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index b284a65f66..e79f86942f 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -50,7 +50,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
tracker = target_page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- const size_t length = PerIsolateAccountingLength(old_buffer);
+ const size_t length = it->second->PerIsolateAccountingLength();
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
tracker->AddInternal(new_buffer, std::move(it->second));
@@ -60,8 +60,8 @@ void LocalArrayBufferTracker::Process(Callback callback) {
static_cast<MemoryChunk*>(target_page), length);
}
} else if (result == kRemoveEntry) {
- freed_memory += PerIsolateAccountingLength(old_buffer);
auto backing_store = std::move(it->second);
+ freed_memory += backing_store->PerIsolateAccountingLength();
TRACE_BS("ABT:queue bs=%p mem=%p (length=%zu) cnt=%ld\n",
backing_store.get(), backing_store->buffer_start(),
backing_store->byte_length(), backing_store.use_count());
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 9bed247194..562719c07f 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -524,7 +524,7 @@ void ConcurrentMarking::ScheduleTasks() {
#else // defined(OS_MACOSX)
// On other platforms use all logical cores, leaving one for the main
// thread.
- total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
+ total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 2));
#endif // defined(OS_MACOSX)
DCHECK_LE(total_task_count_, kMaxTasks);
// One task is for the main thread.
diff --git a/deps/v8/src/heap/cppgc/allocation.cc b/deps/v8/src/heap/cppgc/allocation.cc
new file mode 100644
index 0000000000..7e98d1eec9
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/allocation.cc
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/allocation.h"
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/heap-inl.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+void* MakeGarbageCollectedTraitInternal::Allocate(cppgc::Heap* heap,
+ size_t size,
+ GCInfoIndex index) {
+ DCHECK_NOT_NULL(heap);
+ return Heap::From(heap)->Allocate(size, index);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/asm/x64/push_registers.S b/deps/v8/src/heap/cppgc/asm/x64/push_registers.S
new file mode 100644
index 0000000000..018859d5c0
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/x64/push_registers.S
@@ -0,0 +1,52 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+.att_syntax
+
+.text
+
+#ifdef V8_TARGET_OS_MACOSX
+
+.globl _PushAllRegistersAndIterateStack
+_PushAllRegistersAndIterateStack:
+
+#else // !V8_TARGET_OS_MACOSX
+
+.type PushAllRegistersAndIterateStack, %function
+.global PushAllRegistersAndIterateStack
+.hidden PushAllRegistersAndIterateStack
+PushAllRegistersAndIterateStack:
+
+#endif // !V8_TARGET_OS_MACOSX
+
+ // Push all callee-saved registers to get them on the stack for conservative
+ // stack scanning.
+ //
+ // We maintain 16-byte alignment at calls. There is an 8-byte return address
+ // on the stack and we push 56 bytes which maintains 16-byte stack alignment
+ // at the call.
+ // Source: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
+ //
+ // rbp is callee-saved. Maintain proper frame pointer for debugging.
+ push %rbp
+ mov %rsp, %rbp
+ push $0xCDCDCD // Dummy for alignment.
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ // Pass 1st parameter (rdi) unchanged (Stack*).
+ // Pass 2nd parameter (rsi) unchanged (StackVisitor*).
+ // Save 3rd parameter (rdx; IterateStackCallback)
+ mov %rdx, %r8
+ // Pass 3rd parameter as rsp (stack pointer).
+ mov %rsp, %rdx
+ // Call the callback.
+ call *%r8
+ // Pop the callee-saved registers.
+ add $48, %rsp
+ // Restore rbp as it was used as frame pointer.
+ pop %rbp
+ ret
diff --git a/deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S b/deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S
new file mode 100644
index 0000000000..627843830f
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S
@@ -0,0 +1,45 @@
+;; Copyright 2020 the V8 project authors. All rights reserved.
+;; Use of this source code is governed by a BSD-style license that can be
+;; found in the LICENSE file.
+
+;; MASM syntax
+;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
+
+public PushAllRegistersAndIterateStack
+
+.code
+PushAllRegistersAndIterateStack:
+ ;; Push all callee-saved registers to get them on the stack for conservative
+ ;; stack scanning.
+ ;;
+ ;; We maintain 16-byte alignment at calls. There is an 8-byte return address
+ ;; on the stack and we push 72 bytes which maintains 16-byte stack alignment
+ ;; at the call.
+ ;; Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
+ ;;
+ ;; rbp is callee-saved. Maintain proper frame pointer for debugging.
+ push rbp
+ mov rbp, rsp
+ push 0CDCDCDh ;; Dummy for alignment.
+ push rsi
+ push rdi
+ push rbx
+ push r12
+ push r13
+ push r14
+ push r15
+ ;; Pass 1st parameter (rcx) unchanged (Stack*).
+ ;; Pass 2nd parameter (rdx) unchanged (StackVisitor*).
+ ;; Save 3rd parameter (r8; IterateStackCallback)
+ mov r9, r8
+ ;; Pass 3rd parameter as rsp (stack pointer).
+ mov r8, rsp
+ ;; Call the callback.
+ call r9
+ ;; Pop the callee-saved registers.
+ add rsp, 64
+ ;; Restore rbp as it was used as frame pointer.
+ pop rbp
+ ret
+
+end
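
Both trampolines are laid out so the stack is still 16-byte aligned at the
indirect call; counting 8-byte slots shows why each needs one dummy push.
A worked check of the arithmetic from the comments above:

    // SysV x64: 7 pushes (rbp, dummy, rbx, r12-r15) + 8-byte return address.
    static_assert((7 * 8 + 8) % 16 == 0, "stack stays 16-byte aligned");
    // Win64: 9 pushes (rbp, dummy, rsi, rdi, rbx, r12-r15) + return address.
    static_assert((9 * 8 + 8) % 16 == 0, "stack stays 16-byte aligned");
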
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.cc b/deps/v8/src/heap/cppgc/gc-info-table.cc
new file mode 100644
index 0000000000..580ff4d069
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/gc-info-table.cc
@@ -0,0 +1,124 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/gc-info-table.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+
+#include "include/cppgc/gc-info.h"
+#include "include/cppgc/platform.h"
+#include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+constexpr size_t kEntrySize = sizeof(GCInfo);
+static_assert(v8::base::bits::IsPowerOfTwo(kEntrySize),
+ "GCInfoTable entries size must be power of "
+ "two");
+
+} // namespace
+
+GCInfoTable* GlobalGCInfoTable::global_table_ = nullptr;
+constexpr GCInfoIndex GCInfoTable::kMaxIndex;
+constexpr GCInfoIndex GCInfoTable::kMinIndex;
+constexpr GCInfoIndex GCInfoTable::kInitialWantedLimit;
+
+void GlobalGCInfoTable::Create(PageAllocator* page_allocator) {
+ static v8::base::LeakyObject<GCInfoTable> table(page_allocator);
+ if (!global_table_) {
+ global_table_ = table.get();
+ }
+}
+
+GCInfoTable::GCInfoTable(PageAllocator* page_allocator)
+ : page_allocator_(page_allocator),
+ table_(static_cast<decltype(table_)>(page_allocator_->AllocatePages(
+ nullptr, MaxTableSize(), page_allocator_->AllocatePageSize(),
+ PageAllocator::kNoAccess))),
+ read_only_table_end_(reinterpret_cast<uint8_t*>(table_)) {
+ CHECK(table_);
+ Resize();
+}
+
+GCInfoTable::~GCInfoTable() {
+ page_allocator_->ReleasePages(const_cast<GCInfo*>(table_), MaxTableSize(), 0);
+}
+
+size_t GCInfoTable::MaxTableSize() const {
+ return RoundUp(GCInfoTable::kMaxIndex * kEntrySize,
+ page_allocator_->AllocatePageSize());
+}
+
+GCInfoIndex GCInfoTable::InitialTableLimit() const {
+ // Different OSes have different page sizes, so we have to choose the minimum
+ // of memory wanted and OS page size.
+ constexpr size_t memory_wanted = kInitialWantedLimit * kEntrySize;
+ const size_t initial_limit =
+ RoundUp(memory_wanted, page_allocator_->AllocatePageSize()) / kEntrySize;
+ CHECK_GT(std::numeric_limits<GCInfoIndex>::max(), initial_limit);
+ return static_cast<GCInfoIndex>(
+ std::min(static_cast<size_t>(kMaxIndex), initial_limit));
+}
+
+void GCInfoTable::Resize() {
+ const GCInfoIndex new_limit = (limit_) ? 2 * limit_ : InitialTableLimit();
+ CHECK_GT(new_limit, limit_);
+ const size_t old_committed_size = limit_ * kEntrySize;
+ const size_t new_committed_size = new_limit * kEntrySize;
+ CHECK(table_);
+ CHECK_EQ(0u, new_committed_size % page_allocator_->AllocatePageSize());
+ CHECK_GE(MaxTableSize(), new_committed_size);
+ // Recommit new area as read/write.
+ uint8_t* current_table_end =
+ reinterpret_cast<uint8_t*>(table_) + old_committed_size;
+ const size_t table_size_delta = new_committed_size - old_committed_size;
+ CHECK(page_allocator_->SetPermissions(current_table_end, table_size_delta,
+ PageAllocator::kReadWrite));
+ // Recommit old area as read-only.
+ if (read_only_table_end_ != current_table_end) {
+ DCHECK_GT(current_table_end, read_only_table_end_);
+ const size_t read_only_delta = current_table_end - read_only_table_end_;
+ CHECK(page_allocator_->SetPermissions(read_only_table_end_, read_only_delta,
+ PageAllocator::kRead));
+ read_only_table_end_ += read_only_delta;
+ }
+
+ // Check that newly-committed memory is zero-initialized.
+ CheckMemoryIsZeroed(reinterpret_cast<uintptr_t*>(current_table_end),
+ table_size_delta / sizeof(uintptr_t));
+
+ limit_ = new_limit;
+}
+
+void GCInfoTable::CheckMemoryIsZeroed(uintptr_t* base, size_t len) {
+#if DEBUG
+ for (size_t i = 0; i < len; ++i) {
+ DCHECK(!base[i]);
+ }
+#endif // DEBUG
+}
+
+GCInfoIndex GCInfoTable::RegisterNewGCInfo(const GCInfo& info) {
+ // Ensuring a new index involves current index adjustment as well as
+ // potentially resizing the table. For simplicity we use a lock.
+ v8::base::MutexGuard guard(&table_mutex_);
+
+ if (current_index_ == limit_) {
+ Resize();
+ }
+
+ GCInfoIndex new_index = current_index_++;
+ CHECK_LT(new_index, GCInfoTable::kMaxIndex);
+ table_[new_index] = info;
+ return new_index;
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.h b/deps/v8/src/heap/cppgc/gc-info-table.h
new file mode 100644
index 0000000000..c5ccec2a38
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/gc-info-table.h
@@ -0,0 +1,113 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_GC_INFO_TABLE_H_
+#define V8_HEAP_CPPGC_GC_INFO_TABLE_H_
+
+#include <stdint.h>
+
+#include "include/cppgc/gc-info.h"
+#include "include/cppgc/platform.h"
+#include "include/v8config.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/platform.h"
+
+namespace cppgc {
+namespace internal {
+
+// GCInfo contains metadata for objects that are instantiated from classes that
+// inherit from GarbageCollected.
+struct GCInfo final {
+ FinalizationCallback finalize;
+ bool has_v_table;
+};
+
+class V8_EXPORT GCInfoTable final {
+ public:
+ // At maximum |kMaxIndex - 1| indices are supported.
+ //
+ // We assume that 14 bits are enough to represent all possible types.
+ //
+ // For Blink during telemetry runs, we see about 1,000 different types;
+ // looking at the output of the Oilpan GC clang plugin, there appear to be at
+ // most about 6,000 types. Thus 14 bits should be more than twice as many bits
+ // as we will ever need. Different contexts may require adjusting this limit.
+ static constexpr GCInfoIndex kMaxIndex = 1 << 14;
+
+ // Minimum index returned. Values smaller |kMinIndex| may be used as
+ // sentinels.
+ static constexpr GCInfoIndex kMinIndex = 1;
+
+ // (Light) experimentation suggests that Blink doesn't need more than this
+ // while handling content on popular web properties.
+ static constexpr GCInfoIndex kInitialWantedLimit = 512;
+
+ // Refer through GlobalGCInfoTable for retrieving the global table outside
+ // of testing code.
+ explicit GCInfoTable(PageAllocator* page_allocator);
+ ~GCInfoTable();
+
+ GCInfoIndex RegisterNewGCInfo(const GCInfo& info);
+
+ const GCInfo& GCInfoFromIndex(GCInfoIndex index) const {
+ DCHECK_GE(index, kMinIndex);
+ DCHECK_LT(index, kMaxIndex);
+ DCHECK(table_);
+ return table_[index];
+ }
+
+ GCInfoIndex NumberOfGCInfosForTesting() const { return current_index_; }
+ GCInfoIndex LimitForTesting() const { return limit_; }
+ GCInfo& TableSlotForTesting(GCInfoIndex index) { return table_[index]; }
+
+ private:
+ void Resize();
+
+ GCInfoIndex InitialTableLimit() const;
+ size_t MaxTableSize() const;
+
+ void CheckMemoryIsZeroed(uintptr_t* base, size_t len);
+
+ PageAllocator* page_allocator_;
+ // Holds the per-class GCInfo descriptors; each HeapObjectHeader keeps an
+ // index into this table.
+ GCInfo* table_;
+ uint8_t* read_only_table_end_;
+ // Current index used when registering a new GCInfo object.
+ GCInfoIndex current_index_ = kMinIndex;
+ // The limit (exclusive) of the currently allocated table.
+ GCInfoIndex limit_ = 0;
+
+ v8::base::Mutex table_mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(GCInfoTable);
+};
+
+class V8_EXPORT GlobalGCInfoTable final {
+ public:
+ // Sets up a singleton table that can be acquired using Get().
+ static void Create(PageAllocator* page_allocator);
+
+ // Accessors for the singleton table.
+ static GCInfoTable& GetMutable() { return *global_table_; }
+ static const GCInfoTable& Get() { return *global_table_; }
+
+ static const GCInfo& GCInfoFromIndex(GCInfoIndex index) {
+ return Get().GCInfoFromIndex(index);
+ }
+
+ private:
+ // Singleton for each process. Retrieved through Get().
+ static GCInfoTable* global_table_;
+
+ DISALLOW_NEW_AND_DELETE()
+ DISALLOW_COPY_AND_ASSIGN(GlobalGCInfoTable);
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_GC_INFO_TABLE_H_
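
The intended use is one index per GarbageCollected type, registered lazily on
the first allocation and cached for the life of the process. A sketch of that
pattern (the real trait machinery lives in include/cppgc; the function name
here is illustrative):

    template <typename T>
    GCInfoIndex GCInfoIndexFor() {
      // Function-local statics are thread-safe in C++11, and
      // RegisterNewGCInfo itself takes table_mutex_ internally.
      static const GCInfoIndex index =
          GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
              {/* finalize */ nullptr, /* has_v_table */ false});
      return index;
    }
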
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
new file mode 100644
index 0000000000..21492825cc
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/gc-info.h"
+#include "include/v8config.h"
+#include "src/heap/cppgc/gc-info-table.h"
+
+namespace cppgc {
+namespace internal {
+
+RegisteredGCInfoIndex::RegisteredGCInfoIndex(
+ FinalizationCallback finalization_callback, bool has_v_table)
+ : index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ {finalization_callback, has_v_table})) {}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
new file mode 100644
index 0000000000..18a7e3189e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -0,0 +1,39 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_GLOBALS_H_
+#define V8_HEAP_CPPGC_GLOBALS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace cppgc {
+namespace internal {
+
+using Address = uint8_t*;
+using ConstAddress = const uint8_t*;
+
+// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
+// do not fully support all alignment restrictions (following
+// alignof(std::max_align_t)) but limit to alignof(double).
+//
+// This means that any scalar type with stricter alignment requirements (in
+// practice: long double) cannot be used unrestricted in garbage-collected
+// objects.
+//
+// Note: We use the same allocation granularity on 32-bit and 64-bit systems.
+constexpr size_t kAllocationGranularity = 8;
+constexpr size_t kAllocationMask = kAllocationGranularity - 1;
+
+constexpr size_t kPageSizeLog2 = 17;
+constexpr size_t kPageSize = 1 << kPageSizeLog2;
+constexpr size_t kPageOffsetMask = kPageSize - 1;
+constexpr size_t kPageBaseMask = ~kPageOffsetMask;
+
+constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_GLOBALS_H_
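
Because kPageSize and kAllocationGranularity are powers of two, all of the
address math is masking. A sketch assuming the constants above are in scope:

    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t PageBase(uintptr_t addr) { return addr & kPageBaseMask; }
    constexpr uintptr_t PageOffset(uintptr_t addr) {
      return addr & kPageOffsetMask;
    }
    constexpr size_t AlignUp(size_t size) {
      return (size + kAllocationMask) & ~kAllocationMask;
    }

    static_assert(PageBase(0x2345678) == 0x2340000, "top bits select the page");
    static_assert(PageOffset(0x2345678) == 0x5678, "low 17 bits are the offset");
    static_assert(AlignUp(17) == 24, "sizes round up to 8-byte granularity");
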
diff --git a/deps/v8/src/heap/cppgc/heap-inl.h b/deps/v8/src/heap/cppgc/heap-inl.h
new file mode 100644
index 0000000000..28a4a14139
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap.h"
+
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+
+#ifndef V8_HEAP_CPPGC_HEAP_INL_H_
+#define V8_HEAP_CPPGC_HEAP_INL_H_
+
+namespace cppgc {
+namespace internal {
+
+void* Heap::Allocate(size_t size, GCInfoIndex index) {
+ // TODO(chromium:1056170): This is merely a dummy implementation and will be
+ // replaced with proper allocation code throughout the migration.
+ size_t allocation_size = size + sizeof(HeapObjectHeader);
+ // The allocation size calculation can overflow for large sizes.
+ CHECK_GT(allocation_size, size);
+ // calloc() provides stricter alignment guarantees than the GC. Allocate
+ // a multiple of kAllocationGranularity to follow restrictions of
+ // HeapObjectHeader.
+ allocation_size = (allocation_size + kAllocationMask) & ~kAllocationMask;
+ void* memory = calloc(1, allocation_size);
+ HeapObjectHeader* header =
+ new (memory) HeapObjectHeader(allocation_size, index);
+ objects_.push_back(header);
+ return header->Payload();
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_INL_H_
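
The layout trick in the dummy allocator (header placement-new'ed in front,
payload right after) reads more clearly without the V8 scaffolding. A reduced
model, with Header standing in for HeapObjectHeader:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Header { size_t size; };

    void* AllocateWithHeader(size_t size) {
      size_t total = size + sizeof(Header);
      if (total < size) return nullptr;  // overflow guard, cf. CHECK_GT above
      total = (total + 7) & ~size_t{7};  // round up, cf. kAllocationMask
      void* memory = calloc(1, total);   // zeroed, like the dummy allocator
      if (memory == nullptr) return nullptr;
      Header* header = new (memory) Header{total};
      return header + 1;  // the payload starts right after the header
    }
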
diff --git a/deps/v8/src/heap/cppgc/heap-object-header-inl.h b/deps/v8/src/heap/cppgc/heap-object-header-inl.h
new file mode 100644
index 0000000000..a0bcda464b
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-object-header-inl.h
@@ -0,0 +1,148 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
+#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/gc-info.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-object-header.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+HeapObjectHeader& HeapObjectHeader::FromPayload(void* payload) {
+ return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(payload) -
+ sizeof(HeapObjectHeader));
+}
+
+// static
+const HeapObjectHeader& HeapObjectHeader::FromPayload(const void* payload) {
+ return *reinterpret_cast<const HeapObjectHeader*>(
+ static_cast<ConstAddress>(payload) - sizeof(HeapObjectHeader));
+}
+
+HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
+#if defined(V8_TARGET_ARCH_64_BIT)
+ USE(padding_);
+#endif // defined(V8_TARGET_ARCH_64_BIT)
+ DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
+ DCHECK_EQ(0u, size & kAllocationMask);
+ DCHECK_GE(kMaxSize, size);
+ encoded_high_ = GCInfoIndexField::encode(gc_info_index);
+ encoded_low_ = EncodeSize(size);
+ DCHECK(IsInConstruction());
+#ifdef DEBUG
+ CheckApiConstants();
+#endif // DEBUG
+}
+
+Address HeapObjectHeader::Payload() const {
+ return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
+ sizeof(HeapObjectHeader);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
+ const uint16_t encoded =
+ LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
+ return GCInfoIndexField::decode(encoded);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+size_t HeapObjectHeader::GetSize() const {
+ // Size is immutable after construction while either marking or sweeping
+ // is running so relaxed load (if mode == kAtomic) is enough.
+ uint16_t encoded_low_value =
+ LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
+ const size_t size = DecodeSize(encoded_low_value);
+ return size;
+}
+
+void HeapObjectHeader::SetSize(size_t size) {
+ DCHECK(!IsMarked());
+ encoded_low_ |= EncodeSize(size);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+bool HeapObjectHeader::IsLargeObject() const {
+ return GetSize<mode>() == kLargeObjectSizeInHeader;
+}
+
+template <HeapObjectHeader::AccessMode mode>
+bool HeapObjectHeader::IsInConstruction() const {
+ const uint16_t encoded =
+ LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
+ return !FullyConstructedField::decode(encoded);
+}
+
+void HeapObjectHeader::MarkAsFullyConstructed() {
+ MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
+}
+
+template <HeapObjectHeader::AccessMode mode>
+bool HeapObjectHeader::IsMarked() const {
+ const uint16_t encoded =
+ LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
+ return MarkBitField::decode(encoded);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+void HeapObjectHeader::Unmark() {
+ DCHECK(IsMarked<mode>());
+ StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
+ MarkBitField::encode(false), MarkBitField::kMask);
+}
+
+bool HeapObjectHeader::TryMarkAtomic() {
+ auto* atomic_encoded = v8::base::AsAtomicPtr(&encoded_low_);
+ uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
+ const uint16_t new_value = old_value | MarkBitField::encode(true);
+ if (new_value == old_value) {
+ return false;
+ }
+ return atomic_encoded->compare_exchange_strong(old_value, new_value,
+ std::memory_order_relaxed);
+}
+
+template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
+ std::memory_order memory_order>
+uint16_t HeapObjectHeader::LoadEncoded() const {
+ const uint16_t& half =
+ part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
+ if (mode == AccessMode::kNonAtomic) return half;
+ return v8::base::AsAtomicPtr(&half)->load(memory_order);
+}
+
+template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
+ std::memory_order memory_order>
+void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
+ // Caveat: Not all changes to HeapObjectHeader's bitfields go through
+ // StoreEncoded. The following have their own implementations and need to be
+ // kept in sync:
+ // - HeapObjectHeader::TryMarkAtomic
+ // - MarkObjectAsFullyConstructed (API)
+ DCHECK_EQ(0u, bits & ~mask);
+ uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
+ if (mode == AccessMode::kNonAtomic) {
+ half = (half & ~mask) | bits;
+ return;
+ }
+ // We don't perform a CAS loop here, assuming that no two pieces of info
+ // sharing the same encoded half are mutated at the same time.
+ auto* atomic_encoded = v8::base::AsAtomicPtr(&half);
+ uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
+ value = (value & ~mask) | bits;
+ atomic_encoded->store(value, memory_order);
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
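
EncodeSize/DecodeSize work because every size is a multiple of
kAllocationGranularity: dividing by 8 frees three low bits, which is exactly
how a 14-bit field covers a 2^17-byte page. A sketch with local copies of the
constants:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kGranularity = 8;  // kAllocationGranularity
    constexpr uint16_t Encode(size_t size) {
      return static_cast<uint16_t>(size / kGranularity);  // exact: 8-aligned
    }
    constexpr size_t Decode(uint16_t encoded) {
      return size_t{encoded} * kGranularity;
    }

    static_assert(Encode(131064) == (1u << 14) - 1, "largest regular size");
    static_assert(Decode(Encode(131064)) == 131064, "round-trips losslessly");
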
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.cc b/deps/v8/src/heap/cppgc/heap-object-header.cc
new file mode 100644
index 0000000000..bd90d5930c
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-object-header.cc
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-object-header.h"
+
+#include "include/cppgc/internals.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+
+namespace cppgc {
+namespace internal {
+
+void HeapObjectHeader::CheckApiConstants() {
+ STATIC_ASSERT(api_constants::kFullyConstructedBitMask ==
+ FullyConstructedField::kMask);
+ STATIC_ASSERT(api_constants::kFullyConstructedBitFieldOffsetFromPayload ==
+ (sizeof(encoded_high_) + sizeof(encoded_low_)));
+}
+
+void HeapObjectHeader::Finalize() {
+ const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
+ if (gc_info.finalize) {
+ gc_info.finalize(Payload());
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
new file mode 100644
index 0000000000..738f9d9ab9
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -0,0 +1,127 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
+#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
+
+#include <stdint.h>
+#include <atomic>
+
+#include "include/cppgc/gc-info.h"
+#include "src/base/bit-field.h"
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+// HeapObjectHeader contains metadata per object and is prepended to each
+// object.
+//
+// +-----------------+------+------------------------------------------+
+// | name | bits | |
+// +-----------------+------+------------------------------------------+
+// | padding | 32 | Only present on 64-bit platform. |
+// +-----------------+------+------------------------------------------+
+// | GCInfoIndex | 14 | |
+// | unused | 1 | |
+// | in construction | 1 | In construction encoded as |false|. |
+// +-----------------+------+------------------------------------------+
+// | size | 14 | 17 bits because allocations are aligned. |
+// | unused | 1 | |
+// | mark bit | 1 | |
+// +-----------------+------+------------------------------------------+
+//
+// Notes:
+// - See |GCInfoTable| for constraints on GCInfoIndex.
+// - |size| for regular objects is encoded with 14 bits but can actually
+// represent sizes up to |kBlinkPageSize| (2^17) because allocations are
+// always 8 byte aligned (see kAllocationGranularity).
+// - |size| for large objects is encoded as 0. The size of a large object is
+// stored in |LargeObjectPage::PayloadSize()|.
+// - |mark bit| and |in construction| bits are located in separate 16-bit halves
+// to allow potentially accessing them non-atomically.
+class HeapObjectHeader final {
+ public:
+ enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
+
+ static constexpr size_t kSizeLog2 = 17;
+ static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
+
+ inline static HeapObjectHeader& FromPayload(void* address);
+ inline static const HeapObjectHeader& FromPayload(const void* address);
+
+ inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);
+
+ // The payload starts directly after the HeapObjectHeader.
+ inline Address Payload() const;
+
+ template <AccessMode mode = AccessMode::kNonAtomic>
+ inline GCInfoIndex GetGCInfoIndex() const;
+
+ template <AccessMode mode = AccessMode::kNonAtomic>
+ inline size_t GetSize() const;
+ inline void SetSize(size_t size);
+
+ template <AccessMode mode = AccessMode::kNonAtomic>
+ inline bool IsLargeObject() const;
+
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsInConstruction() const;
+ inline void MarkAsFullyConstructed();
+ // Use MarkObjectAsFullyConstructed() to mark an object as being constructed.
+
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsMarked() const;
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Unmark();
+ inline bool TryMarkAtomic();
+
+ void Finalize();
+
+ private:
+ enum class EncodedHalf : uint8_t { kLow, kHigh };
+
+ static constexpr uint16_t kLargeObjectSizeInHeader = 0;
+
+ // Used in |encoded_high_|.
+ using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
+ using UnusedField1 = FullyConstructedField::Next<bool, 1>;
+ using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
+ // Used in |encoded_low_|.
+ using MarkBitField = v8::base::BitField16<bool, 0, 1>;
+ using UnusedField2 = MarkBitField::Next<bool, 1>;
+ using SizeField = void; // Use EncodeSize/DecodeSize instead.
+
+ static constexpr size_t DecodeSize(uint16_t encoded) {
+ // Essentially, gets optimized to << 1.
+ using SizeField = UnusedField2::Next<size_t, 14>;
+ return SizeField::decode(encoded) * kAllocationGranularity;
+ }
+
+ static constexpr uint16_t EncodeSize(size_t size) {
+ // Essentially, gets optimized to >> 1.
+ using SizeField = UnusedField2::Next<size_t, 14>;
+ return SizeField::encode(size) / kAllocationGranularity;
+ }
+
+ V8_EXPORT_PRIVATE void CheckApiConstants();
+
+ template <AccessMode, EncodedHalf part,
+ std::memory_order memory_order = std::memory_order_seq_cst>
+ inline uint16_t LoadEncoded() const;
+ template <AccessMode mode, EncodedHalf part,
+ std::memory_order memory_order = std::memory_order_seq_cst>
+ inline void StoreEncoded(uint16_t bits, uint16_t mask);
+
+#if defined(V8_TARGET_ARCH_64_BIT)
+ uint32_t padding_ = 0;
+#endif // defined(V8_TARGET_ARCH_64_BIT)
+ uint16_t encoded_high_;
+ uint16_t encoded_low_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
new file mode 100644
index 0000000000..e60cb15573
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -0,0 +1,28 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap.h"
+
+#include <memory>
+
+#include "src/heap/cppgc/heap-object-header.h"
+
+namespace cppgc {
+
+std::unique_ptr<Heap> Heap::Create() {
+ return std::make_unique<internal::Heap>();
+}
+
+namespace internal {
+
+void Heap::CollectGarbage() {
+ for (HeapObjectHeader* header : objects_) {
+ header->Finalize();
+ free(header);
+ }
+ objects_.clear();
+}
+
+} // namespace internal
+} // namespace cppgc
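
End to end, the embedder flow at this stage of the migration looks like the
sketch below. GCed is a hypothetical user type, the page allocator is assumed
to come from the embedder, and with no marking yet, CollectGarbage() simply
finalizes and frees every object:

    #include <memory>

    #include "include/cppgc/allocation.h"
    #include "include/cppgc/garbage-collected.h"
    #include "include/cppgc/heap.h"
    #include "include/cppgc/platform.h"

    class GCed final : public cppgc::GarbageCollected<GCed> {};

    void Demo(cppgc::PageAllocator* page_allocator) {
      cppgc::InitializePlatform(page_allocator);  // sets up the GCInfo table
      std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create();
      GCed* object = cppgc::MakeGarbageCollected<GCed>(heap.get());
      (void)object;  // no marking yet; collection finalizes unconditionally
      cppgc::ShutdownPlatform();
    }
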
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
new file mode 100644
index 0000000000..baf70d8f4e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_H_
+#define V8_HEAP_CPPGC_HEAP_H_
+
+#include <vector>
+
+#include "include/cppgc/gc-info.h"
+#include "include/cppgc/heap.h"
+#include "src/heap/cppgc/heap-object-header.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE Heap final : public cppgc::Heap {
+ public:
+ static Heap* From(cppgc::Heap* heap) { return static_cast<Heap*>(heap); }
+
+ Heap() = default;
+ ~Heap() final = default;
+
+ inline void* Allocate(size_t size, GCInfoIndex index);
+
+ void CollectGarbage();
+
+ private:
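+  // Every object allocated on this heap, in allocation order;
+  // CollectGarbage() finalizes and frees the entire list.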
+ std::vector<HeapObjectHeader*> objects_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_H_
diff --git a/deps/v8/src/heap/cppgc/platform.cc b/deps/v8/src/heap/cppgc/platform.cc
new file mode 100644
index 0000000000..3b20060392
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/platform.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/platform.h"
+
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/gc-info-table.h"
+
+namespace cppgc {
+namespace internal {
+
+static PageAllocator* g_page_allocator;
+
+} // namespace internal
+
+void InitializePlatform(PageAllocator* page_allocator) {
+ internal::g_page_allocator = page_allocator;
+ internal::GlobalGCInfoTable::Create(page_allocator);
+}
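+
+// Minimal usage sketch; |my_page_allocator| stands in for an
+// embedder-provided allocator:
+//
+//   cppgc::InitializePlatform(my_page_allocator);
+//   std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create();
+//   /* allocate garbage-collected objects */
+//   cppgc::ShutdownPlatform();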
+
+void ShutdownPlatform() { internal::g_page_allocator = nullptr; }
+
+namespace internal {
+
+void Abort() { v8::base::OS::Abort(); }
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/sanitizers.h b/deps/v8/src/heap/cppgc/sanitizers.h
new file mode 100644
index 0000000000..e3102b01ed
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/sanitizers.h
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_SANITIZERS_H_
+#define V8_HEAP_CPPGC_SANITIZERS_H_
+
+#include "src/base/macros.h"
+
+//
+// TODO(chromium:1056170): Find a place in base for sanitizer support.
+//
+
+#ifdef V8_USE_ADDRESS_SANITIZER
+
+#include <sanitizer/asan_interface.h>
+
+#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+
+#else // !V8_USE_ADDRESS_SANITIZER
+
+#define NO_SANITIZE_ADDRESS
+
+#endif // V8_USE_ADDRESS_SANITIZER
+
+#ifdef V8_USE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+#define MSAN_UNPOISON(addr, size) __msan_unpoison(addr, size)
+
+#else // !V8_USE_MEMORY_SANITIZER
+
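+// No-op that still evaluates both arguments, so non-MSAN builds do not emit
+// unused-variable warnings for them.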
+#define MSAN_UNPOISON(addr, size) ((void)(addr), (void)(size))
+
+#endif // V8_USE_MEMORY_SANITIZER
+
+#endif // V8_HEAP_CPPGC_SANITIZERS_H_
diff --git a/deps/v8/src/heap/cppgc/stack.cc b/deps/v8/src/heap/cppgc/stack.cc
new file mode 100644
index 0000000000..a821768917
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/stack.cc
@@ -0,0 +1,136 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/stack.h"
+
+#include <limits>
+
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+using IterateStackCallback = void (Stack::*)(StackVisitor*, intptr_t*) const;
+extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
+ IterateStackCallback);
+
+Stack::Stack(const void* stack_start) : stack_start_(stack_start) {}
+
+bool Stack::IsOnStack(void* slot) const {
+ void* raw_slot = v8::base::Stack::GetStackSlot(slot);
+ return v8::base::Stack::GetCurrentStackPosition() <= raw_slot &&
+ raw_slot <= stack_start_;
+}
+
+namespace {
+
+#ifdef V8_USE_ADDRESS_SANITIZER
+
+// ASAN instrumentation is disabled for this function because reading fake
+// frames would otherwise trigger "stack-use-after-scope" warnings.
+NO_SANITIZE_ADDRESS
+void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
+ void* asan_fake_stack,
+ const void* stack_start,
+ const void* stack_end, void* address) {
+  // When the ASAN fake stack is in use, a pointer to the fake frame is kept
+  // on the native frame. If |address| points to a fake frame of the current
+  // stack, iterate that fake frame. For the frame layout see
+  // https://github.com/google/sanitizers/wiki/AddressSanitizerUseAfterReturn
+ if (asan_fake_stack) {
+ void* fake_frame_begin;
+ void* fake_frame_end;
+ void* real_stack_frame = __asan_addr_is_in_fake_stack(
+ asan_fake_stack, address, &fake_frame_begin, &fake_frame_end);
+ if (real_stack_frame) {
+ // |address| points to a fake frame. Check that the fake frame is part
+ // of this stack.
+ if (stack_start >= real_stack_frame && real_stack_frame >= stack_end) {
+ // Iterate the fake frame.
+ for (void** current = reinterpret_cast<void**>(fake_frame_begin);
+ current < fake_frame_end; ++current) {
+ void* addr = *current;
+ if (addr == nullptr) continue;
+ visitor->VisitPointer(addr);
+ }
+ }
+ }
+ }
+}
+
+#endif // V8_USE_ADDRESS_SANITIZER
+
+#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
+
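+// With -fsanitize=safe-stack, address-taken locals live on a separate
+// "unsafe" stack that the regular conservative walk does not cover, so it
+// is scanned explicitly here.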
+void IterateSafeStackIfNecessary(StackVisitor* visitor) {
+#if defined(__has_feature)
+#if __has_feature(safe_stack)
+ // Source:
+ // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/safestack/safestack.cpp
+ constexpr size_t kSafeStackAlignmentBytes = 16;
+ void* stack_end = __builtin___get_unsafe_stack_ptr();
+ void* stack_start = __builtin___get_unsafe_stack_top();
+ CHECK_GT(stack_start, stack_end);
+ CHECK_EQ(0u, reinterpret_cast<uintptr_t>(stack_end) &
+ (kSafeStackAlignmentBytes - 1));
+ CHECK_EQ(0u, reinterpret_cast<uintptr_t>(stack_start) &
+ (kSafeStackAlignmentBytes - 1));
+ void** current = reinterpret_cast<void**>(stack_end);
+ for (; current < stack_start; ++current) {
+ void* address = *current;
+ if (address == nullptr) continue;
+ visitor->VisitPointer(address);
+ }
+#endif // __has_feature(safe_stack)
+#endif // defined(__has_feature)
+}
+
+#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
+
+} // namespace
+
+#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
+void Stack::IteratePointers(StackVisitor* visitor) const {
+ PushAllRegistersAndIterateStack(this, visitor, &Stack::IteratePointersImpl);
+ // No need to deal with callee-saved registers as they will be kept alive by
+ // the regular conservative stack iteration.
+ IterateSafeStackIfNecessary(visitor);
+}
+#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
+
+// ASAN instrumentation is disabled because this method reads redzones while
+// walking the stack.
+NO_SANITIZE_ADDRESS
+void Stack::IteratePointersImpl(StackVisitor* visitor,
+ intptr_t* stack_end) const {
+#ifdef V8_USE_ADDRESS_SANITIZER
+ void* asan_fake_stack = __asan_get_current_fake_stack();
+#endif // V8_USE_ADDRESS_SANITIZER
+ // All supported platforms should have their stack aligned to at least
+ // sizeof(void*).
+ constexpr size_t kMinStackAlignment = sizeof(void*);
+  // The redzone below |stack_end| should not contain any pointers, since the
+  // iteration is always entered through the assembly trampoline; it is
+  // scanned anyway in case the trampoline is ever inlined through LTO.
+  // Subtract on the integer value: subtracting from the intptr_t* directly
+  // would be scaled by sizeof(intptr_t) and overshoot the 128-byte redzone.
+  constexpr size_t kRedZoneBytes = 128;
+  void** current = reinterpret_cast<void**>(
+      reinterpret_cast<uintptr_t>(stack_end) - kRedZoneBytes);
+ CHECK_EQ(0u, reinterpret_cast<uintptr_t>(current) & (kMinStackAlignment - 1));
+ for (; current < stack_start_; ++current) {
+ // MSAN: Instead of unpoisoning the whole stack, the slot's value is copied
+ // into a local which is unpoisoned.
+ void* address = *current;
+ MSAN_UNPOISON(address, sizeof(address));
+ if (address == nullptr) continue;
+ visitor->VisitPointer(address);
+#ifdef V8_USE_ADDRESS_SANITIZER
+ IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack, stack_start_,
+ stack_end, address);
+#endif // V8_USE_ADDRESS_SANITIZER
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/stack.h b/deps/v8/src/heap/cppgc/stack.h
new file mode 100644
index 0000000000..599bf3a54a
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/stack.h
@@ -0,0 +1,49 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_STACK_H_
+#define V8_HEAP_CPPGC_STACK_H_
+
+#include "src/base/macros.h"
+
+// TODO(chromium:1056170): Implement all platforms.
+#if defined(V8_TARGET_ARCH_X64)
+#define CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN 1
+#endif
+
+namespace cppgc {
+namespace internal {
+
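+// Visitor interface for conservative stack scanning; it receives every
+// non-null, word-aligned slot value found on the stack.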
+class StackVisitor {
+ public:
+ virtual void VisitPointer(const void* address) = 0;
+};
+
+// Abstraction over the stack. Supports handling of:
+// - native stack;
+// - ASAN/MSAN;
+// - SafeStack: https://releases.llvm.org/10.0.0/tools/clang/docs/SafeStack.html
+class V8_EXPORT_PRIVATE Stack final {
+ public:
+ explicit Stack(const void* stack_start);
+
+ // Returns true if |slot| is part of the stack and false otherwise.
+ bool IsOnStack(void* slot) const;
+
+ // Word-aligned iteration of the stack. Slot values are passed on to
+ // |visitor|.
+#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
+ void IteratePointers(StackVisitor* visitor) const;
+#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
+
+ private:
+ void IteratePointersImpl(StackVisitor* visitor, intptr_t* stack_end) const;
+
+ const void* stack_start_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_STACK_H_
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index e3317b1379..2b46da1feb 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -24,7 +24,6 @@ void LocalEmbedderHeapTracer::TracePrologue(
EmbedderHeapTracer::TraceFlags flags) {
if (!InUse()) return;
- num_v8_marking_worklist_was_empty_ = 0;
embedder_worklist_empty_ = false;
remote_tracer_->TracePrologue(flags);
}
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 05932ea141..cc3801e479 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -69,15 +69,9 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
remote_tracer_->ResetHandleInNonTracingGC(handle);
}
- void NotifyV8MarkingWorklistWasEmpty() {
- num_v8_marking_worklist_was_empty_++;
- }
-
bool ShouldFinalizeIncrementalMarking() {
- static const size_t kMaxIncrementalFixpointRounds = 3;
return !FLAG_incremental_marking_wrappers || !InUse() ||
- (IsRemoteTracingDone() && embedder_worklist_empty_) ||
- num_v8_marking_worklist_was_empty_ > kMaxIncrementalFixpointRounds;
+ (IsRemoteTracingDone() && embedder_worklist_empty_);
}
void SetEmbedderStackStateForNextFinalization(
@@ -114,7 +108,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
Isolate* const isolate_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
- size_t num_v8_marking_worklist_was_empty_ = 0;
EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
EmbedderHeapTracer::kUnknown;
// Indicates whether the embedder worklist was observed empty on the main
diff --git a/deps/v8/src/heap/factory-base-inl.h b/deps/v8/src/heap/factory-base-inl.h
new file mode 100644
index 0000000000..6f218b8248
--- /dev/null
+++ b/deps/v8/src/heap/factory-base-inl.h
@@ -0,0 +1,99 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_FACTORY_BASE_INL_H_
+#define V8_HEAP_FACTORY_BASE_INL_H_
+
+#include "src/heap/factory-base.h"
+
+#include "src/numbers/conversions.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/smi.h"
+#include "src/roots/roots.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename Impl>
+Handle<Oddball> FactoryBase<Impl>::ToBoolean(bool value) {
+ return value ? impl()->true_value() : impl()->false_value();
+}
+
+template <typename Impl>
+template <AllocationType allocation>
+Handle<Object> FactoryBase<Impl>::NewNumber(double value) {
+  // Materialize as a Smi if possible.
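+  // E.g. NewNumber(7.0) yields the Smi 7, while NewNumber(0.5) allocates a
+  // HeapNumber.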
+ int32_t int_value;
+ if (DoubleToSmiInteger(value, &int_value)) {
+ return handle(Smi::FromInt(int_value), isolate());
+ }
+ return NewHeapNumber<allocation>(value);
+}
+
+template <typename Impl>
+template <AllocationType allocation>
+Handle<Object> FactoryBase<Impl>::NewNumberFromInt(int32_t value) {
+ if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
+ // Bypass NewNumber to avoid various redundant checks.
+ return NewHeapNumber<allocation>(FastI2D(value));
+}
+
+template <typename Impl>
+template <AllocationType allocation>
+Handle<Object> FactoryBase<Impl>::NewNumberFromUint(uint32_t value) {
+ int32_t int32v = static_cast<int32_t>(value);
+ if (int32v >= 0 && Smi::IsValid(int32v)) {
+ return handle(Smi::FromInt(int32v), isolate());
+ }
+ return NewHeapNumber<allocation>(FastUI2D(value));
+}
+
+template <typename Impl>
+template <AllocationType allocation>
+Handle<Object> FactoryBase<Impl>::NewNumberFromSize(size_t value) {
+ // We can't use Smi::IsValid() here because that operates on a signed
+ // intptr_t, and casting from size_t could create a bogus sign bit.
+ if (value <= static_cast<size_t>(Smi::kMaxValue)) {
+ return handle(Smi::FromIntptr(static_cast<intptr_t>(value)), isolate());
+ }
+ return NewHeapNumber<allocation>(static_cast<double>(value));
+}
+
+template <typename Impl>
+template <AllocationType allocation>
+Handle<Object> FactoryBase<Impl>::NewNumberFromInt64(int64_t value) {
+ if (value <= std::numeric_limits<int32_t>::max() &&
+ value >= std::numeric_limits<int32_t>::min() &&
+ Smi::IsValid(static_cast<int32_t>(value))) {
+ return handle(Smi::FromInt(static_cast<int32_t>(value)), isolate());
+ }
+ return NewHeapNumber<allocation>(static_cast<double>(value));
+}
+
+template <typename Impl>
+template <AllocationType allocation>
+Handle<HeapNumber> FactoryBase<Impl>::NewHeapNumber(double value) {
+ Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
+ heap_number->set_value(value);
+ return heap_number;
+}
+
+template <typename Impl>
+template <AllocationType allocation>
+Handle<HeapNumber> FactoryBase<Impl>::NewHeapNumberFromBits(uint64_t bits) {
+ Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
+ heap_number->set_value_as_bits(bits);
+ return heap_number;
+}
+
+template <typename Impl>
+template <AllocationType allocation>
+Handle<HeapNumber> FactoryBase<Impl>::NewHeapNumberWithHoleNaN() {
+ return NewHeapNumberFromBits<allocation>(kHoleNanInt64);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_FACTORY_BASE_INL_H_
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index bdd1ccf2e2..e2ef3318ce 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -4,21 +4,448 @@
#include "src/heap/factory-base.h"
+#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
-#include "src/heap/off-thread-factory.h"
-#include "src/objects/string-inl.h"
-#include "src/utils/memcopy.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/off-thread-factory-inl.h"
+#include "src/heap/read-only-heap.h"
+#include "src/logging/log.h"
+#include "src/logging/off-thread-logger.h"
+#include "src/objects/literal-objects-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/oddball.h"
+#include "src/objects/shared-function-info-inl.h"
+#include "src/objects/source-text-module.h"
+#include "src/objects/template-objects-inl.h"
namespace v8 {
namespace internal {
template <typename Impl>
-FactoryHandle<Impl, SeqOneByteString>
-FactoryBase<Impl>::NewOneByteInternalizedString(
+template <AllocationType allocation>
+Handle<HeapNumber> FactoryBase<Impl>::NewHeapNumber() {
+ STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
+ Map map = read_only_roots().heap_number_map();
+ HeapObject result = AllocateRawWithImmortalMap(HeapNumber::kSize, allocation,
+ map, kDoubleUnaligned);
+ return handle(HeapNumber::cast(result), isolate());
+}
+
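+// Explicit instantiations for the allocation types each factory actually
+// uses; the off-thread factory only ever allocates into old space.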
+template V8_EXPORT_PRIVATE Handle<HeapNumber>
+FactoryBase<Factory>::NewHeapNumber<AllocationType::kYoung>();
+template V8_EXPORT_PRIVATE Handle<HeapNumber>
+FactoryBase<Factory>::NewHeapNumber<AllocationType::kOld>();
+template V8_EXPORT_PRIVATE Handle<HeapNumber>
+FactoryBase<Factory>::NewHeapNumber<AllocationType::kReadOnly>();
+
+template V8_EXPORT_PRIVATE Handle<HeapNumber>
+FactoryBase<OffThreadFactory>::NewHeapNumber<AllocationType::kOld>();
+
+template <typename Impl>
+Handle<Struct> FactoryBase<Impl>::NewStruct(InstanceType type,
+ AllocationType allocation) {
+ Map map = Map::GetInstanceTypeMap(read_only_roots(), type);
+ int size = map.instance_size();
+ HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
+ Handle<Struct> str = handle(Struct::cast(result), isolate());
+ str->InitializeBody(size);
+ return str;
+}
+
+template <typename Impl>
+Handle<AccessorPair> FactoryBase<Impl>::NewAccessorPair() {
+ Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(
+ NewStruct(ACCESSOR_PAIR_TYPE, AllocationType::kOld));
+ accessors->set_getter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
+ accessors->set_setter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
+ return accessors;
+}
+
+template <typename Impl>
+Handle<FixedArray> FactoryBase<Impl>::NewFixedArray(int length,
+ AllocationType allocation) {
+ DCHECK_LE(0, length);
+ if (length == 0) return impl()->empty_fixed_array();
+ return NewFixedArrayWithFiller(
+ read_only_roots().fixed_array_map_handle(), length,
+ read_only_roots().undefined_value_handle(), allocation);
+}
+
+template <typename Impl>
+Handle<FixedArray> FactoryBase<Impl>::NewFixedArrayWithMap(
+ Handle<Map> map, int length, AllocationType allocation) {
+  // The zero-length case must be handled by the caller, which knows which
+  // canonical empty array matches the map.
+ DCHECK_LT(0, length);
+ return NewFixedArrayWithFiller(
+ map, length, read_only_roots().undefined_value_handle(), allocation);
+}
+
+template <typename Impl>
+Handle<FixedArray> FactoryBase<Impl>::NewFixedArrayWithHoles(
+ int length, AllocationType allocation) {
+ DCHECK_LE(0, length);
+ if (length == 0) return impl()->empty_fixed_array();
+ return NewFixedArrayWithFiller(
+ read_only_roots().fixed_array_map_handle(), length,
+ read_only_roots().the_hole_value_handle(), allocation);
+}
+
+template <typename Impl>
+Handle<FixedArray> FactoryBase<Impl>::NewFixedArrayWithFiller(
+ Handle<Map> map, int length, Handle<Oddball> filler,
+ AllocationType allocation) {
+ HeapObject result = AllocateRawFixedArray(length, allocation);
+ DCHECK(ReadOnlyHeap::Contains(*map));
+ DCHECK(ReadOnlyHeap::Contains(*filler));
+ result.set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
+ Handle<FixedArray> array = handle(FixedArray::cast(result), isolate());
+ array->set_length(length);
+ MemsetTagged(array->data_start(), *filler, length);
+ return array;
+}
+
+template <typename Impl>
+Handle<FixedArrayBase> FactoryBase<Impl>::NewFixedDoubleArray(
+ int length, AllocationType allocation) {
+ if (length == 0) return impl()->empty_fixed_array();
+ if (length < 0 || length > FixedDoubleArray::kMaxLength) {
+ isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ }
+ int size = FixedDoubleArray::SizeFor(length);
+ Map map = read_only_roots().fixed_double_array_map();
+ HeapObject result =
+ AllocateRawWithImmortalMap(size, allocation, map, kDoubleAligned);
+ Handle<FixedDoubleArray> array =
+ handle(FixedDoubleArray::cast(result), isolate());
+ array->set_length(length);
+ return array;
+}
+
+template <typename Impl>
+Handle<WeakFixedArray> FactoryBase<Impl>::NewWeakFixedArrayWithMap(
+ Map map, int length, AllocationType allocation) {
+ // Zero-length case must be handled outside.
+ DCHECK_LT(0, length);
+ DCHECK(ReadOnlyHeap::Contains(map));
+
+ HeapObject result =
+ AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
+ result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+
+ Handle<WeakFixedArray> array =
+ handle(WeakFixedArray::cast(result), isolate());
+ array->set_length(length);
+ MemsetTagged(ObjectSlot(array->data_start()),
+ read_only_roots().undefined_value(), length);
+
+ return array;
+}
+
+template <typename Impl>
+Handle<WeakFixedArray> FactoryBase<Impl>::NewWeakFixedArray(
+ int length, AllocationType allocation) {
+ DCHECK_LE(0, length);
+ if (length == 0) return impl()->empty_weak_fixed_array();
+ return NewWeakFixedArrayWithMap(read_only_roots().weak_fixed_array_map(),
+ length, allocation);
+}
+
+template <typename Impl>
+Handle<ByteArray> FactoryBase<Impl>::NewByteArray(int length,
+ AllocationType allocation) {
+ if (length < 0 || length > ByteArray::kMaxLength) {
+ isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ }
+ int size = ByteArray::SizeFor(length);
+ HeapObject result = AllocateRawWithImmortalMap(
+ size, allocation, read_only_roots().byte_array_map());
+ Handle<ByteArray> array(ByteArray::cast(result), isolate());
+ array->set_length(length);
+ array->clear_padding();
+ return array;
+}
+
+template <typename Impl>
+Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
+ int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
+ Handle<FixedArray> constant_pool) {
+ if (length < 0 || length > BytecodeArray::kMaxLength) {
+ isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ }
+ // Bytecode array is AllocationType::kOld, so constant pool array should be
+ // too.
+ DCHECK(!Heap::InYoungGeneration(*constant_pool));
+
+ int size = BytecodeArray::SizeFor(length);
+ HeapObject result = AllocateRawWithImmortalMap(
+ size, AllocationType::kOld, read_only_roots().bytecode_array_map());
+ Handle<BytecodeArray> instance(BytecodeArray::cast(result), isolate());
+ instance->set_length(length);
+ instance->set_frame_size(frame_size);
+ instance->set_parameter_count(parameter_count);
+ instance->set_incoming_new_target_or_generator_register(
+ interpreter::Register::invalid_value());
+ instance->set_osr_loop_nesting_level(0);
+ instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
+ instance->set_constant_pool(*constant_pool);
+ instance->set_handler_table(read_only_roots().empty_byte_array());
+ instance->set_source_position_table(read_only_roots().undefined_value());
+ CopyBytes(reinterpret_cast<byte*>(instance->GetFirstBytecodeAddress()),
+ raw_bytecodes, length);
+ instance->clear_padding();
+
+ return instance;
+}
+
+template <typename Impl>
+Handle<Script> FactoryBase<Impl>::NewScript(Handle<String> source) {
+ return NewScriptWithId(source, isolate()->GetNextScriptId());
+}
+
+template <typename Impl>
+Handle<Script> FactoryBase<Impl>::NewScriptWithId(Handle<String> source,
+ int script_id) {
+ // Create and initialize script object.
+ ReadOnlyRoots roots = read_only_roots();
+ Handle<Script> script =
+ Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
+ script->set_source(*source);
+ script->set_name(roots.undefined_value());
+ script->set_id(script_id);
+ script->set_line_offset(0);
+ script->set_column_offset(0);
+ script->set_context_data(roots.undefined_value());
+ script->set_type(Script::TYPE_NORMAL);
+ script->set_line_ends(roots.undefined_value());
+ script->set_eval_from_shared_or_wrapped_arguments(roots.undefined_value());
+ script->set_eval_from_position(0);
+ script->set_shared_function_infos(roots.empty_weak_fixed_array(),
+ SKIP_WRITE_BARRIER);
+ script->set_flags(0);
+ script->set_host_defined_options(roots.empty_fixed_array());
+
+ impl()->AddToScriptList(script);
+
+ LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
+ return script;
+}
+
+template <typename Impl>
+Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfoForLiteral(
+ FunctionLiteral* literal, Handle<Script> script, bool is_toplevel) {
+ FunctionKind kind = literal->kind();
+ Handle<SharedFunctionInfo> shared =
+ NewSharedFunctionInfo(literal->GetName(isolate()), MaybeHandle<Code>(),
+ Builtins::kCompileLazy, kind);
+ SharedFunctionInfo::InitFromFunctionLiteral(isolate(), shared, literal,
+ is_toplevel);
+ shared->SetScript(read_only_roots(), *script, literal->function_literal_id(),
+ false);
+ return shared;
+}
+
+template <typename Impl>
+Handle<PreparseData> FactoryBase<Impl>::NewPreparseData(int data_length,
+ int children_length) {
+ int size = PreparseData::SizeFor(data_length, children_length);
+ Handle<PreparseData> result = handle(
+ PreparseData::cast(AllocateRawWithImmortalMap(
+ size, AllocationType::kOld, read_only_roots().preparse_data_map())),
+ isolate());
+ result->set_data_length(data_length);
+ result->set_children_length(children_length);
+ MemsetTagged(result->inner_data_start(), read_only_roots().null_value(),
+ children_length);
+ result->clear_padding();
+ return result;
+}
+
+template <typename Impl>
+Handle<UncompiledDataWithoutPreparseData>
+FactoryBase<Impl>::NewUncompiledDataWithoutPreparseData(
+ Handle<String> inferred_name, int32_t start_position,
+ int32_t end_position) {
+ Handle<UncompiledDataWithoutPreparseData> result = handle(
+ UncompiledDataWithoutPreparseData::cast(NewWithImmortalMap(
+ impl()->read_only_roots().uncompiled_data_without_preparse_data_map(),
+ AllocationType::kOld)),
+ isolate());
+
+ result->Init(impl(), *inferred_name, start_position, end_position);
+ return result;
+}
+
+template <typename Impl>
+Handle<UncompiledDataWithPreparseData>
+FactoryBase<Impl>::NewUncompiledDataWithPreparseData(
+ Handle<String> inferred_name, int32_t start_position, int32_t end_position,
+ Handle<PreparseData> preparse_data) {
+ Handle<UncompiledDataWithPreparseData> result = handle(
+ UncompiledDataWithPreparseData::cast(NewWithImmortalMap(
+ impl()->read_only_roots().uncompiled_data_with_preparse_data_map(),
+ AllocationType::kOld)),
+ isolate());
+
+ result->Init(impl(), *inferred_name, start_position, end_position,
+ *preparse_data);
+
+ return result;
+}
+
+template <typename Impl>
+Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
+ MaybeHandle<String> maybe_name, MaybeHandle<HeapObject> maybe_function_data,
+ int maybe_builtin_index, FunctionKind kind) {
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo();
+
+ // Function names are assumed to be flat elsewhere.
+ Handle<String> shared_name;
+ bool has_shared_name = maybe_name.ToHandle(&shared_name);
+ if (has_shared_name) {
+ DCHECK(shared_name->IsFlat());
+ shared->set_name_or_scope_info(*shared_name);
+ } else {
+ DCHECK_EQ(shared->name_or_scope_info(),
+ SharedFunctionInfo::kNoSharedNameSentinel);
+ }
+
+ Handle<HeapObject> function_data;
+ if (maybe_function_data.ToHandle(&function_data)) {
+ // If we pass function_data then we shouldn't pass a builtin index, and
+ // the function_data should not be code with a builtin.
+ DCHECK(!Builtins::IsBuiltinId(maybe_builtin_index));
+ DCHECK_IMPLIES(function_data->IsCode(),
+ !Code::cast(*function_data).is_builtin());
+ shared->set_function_data(*function_data);
+ } else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
+ shared->set_builtin_id(maybe_builtin_index);
+ } else {
+ shared->set_builtin_id(Builtins::kIllegal);
+ }
+
+ shared->CalculateConstructAsBuiltin();
+ shared->set_kind(kind);
+
+#ifdef VERIFY_HEAP
+ shared->SharedFunctionInfoVerify(isolate());
+#endif // VERIFY_HEAP
+ return shared;
+}
+
+template <typename Impl>
+Handle<ObjectBoilerplateDescription>
+FactoryBase<Impl>::NewObjectBoilerplateDescription(int boilerplate,
+ int all_properties,
+ int index_keys,
+ bool has_seen_proto) {
+ DCHECK_GE(boilerplate, 0);
+ DCHECK_GE(all_properties, index_keys);
+ DCHECK_GE(index_keys, 0);
+
+ int backing_store_size =
+ all_properties - index_keys - (has_seen_proto ? 1 : 0);
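+  // E.g. all_properties = 5, index_keys = 2, has_seen_proto = true leaves
+  // 5 - 2 - 1 = 2 named properties for the backing store.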
+ DCHECK_GE(backing_store_size, 0);
+ bool has_different_size_backing_store = boilerplate != backing_store_size;
+
+ // Space for name and value for every boilerplate property + LiteralType flag.
+ int size =
+ 2 * boilerplate + ObjectBoilerplateDescription::kDescriptionStartIndex;
+
+ if (has_different_size_backing_store) {
+ // An extra entry for the backing store size.
+ size++;
+ }
+
+ Handle<ObjectBoilerplateDescription> description =
+ Handle<ObjectBoilerplateDescription>::cast(NewFixedArrayWithMap(
+ read_only_roots().object_boilerplate_description_map_handle(), size,
+ AllocationType::kOld));
+
+ if (has_different_size_backing_store) {
+ DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
+ has_seen_proto);
+ description->set_backing_store_size(backing_store_size);
+ }
+
+ description->set_flags(0);
+
+ return description;
+}
+
+template <typename Impl>
+Handle<ArrayBoilerplateDescription>
+FactoryBase<Impl>::NewArrayBoilerplateDescription(
+ ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
+ Handle<ArrayBoilerplateDescription> result =
+ Handle<ArrayBoilerplateDescription>::cast(
+ NewStruct(ARRAY_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
+ result->set_elements_kind(elements_kind);
+ result->set_constant_elements(*constant_values);
+ return result;
+}
+
+template <typename Impl>
+Handle<TemplateObjectDescription>
+FactoryBase<Impl>::NewTemplateObjectDescription(
+ Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings) {
+ DCHECK_EQ(raw_strings->length(), cooked_strings->length());
+ DCHECK_LT(0, raw_strings->length());
+ Handle<TemplateObjectDescription> result =
+ Handle<TemplateObjectDescription>::cast(
+ NewStruct(TEMPLATE_OBJECT_DESCRIPTION_TYPE, AllocationType::kOld));
+ result->set_raw_strings(*raw_strings);
+ result->set_cooked_strings(*cooked_strings);
+ return result;
+}
+
+template <typename Impl>
+Handle<FeedbackMetadata> FactoryBase<Impl>::NewFeedbackMetadata(
+ int slot_count, int feedback_cell_count, AllocationType allocation) {
+ DCHECK_LE(0, slot_count);
+ int size = FeedbackMetadata::SizeFor(slot_count);
+ HeapObject result = AllocateRawWithImmortalMap(
+ size, allocation, read_only_roots().feedback_metadata_map());
+ Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
+ data->set_slot_count(slot_count);
+ data->set_closure_feedback_cell_count(feedback_cell_count);
+
+ // Initialize the data section to 0.
+ int data_size = size - FeedbackMetadata::kHeaderSize;
+ Address data_start = data->address() + FeedbackMetadata::kHeaderSize;
+ memset(reinterpret_cast<byte*>(data_start), 0, data_size);
+ // Fields have been zeroed out but not initialized, so this object will not
+ // pass object verification at this point.
+ return data;
+}
+
+template <typename Impl>
+Handle<CoverageInfo> FactoryBase<Impl>::NewCoverageInfo(
+ const ZoneVector<SourceRange>& slots) {
+ const int slot_count = static_cast<int>(slots.size());
+
+ int size = CoverageInfo::SizeFor(slot_count);
+ Map map = read_only_roots().coverage_info_map();
+ HeapObject result =
+ AllocateRawWithImmortalMap(size, AllocationType::kYoung, map);
+ Handle<CoverageInfo> info(CoverageInfo::cast(result), isolate());
+
+ info->set_slot_count(slot_count);
+ for (int i = 0; i < slot_count; i++) {
+ SourceRange range = slots[i];
+ info->InitializeSlot(i, range.start, range.end);
+ }
+
+ return info;
+}
+
+template <typename Impl>
+Handle<SeqOneByteString> FactoryBase<Impl>::NewOneByteInternalizedString(
const Vector<const uint8_t>& str, uint32_t hash_field) {
- FactoryHandle<Impl, SeqOneByteString> result =
+ Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(str.length(), hash_field);
DisallowHeapAllocation no_gc;
MemCopy(result->GetChars(no_gc), str.begin(), str.length());
@@ -26,10 +453,9 @@ FactoryBase<Impl>::NewOneByteInternalizedString(
}
template <typename Impl>
-FactoryHandle<Impl, SeqTwoByteString>
-FactoryBase<Impl>::NewTwoByteInternalizedString(const Vector<const uc16>& str,
- uint32_t hash_field) {
- FactoryHandle<Impl, SeqTwoByteString> result =
+Handle<SeqTwoByteString> FactoryBase<Impl>::NewTwoByteInternalizedString(
+ const Vector<const uc16>& str, uint32_t hash_field) {
+ Handle<SeqTwoByteString> result =
AllocateRawTwoByteInternalizedString(str.length(), hash_field);
DisallowHeapAllocation no_gc;
MemCopy(result->GetChars(no_gc), str.begin(), str.length() * kUC16Size);
@@ -37,11 +463,10 @@ FactoryBase<Impl>::NewTwoByteInternalizedString(const Vector<const uc16>& str,
}
template <typename Impl>
-FactoryMaybeHandle<Impl, SeqOneByteString>
-FactoryBase<Impl>::NewRawOneByteString(int length, AllocationType allocation) {
+MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawOneByteString(
+ int length, AllocationType allocation) {
if (length > String::kMaxLength || length < 0) {
- return impl()->template Throw<SeqOneByteString>(
- impl()->NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
}
DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
int size = SeqOneByteString::SizeFor(length);
@@ -49,8 +474,8 @@ FactoryBase<Impl>::NewRawOneByteString(int length, AllocationType allocation) {
HeapObject result = AllocateRawWithImmortalMap(
size, allocation, read_only_roots().one_byte_string_map());
- FactoryHandle<Impl, SeqOneByteString> string =
- handle(SeqOneByteString::cast(result), impl());
+ Handle<SeqOneByteString> string =
+ handle(SeqOneByteString::cast(result), isolate());
string->set_length(length);
string->set_hash_field(String::kEmptyHashField);
DCHECK_EQ(size, string->Size());
@@ -58,11 +483,10 @@ FactoryBase<Impl>::NewRawOneByteString(int length, AllocationType allocation) {
}
template <typename Impl>
-FactoryMaybeHandle<Impl, SeqTwoByteString>
-FactoryBase<Impl>::NewRawTwoByteString(int length, AllocationType allocation) {
+MaybeHandle<SeqTwoByteString> FactoryBase<Impl>::NewRawTwoByteString(
+ int length, AllocationType allocation) {
if (length > String::kMaxLength || length < 0) {
- return impl()->template Throw<SeqTwoByteString>(
- impl()->NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
}
DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
int size = SeqTwoByteString::SizeFor(length);
@@ -70,8 +494,8 @@ FactoryBase<Impl>::NewRawTwoByteString(int length, AllocationType allocation) {
HeapObject result = AllocateRawWithImmortalMap(
size, allocation, read_only_roots().string_map());
- FactoryHandle<Impl, SeqTwoByteString> string =
- handle(SeqTwoByteString::cast(result), impl());
+ Handle<SeqTwoByteString> string =
+ handle(SeqTwoByteString::cast(result), isolate());
string->set_length(length);
string->set_hash_field(String::kEmptyHashField);
DCHECK_EQ(size, string->Size());
@@ -79,14 +503,13 @@ FactoryBase<Impl>::NewRawTwoByteString(int length, AllocationType allocation) {
}
template <typename Impl>
-FactoryMaybeHandle<Impl, String> FactoryBase<Impl>::NewConsString(
- FactoryHandle<Impl, String> left, FactoryHandle<Impl, String> right,
- AllocationType allocation) {
+MaybeHandle<String> FactoryBase<Impl>::NewConsString(
+ Handle<String> left, Handle<String> right, AllocationType allocation) {
if (left->IsThinString()) {
- left = handle(ThinString::cast(*left).actual(), impl());
+ left = handle(ThinString::cast(*left).actual(), isolate());
}
if (right->IsThinString()) {
- right = handle(ThinString::cast(*right).actual(), impl());
+ right = handle(ThinString::cast(*right).actual(), isolate());
}
int left_length = left->length();
if (left_length == 0) return right;
@@ -104,8 +527,7 @@ FactoryMaybeHandle<Impl, String> FactoryBase<Impl>::NewConsString(
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large.
if (length > String::kMaxLength || length < 0) {
- return impl()->template Throw<String>(
- impl()->NewInvalidStringLengthError());
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
}
bool left_is_one_byte = left->IsOneByteRepresentation();
@@ -121,7 +543,7 @@ FactoryMaybeHandle<Impl, String> FactoryBase<Impl>::NewConsString(
STATIC_ASSERT(ConsString::kMinLength <= String::kMaxLength);
if (is_one_byte) {
- FactoryHandle<Impl, SeqOneByteString> result =
+ Handle<SeqOneByteString> result =
NewRawOneByteString(length, allocation).ToHandleChecked();
DisallowHeapAllocation no_gc;
uint8_t* dest = result->GetChars(no_gc);
@@ -134,7 +556,7 @@ FactoryMaybeHandle<Impl, String> FactoryBase<Impl>::NewConsString(
return result;
}
- FactoryHandle<Impl, SeqTwoByteString> result =
+ Handle<SeqTwoByteString> result =
NewRawTwoByteString(length, allocation).ToHandleChecked();
DisallowHeapAllocation pointer_stays_valid;
@@ -148,22 +570,23 @@ FactoryMaybeHandle<Impl, String> FactoryBase<Impl>::NewConsString(
}
template <typename Impl>
-FactoryHandle<Impl, String> FactoryBase<Impl>::NewConsString(
- FactoryHandle<Impl, String> left, FactoryHandle<Impl, String> right,
- int length, bool one_byte, AllocationType allocation) {
+Handle<String> FactoryBase<Impl>::NewConsString(Handle<String> left,
+ Handle<String> right,
+ int length, bool one_byte,
+ AllocationType allocation) {
DCHECK(!left->IsThinString());
DCHECK(!right->IsThinString());
DCHECK_GE(length, ConsString::kMinLength);
DCHECK_LE(length, String::kMaxLength);
- FactoryHandle<Impl, ConsString> result = handle(
+ Handle<ConsString> result = handle(
ConsString::cast(
one_byte
? NewWithImmortalMap(read_only_roots().cons_one_byte_string_map(),
allocation)
: NewWithImmortalMap(read_only_roots().cons_string_map(),
allocation)),
- impl());
+ isolate());
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
@@ -176,7 +599,81 @@ FactoryHandle<Impl, String> FactoryBase<Impl>::NewConsString(
}
template <typename Impl>
-FactoryHandle<Impl, SeqOneByteString>
+Handle<FreshlyAllocatedBigInt> FactoryBase<Impl>::NewBigInt(
+ int length, AllocationType allocation) {
+ if (length < 0 || length > BigInt::kMaxLength) {
+ isolate()->FatalProcessOutOfHeapMemory("invalid BigInt length");
+ }
+ HeapObject result = AllocateRawWithImmortalMap(
+ BigInt::SizeFor(length), allocation, read_only_roots().bigint_map());
+ FreshlyAllocatedBigInt bigint = FreshlyAllocatedBigInt::cast(result);
+ bigint.clear_padding();
+ return handle(bigint, isolate());
+}
+
+template <typename Impl>
+Handle<ScopeInfo> FactoryBase<Impl>::NewScopeInfo(int length,
+ AllocationType type) {
+ DCHECK(type == AllocationType::kOld || type == AllocationType::kReadOnly);
+ return Handle<ScopeInfo>::cast(NewFixedArrayWithMap(
+ read_only_roots().scope_info_map_handle(), length, type));
+}
+
+template <typename Impl>
+Handle<SourceTextModuleInfo> FactoryBase<Impl>::NewSourceTextModuleInfo() {
+ return Handle<SourceTextModuleInfo>::cast(NewFixedArrayWithMap(
+ read_only_roots().module_info_map_handle(), SourceTextModuleInfo::kLength,
+ AllocationType::kOld));
+}
+
+template <typename Impl>
+Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo() {
+ Map map = read_only_roots().shared_function_info_map();
+
+ Handle<SharedFunctionInfo> shared = handle(
+ SharedFunctionInfo::cast(NewWithImmortalMap(map, AllocationType::kOld)),
+ isolate());
+ int unique_id = -1;
+#if V8_SFI_HAS_UNIQUE_ID
+ unique_id = isolate()->GetNextUniqueSharedFunctionInfoId();
+#endif // V8_SFI_HAS_UNIQUE_ID
+
+ shared->Init(read_only_roots(), unique_id);
+
+#ifdef VERIFY_HEAP
+ shared->SharedFunctionInfoVerify(isolate());
+#endif // VERIFY_HEAP
+ return shared;
+}
+
+template <typename Impl>
+Handle<DescriptorArray> FactoryBase<Impl>::NewDescriptorArray(
+ int number_of_descriptors, int slack, AllocationType allocation) {
+ int number_of_all_descriptors = number_of_descriptors + slack;
+ // Zero-length case must be handled outside.
+ DCHECK_LT(0, number_of_all_descriptors);
+ int size = DescriptorArray::SizeFor(number_of_all_descriptors);
+ HeapObject obj = AllocateRawWithImmortalMap(
+ size, allocation, read_only_roots().descriptor_array_map());
+ DescriptorArray array = DescriptorArray::cast(obj);
+ array.Initialize(read_only_roots().empty_enum_cache(),
+ read_only_roots().undefined_value(), number_of_descriptors,
+ slack);
+ return handle(array, isolate());
+}
+
+template <typename Impl>
+Handle<ClassPositions> FactoryBase<Impl>::NewClassPositions(int start,
+ int end) {
+ Handle<ClassPositions> class_positions = Handle<ClassPositions>::cast(
+ NewStruct(CLASS_POSITIONS_TYPE, AllocationType::kOld));
+ class_positions->set_start(start);
+ class_positions->set_end(end);
+ return class_positions;
+}
+
+template <typename Impl>
+Handle<SeqOneByteString>
FactoryBase<Impl>::AllocateRawOneByteInternalizedString(int length,
uint32_t hash_field) {
CHECK_GE(String::kMaxLength, length);
@@ -190,8 +687,8 @@ FactoryBase<Impl>::AllocateRawOneByteInternalizedString(int length,
impl()->CanAllocateInReadOnlySpace() ? AllocationType::kReadOnly
: AllocationType::kOld,
map);
- FactoryHandle<Impl, SeqOneByteString> answer =
- handle(SeqOneByteString::cast(result), impl());
+ Handle<SeqOneByteString> answer =
+ handle(SeqOneByteString::cast(result), isolate());
answer->set_length(length);
answer->set_hash_field(hash_field);
DCHECK_EQ(size, answer->Size());
@@ -199,7 +696,7 @@ FactoryBase<Impl>::AllocateRawOneByteInternalizedString(int length,
}
template <typename Impl>
-FactoryHandle<Impl, SeqTwoByteString>
+Handle<SeqTwoByteString>
FactoryBase<Impl>::AllocateRawTwoByteInternalizedString(int length,
uint32_t hash_field) {
CHECK_GE(String::kMaxLength, length);
@@ -209,8 +706,8 @@ FactoryBase<Impl>::AllocateRawTwoByteInternalizedString(int length,
int size = SeqTwoByteString::SizeFor(length);
HeapObject result =
AllocateRawWithImmortalMap(size, AllocationType::kOld, map);
- FactoryHandle<Impl, SeqTwoByteString> answer =
- handle(SeqTwoByteString::cast(result), impl());
+ Handle<SeqTwoByteString> answer =
+ handle(SeqTwoByteString::cast(result), isolate());
answer->set_length(length);
answer->set_hash_field(hash_field);
DCHECK_EQ(size, result.Size());
@@ -218,6 +715,35 @@ FactoryBase<Impl>::AllocateRawTwoByteInternalizedString(int length,
}
template <typename Impl>
+HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
+ AllocationType allocation) {
+ HeapObject result = AllocateRaw(size, allocation);
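+  // Large arrays are marked incrementally; HAS_PROGRESS_BAR lets the marker
+  // record how far through the object it has progressed.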
+ if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
+ chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ return result;
+}
+
+template <typename Impl>
+HeapObject FactoryBase<Impl>::AllocateRawFixedArray(int length,
+ AllocationType allocation) {
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ }
+ return AllocateRawArray(FixedArray::SizeFor(length), allocation);
+}
+
+template <typename Impl>
+HeapObject FactoryBase<Impl>::AllocateRawWeakArrayList(
+ int capacity, AllocationType allocation) {
+ if (capacity < 0 || capacity > WeakArrayList::kMaxCapacity) {
+ isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ }
+ return AllocateRawArray(WeakArrayList::SizeForCapacity(capacity), allocation);
+}
+
+template <typename Impl>
HeapObject FactoryBase<Impl>::NewWithImmortalMap(Map map,
AllocationType allocation) {
return AllocateRawWithImmortalMap(map.instance_size(), allocation, map);
@@ -227,6 +753,10 @@ template <typename Impl>
HeapObject FactoryBase<Impl>::AllocateRawWithImmortalMap(
int size, AllocationType allocation, Map map,
AllocationAlignment alignment) {
+  // TODO(delphick): Potentially an immortal immovable Map from MAP_SPACE,
+  // such as external_map or message_object_map, could also be passed here,
+  // but currently no caller does, so this check is sufficient.
+ DCHECK(ReadOnlyHeap::Contains(map));
HeapObject result = AllocateRaw(size, allocation, alignment);
result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index f49d9ace66..36a193bbf8 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -5,57 +5,210 @@
#ifndef V8_HEAP_FACTORY_BASE_H_
#define V8_HEAP_FACTORY_BASE_H_
+#include "src/base/export-template.h"
#include "src/common/globals.h"
-#include "src/handles/factory-handles.h"
+#include "src/objects/function-kind.h"
+#include "src/objects/instance-type.h"
#include "src/roots/roots.h"
namespace v8 {
namespace internal {
class HeapObject;
+class SharedFunctionInfo;
+class FunctionLiteral;
class SeqOneByteString;
class SeqTwoByteString;
+class FreshlyAllocatedBigInt;
+class ObjectBoilerplateDescription;
+class ArrayBoilerplateDescription;
+class TemplateObjectDescription;
+class SourceTextModuleInfo;
+class PreparseData;
+class UncompiledDataWithoutPreparseData;
+class UncompiledDataWithPreparseData;
+class BytecodeArray;
+class CoverageInfo;
+class ClassPositions;
+struct SourceRange;
+template <typename T>
+class ZoneVector;
template <typename Impl>
-class V8_EXPORT_PRIVATE FactoryBase {
+class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase {
public:
- FactoryHandle<Impl, SeqOneByteString> NewOneByteInternalizedString(
+  // Converts the given boolean condition to a JavaScript boolean value.
+ inline Handle<Oddball> ToBoolean(bool value);
+
+ // Numbers (e.g. literals) are pretenured by the parser.
+ // The return value may be a smi or a heap number.
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<Object> NewNumber(double value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<Object> NewNumberFromInt(int32_t value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<Object> NewNumberFromUint(uint32_t value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<Object> NewNumberFromSize(size_t value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<Object> NewNumberFromInt64(int64_t value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<HeapNumber> NewHeapNumber(double value);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<HeapNumber> NewHeapNumberFromBits(uint64_t bits);
+ template <AllocationType allocation = AllocationType::kYoung>
+ inline Handle<HeapNumber> NewHeapNumberWithHoleNaN();
+
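+  // Allocates a HeapNumber whose value field is left uninitialized; callers
+  // must set it before the object escapes.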
+ template <AllocationType allocation>
+ Handle<HeapNumber> NewHeapNumber();
+
+ Handle<Struct> NewStruct(InstanceType type,
+ AllocationType allocation = AllocationType::kYoung);
+
+ // Create a pre-tenured empty AccessorPair.
+ Handle<AccessorPair> NewAccessorPair();
+
+ // Allocates a fixed array initialized with undefined values.
+ Handle<FixedArray> NewFixedArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
+
+  // Allocates a fixed array-like object with the given map, initialized with
+  // undefined values.
+ Handle<FixedArray> NewFixedArrayWithMap(
+ Handle<Map> map, int length,
+ AllocationType allocation = AllocationType::kYoung);
+
+ // Allocate a new fixed array with non-existing entries (the hole).
+ Handle<FixedArray> NewFixedArrayWithHoles(
+ int length, AllocationType allocation = AllocationType::kYoung);
+
+ // Allocate a new uninitialized fixed double array.
+ // The function returns a pre-allocated empty fixed array for length = 0,
+ // so the return type must be the general fixed array class.
+ Handle<FixedArrayBase> NewFixedDoubleArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
+
+  // Allocates a weak fixed array-like object with the given map, initialized
+  // with undefined values.
+ Handle<WeakFixedArray> NewWeakFixedArrayWithMap(
+ Map map, int length, AllocationType allocation = AllocationType::kYoung);
+
+  // Allocates a fixed array which may contain in-place weak references. The
+  // array is initialized with undefined values.
+ Handle<WeakFixedArray> NewWeakFixedArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
+
+ Handle<ByteArray> NewByteArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
+
+ Handle<BytecodeArray> NewBytecodeArray(int length, const byte* raw_bytecodes,
+ int frame_size, int parameter_count,
+ Handle<FixedArray> constant_pool);
+
+ // Allocates a fixed array for name-value pairs of boilerplate properties and
+ // calculates the number of properties we need to store in the backing store.
+ Handle<ObjectBoilerplateDescription> NewObjectBoilerplateDescription(
+ int boilerplate, int all_properties, int index_keys, bool has_seen_proto);
+
+ // Create a new ArrayBoilerplateDescription struct.
+ Handle<ArrayBoilerplateDescription> NewArrayBoilerplateDescription(
+ ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
+
+ // Create a new TemplateObjectDescription struct.
+ Handle<TemplateObjectDescription> NewTemplateObjectDescription(
+ Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings);
+
+ Handle<Script> NewScript(Handle<String> source);
+ Handle<Script> NewScriptWithId(Handle<String> source, int script_id);
+
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
+ FunctionLiteral* literal, Handle<Script> script, bool is_toplevel);
+
+ Handle<PreparseData> NewPreparseData(int data_length, int children_length);
+
+ Handle<UncompiledDataWithoutPreparseData>
+ NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
+ int32_t start_position,
+ int32_t end_position);
+
+ Handle<UncompiledDataWithPreparseData> NewUncompiledDataWithPreparseData(
+ Handle<String> inferred_name, int32_t start_position,
+ int32_t end_position, Handle<PreparseData>);
+
+  // Allocates a FeedbackMetadata object and zeroes the data section.
+ Handle<FeedbackMetadata> NewFeedbackMetadata(
+ int slot_count, int feedback_cell_count,
+ AllocationType allocation = AllocationType::kOld);
+
+ Handle<CoverageInfo> NewCoverageInfo(const ZoneVector<SourceRange>& slots);
+
+ Handle<SeqOneByteString> NewOneByteInternalizedString(
const Vector<const uint8_t>& str, uint32_t hash_field);
- FactoryHandle<Impl, SeqTwoByteString> NewTwoByteInternalizedString(
+ Handle<SeqTwoByteString> NewTwoByteInternalizedString(
const Vector<const uc16>& str, uint32_t hash_field);
- FactoryHandle<Impl, SeqOneByteString> AllocateRawOneByteInternalizedString(
+ Handle<SeqOneByteString> AllocateRawOneByteInternalizedString(
int length, uint32_t hash_field);
- FactoryHandle<Impl, SeqTwoByteString> AllocateRawTwoByteInternalizedString(
+ Handle<SeqTwoByteString> AllocateRawTwoByteInternalizedString(
int length, uint32_t hash_field);
   // Allocates and partially initializes a one-byte or two-byte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- V8_WARN_UNUSED_RESULT FactoryMaybeHandle<Impl, SeqOneByteString>
- NewRawOneByteString(int length,
- AllocationType allocation = AllocationType::kYoung);
- V8_WARN_UNUSED_RESULT FactoryMaybeHandle<Impl, SeqTwoByteString>
- NewRawTwoByteString(int length,
- AllocationType allocation = AllocationType::kYoung);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString(
+ int length, AllocationType allocation = AllocationType::kYoung);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqTwoByteString> NewRawTwoByteString(
+ int length, AllocationType allocation = AllocationType::kYoung);
// Create a new cons string object which consists of a pair of strings.
- V8_WARN_UNUSED_RESULT FactoryMaybeHandle<Impl, String> NewConsString(
- FactoryHandle<Impl, String> left, FactoryHandle<Impl, String> right,
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewConsString(
+ Handle<String> left, Handle<String> right,
+ AllocationType allocation = AllocationType::kYoung);
+
+ V8_WARN_UNUSED_RESULT Handle<String> NewConsString(
+ Handle<String> left, Handle<String> right, int length, bool one_byte,
AllocationType allocation = AllocationType::kYoung);
- V8_WARN_UNUSED_RESULT FactoryHandle<Impl, String> NewConsString(
- FactoryHandle<Impl, String> left, FactoryHandle<Impl, String> right,
- int length, bool one_byte,
+ // Allocates a new BigInt with {length} digits. Only to be used by
+ // MutableBigInt::New*.
+ Handle<FreshlyAllocatedBigInt> NewBigInt(
+ int length, AllocationType allocation = AllocationType::kYoung);
+
+ // Create a serialized scope info.
+ Handle<ScopeInfo> NewScopeInfo(int length,
+ AllocationType type = AllocationType::kOld);
+
+ Handle<SourceTextModuleInfo> NewSourceTextModuleInfo();
+
+ Handle<DescriptorArray> NewDescriptorArray(
+ int number_of_entries, int slack = 0,
AllocationType allocation = AllocationType::kYoung);
+ Handle<ClassPositions> NewClassPositions(int start, int end);
+
protected:
+ // Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
+ HeapObject AllocateRawArray(int size, AllocationType allocation);
+ HeapObject AllocateRawFixedArray(int length, AllocationType allocation);
+  HeapObject AllocateRawWeakArrayList(int capacity, AllocationType allocation);
+
HeapObject AllocateRawWithImmortalMap(
int size, AllocationType allocation, Map map,
AllocationAlignment alignment = kWordAligned);
HeapObject NewWithImmortalMap(Map map, AllocationType allocation);
+ Handle<FixedArray> NewFixedArrayWithFiller(Handle<Map> map, int length,
+ Handle<Oddball> filler,
+ AllocationType allocation);
+
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo();
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+ MaybeHandle<String> maybe_name,
+ MaybeHandle<HeapObject> maybe_function_data, int maybe_builtin_index,
+ FunctionKind kind = kNormalFunction);
+
private:
Impl* impl() { return static_cast<Impl*>(this); }
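+  // Either Isolate* or OffThreadIsolate*, depending on Impl; hence the auto
+  // return type.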
+ auto isolate() { return impl()->isolate(); }
ReadOnlyRoots read_only_roots() { return impl()->read_only_roots(); }
HeapObject AllocateRaw(int size, AllocationType allocation,
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 77d03e6d47..65d7ebbf98 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -11,6 +11,7 @@
// Do not include anything from src/heap here!
#include "src/execution/isolate-inl.h"
#include "src/handles/handles-inl.h"
+#include "src/heap/factory-base-inl.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/objects-inl.h"
@@ -43,43 +44,6 @@ Handle<String> Factory::NewSubString(Handle<String> str, int begin, int end) {
return NewProperSubString(str, begin, end);
}
-Handle<Object> Factory::NewNumberFromSize(size_t value) {
- // We can't use Smi::IsValid() here because that operates on a signed
- // intptr_t, and casting from size_t could create a bogus sign bit.
- if (value <= static_cast<size_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
- isolate());
- }
- return NewHeapNumber(static_cast<double>(value));
-}
-
-Handle<Object> Factory::NewNumberFromInt64(int64_t value) {
- if (value <= std::numeric_limits<int32_t>::max() &&
- value >= std::numeric_limits<int32_t>::min() &&
- Smi::IsValid(static_cast<int32_t>(value))) {
- return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)), isolate());
- }
- return NewHeapNumber(static_cast<double>(value));
-}
-
-template <AllocationType allocation>
-Handle<HeapNumber> Factory::NewHeapNumber(double value) {
- Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
- heap_number->set_value(value);
- return heap_number;
-}
-
-template <AllocationType allocation>
-Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits) {
- Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
- heap_number->set_value_as_bits(bits);
- return heap_number;
-}
-
-Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN() {
- return NewHeapNumberFromBits(kHoleNanInt64);
-}
-
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
AllocationType allocation) {
@@ -101,11 +65,6 @@ Handle<Object> Factory::NewURIError() {
MessageTemplate::kURIMalformed);
}
-template <typename T>
-inline MaybeHandle<T> Factory::Throw(Handle<Object> exception) {
- return isolate()->Throw<T>(exception);
-}
-
ReadOnlyRoots Factory::read_only_roots() { return ReadOnlyRoots(isolate()); }
} // namespace internal
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 56c61b53e6..933a51425f 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -10,7 +10,6 @@
#include <utility> // For move
#include "src/ast/ast-source-ranges.h"
-#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/builtins/accessors.h"
#include "src/builtins/constants-table-builder.h"
@@ -221,10 +220,6 @@ Handle<Code> Factory::CodeBuilder::Build() {
return BuildInternal(true).ToHandleChecked();
}
-void Factory::FatalProcessOutOfHeapMemory(const char* location) {
- isolate()->heap()->FatalProcessOutOfMemory(location);
-}
-
HeapObject Factory::AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment) {
return isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
@@ -261,32 +256,6 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento,
}
}
-HeapObject Factory::AllocateRawArray(int size, AllocationType allocation) {
- HeapObject result =
- isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
- if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
- }
- return result;
-}
-
-HeapObject Factory::AllocateRawFixedArray(int length,
- AllocationType allocation) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
- }
- return AllocateRawArray(FixedArray::SizeFor(length), allocation);
-}
-
-HeapObject Factory::AllocateRawWeakArrayList(int capacity,
- AllocationType allocation) {
- if (capacity < 0 || capacity > WeakArrayList::kMaxCapacity) {
- isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
- }
- return AllocateRawArray(WeakArrayList::SizeForCapacity(capacity), allocation);
-}
-
HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
@@ -323,8 +292,8 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
Handle<FixedArray> indices) {
- Handle<EnumCache> result = Handle<EnumCache>::cast(
- NewStruct(ENUM_CACHE_TYPE, AllocationType::kOld));
+ Handle<EnumCache> result =
+ Handle<EnumCache>::cast(NewStruct(ENUM_CACHE_TYPE, AllocationType::kOld));
result->set_keys(*keys);
result->set_indices(*indices);
return result;
@@ -339,28 +308,6 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
return result;
}
-Handle<ArrayBoilerplateDescription> Factory::NewArrayBoilerplateDescription(
- ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
- Handle<ArrayBoilerplateDescription> result =
- Handle<ArrayBoilerplateDescription>::cast(
- NewStruct(ARRAY_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
- result->set_elements_kind(elements_kind);
- result->set_constant_elements(*constant_values);
- return result;
-}
-
-Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
- Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings) {
- DCHECK_EQ(raw_strings->length(), cooked_strings->length());
- DCHECK_LT(0, raw_strings->length());
- Handle<TemplateObjectDescription> result =
- Handle<TemplateObjectDescription>::cast(
- NewStruct(TEMPLATE_OBJECT_DESCRIPTION_TYPE, AllocationType::kOld));
- result->set_raw_strings(*raw_strings);
- result->set_cooked_strings(*cooked_strings);
- return result;
-}
-
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -387,78 +334,6 @@ Handle<PropertyArray> Factory::NewPropertyArray(int length) {
return array;
}
-Handle<FixedArray> Factory::NewFixedArrayWithFiller(RootIndex map_root_index,
- int length, Object filler,
- AllocationType allocation) {
- HeapObject result = AllocateRawFixedArray(length, allocation);
- DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
- Map map = Map::cast(isolate()->root(map_root_index));
- result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- Handle<FixedArray> array(FixedArray::cast(result), isolate());
- array->set_length(length);
- MemsetTagged(array->data_start(), filler, length);
- return array;
-}
-
-template <typename T>
-Handle<T> Factory::NewFixedArrayWithMap(RootIndex map_root_index, int length,
- AllocationType allocation) {
- static_assert(std::is_base_of<FixedArray, T>::value,
- "T must be a descendant of FixedArray");
- // Zero-length case must be handled outside, where the knowledge about
- // the map is.
- DCHECK_LT(0, length);
- return Handle<T>::cast(NewFixedArrayWithFiller(
- map_root_index, length, *undefined_value(), allocation));
-}
-
-template <typename T>
-Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
- int length,
- AllocationType allocation) {
- static_assert(std::is_base_of<WeakFixedArray, T>::value,
- "T must be a descendant of WeakFixedArray");
-
- // Zero-length case must be handled outside.
- DCHECK_LT(0, length);
-
- HeapObject result =
- AllocateRawArray(WeakFixedArray::SizeFor(length), AllocationType::kOld);
- Map map = Map::cast(isolate()->root(map_root_index));
- result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
-
- Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
- array->set_length(length);
- MemsetTagged(ObjectSlot(array->data_start()), *undefined_value(), length);
-
- return Handle<T>::cast(array);
-}
-
-template Handle<FixedArray> Factory::NewFixedArrayWithMap<FixedArray>(
- RootIndex, int, AllocationType allocation);
-
-Handle<FixedArray> Factory::NewFixedArray(int length,
- AllocationType allocation) {
- DCHECK_LE(0, length);
- if (length == 0) return empty_fixed_array();
- return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
- *undefined_value(), allocation);
-}
-
-Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
- AllocationType allocation) {
- DCHECK_LE(0, length);
- if (length == 0) return empty_weak_fixed_array();
- HeapObject result =
- AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
- DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kWeakFixedArrayMap));
- result.set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
- Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
- array->set_length(length);
- MemsetTagged(ObjectSlot(array->data_start()), *undefined_value(), length);
- return array;
-}
-
MaybeHandle<FixedArray> Factory::TryNewFixedArray(
int length, AllocationType allocation_type) {
DCHECK_LE(0, length);
@@ -481,14 +356,6 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
return array;
}
-Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
- AllocationType allocation) {
- DCHECK_LE(0, length);
- if (length == 0) return empty_fixed_array();
- return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
- *the_hole_value(), allocation);
-}
-
Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
if (length < 0 || length > FixedArray::kMaxLength) {
@@ -498,8 +365,9 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
// TODO(ulan): As an experiment this temporarily returns an initialized fixed
// array. After getting canary/performance coverage, either remove the
// function or revert to returning uninitialized array.
- return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
- *undefined_value(), AllocationType::kYoung);
+ return NewFixedArrayWithFiller(read_only_roots().fixed_array_map_handle(),
+ length, undefined_value(),
+ AllocationType::kYoung);
}
Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray(
@@ -507,9 +375,9 @@ Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray(
if (length == 0) return empty_closure_feedback_cell_array();
Handle<ClosureFeedbackCellArray> feedback_cell_array =
- NewFixedArrayWithMap<ClosureFeedbackCellArray>(
- RootIndex::kClosureFeedbackCellArrayMap, length,
- AllocationType::kOld);
+ Handle<ClosureFeedbackCellArray>::cast(NewFixedArrayWithMap(
+ read_only_roots().closure_feedback_cell_array_map_handle(), length,
+ AllocationType::kOld));
return feedback_cell_array;
}
@@ -557,56 +425,6 @@ Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) {
return array;
}
-Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
- int boilerplate, int all_properties, int index_keys, bool has_seen_proto) {
- DCHECK_GE(boilerplate, 0);
- DCHECK_GE(all_properties, index_keys);
- DCHECK_GE(index_keys, 0);
-
- int backing_store_size =
- all_properties - index_keys - (has_seen_proto ? 1 : 0);
- DCHECK_GE(backing_store_size, 0);
- bool has_different_size_backing_store = boilerplate != backing_store_size;
-
- // Space for name and value for every boilerplate property + LiteralType flag.
- int size =
- 2 * boilerplate + ObjectBoilerplateDescription::kDescriptionStartIndex;
-
- if (has_different_size_backing_store) {
- // An extra entry for the backing store size.
- size++;
- }
-
- Handle<ObjectBoilerplateDescription> description =
- Handle<ObjectBoilerplateDescription>::cast(
- NewFixedArrayWithMap(RootIndex::kObjectBoilerplateDescriptionMap,
- size, AllocationType::kOld));
-
- if (has_different_size_backing_store) {
- DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
- has_seen_proto);
- description->set_backing_store_size(backing_store_size);
- }
-
- description->set_flags(0);
-
- return description;
-}
-
-Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length) {
- if (length == 0) return empty_fixed_array();
- if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
- }
- int size = FixedDoubleArray::SizeFor(length);
- Map map = *fixed_double_array_map();
- HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kYoung,
- map, kDoubleAligned);
- Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate());
- array->set_length(length);
- return array;
-}
-
Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(int length) {
DCHECK_LE(0, length);
Handle<FixedArrayBase> array = NewFixedDoubleArray(length);
@@ -616,25 +434,6 @@ Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(int length) {
return array;
}
-Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(
- int slot_count, int feedback_cell_count, AllocationType allocation) {
- DCHECK_LE(0, slot_count);
- int size = FeedbackMetadata::SizeFor(slot_count);
- HeapObject result =
- AllocateRawWithImmortalMap(size, allocation, *feedback_metadata_map());
- Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
- data->set_slot_count(slot_count);
- data->set_closure_feedback_cell_count(feedback_cell_count);
-
- // Initialize the data section to 0.
- int data_size = size - FeedbackMetadata::kHeaderSize;
- Address data_start = data->address() + FeedbackMetadata::kHeaderSize;
- memset(reinterpret_cast<byte*>(data_start), 0, data_size);
- // Fields have been zeroed out but not initialized, so this object will not
- // pass object verification at this point.
- return data;
-}
-
Handle<FrameArray> Factory::NewFrameArray(int number_of_frames) {
DCHECK_LE(0, number_of_frames);
Handle<FixedArray> result =
@@ -702,19 +501,11 @@ Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary() {
.ToHandleChecked();
}
-Handle<AccessorPair> Factory::NewAccessorPair() {
- Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(
- NewStruct(ACCESSOR_PAIR_TYPE, AllocationType::kOld));
- accessors->set_getter(*null_value(), SKIP_WRITE_BARRIER);
- accessors->set_setter(*null_value(), SKIP_WRITE_BARRIER);
- return accessors;
-}
-
Handle<PropertyDescriptorObject> Factory::NewPropertyDescriptorObject() {
Handle<PropertyDescriptorObject> object =
Handle<PropertyDescriptorObject>::cast(
NewStruct(PROPERTY_DESCRIPTOR_OBJECT_TYPE, AllocationType::kYoung));
- object->set_flags(Smi::zero());
+ object->set_flags(0);
object->set_value(*the_hole_value(), SKIP_WRITE_BARRIER);
object->set_get(*the_hole_value(), SKIP_WRITE_BARRIER);
object->set_set(*the_hole_value(), SKIP_WRITE_BARRIER);
@@ -1200,7 +991,7 @@ Handle<Symbol> Factory::NewSymbol(AllocationType allocation) {
int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
Handle<Symbol> symbol(Symbol::cast(result), isolate());
- symbol->set_hash_field(Name::kIsNotArrayIndexMask |
+ symbol->set_hash_field(Name::kIsNotIntegerIndexMask |
(hash << Name::kHashShift));
symbol->set_description(*undefined_value());
symbol->set_flags(0);
@@ -1278,9 +1069,9 @@ Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
}
Handle<ScriptContextTable> Factory::NewScriptContextTable() {
- Handle<ScriptContextTable> context_table =
- NewFixedArrayWithMap<ScriptContextTable>(
- RootIndex::kScriptContextTableMap, ScriptContextTable::kMinLength);
+ Handle<ScriptContextTable> context_table = Handle<ScriptContextTable>::cast(
+ NewFixedArrayWithMap(read_only_roots().script_context_table_map_handle(),
+ ScriptContextTable::kMinLength));
context_table->set_used(0);
return context_table;
}
@@ -1401,16 +1192,6 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
return context;
}
-Handle<Struct> Factory::NewStruct(InstanceType type,
- AllocationType allocation) {
- Map map = Map::GetStructMap(isolate(), type);
- int size = map.instance_size();
- HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
- Handle<Struct> str(Struct::cast(result), isolate());
- str->InitializeBody(size);
- return str;
-}
-
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
@@ -1436,41 +1217,16 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
return info;
}
-Handle<Script> Factory::NewScript(Handle<String> source) {
- return NewScriptWithId(source, isolate()->heap()->NextScriptId());
-}
-
-Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id) {
- // Create and initialize script object.
- Heap* heap = isolate()->heap();
- ReadOnlyRoots roots(heap);
- Handle<Script> script =
- Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
- script->set_source(*source);
- script->set_name(roots.undefined_value());
- script->set_id(script_id);
- script->set_line_offset(0);
- script->set_column_offset(0);
- script->set_context_data(roots.undefined_value());
- script->set_type(Script::TYPE_NORMAL);
- script->set_line_ends(roots.undefined_value());
- script->set_eval_from_shared_or_wrapped_arguments(roots.undefined_value());
- script->set_eval_from_position(0);
- script->set_shared_function_infos(*empty_weak_fixed_array(),
- SKIP_WRITE_BARRIER);
- script->set_flags(0);
- script->set_host_defined_options(*empty_fixed_array());
+void Factory::AddToScriptList(Handle<Script> script) {
Handle<WeakArrayList> scripts = script_list();
scripts = WeakArrayList::Append(isolate(), scripts,
MaybeObjectHandle::Weak(script));
- heap->set_script_list(*scripts);
- LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
- return script;
+ isolate()->heap()->set_script_list(*scripts);
}
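AddToScriptList above takes over bookkeeping that previously lived inline in the removed NewScript/NewScriptWithId. The idiom worth noting is that WeakArrayList::Append may reallocate the backing store, so the returned list must be written back to the heap's root slot rather than assuming the old handle is still current. A standalone sketch of that append-then-store-back shape, with illustrative types:

// Standalone sketch (not V8 source): Append may reallocate, so the caller
// treats the return value as the new canonical list and stores it back.
#include <memory>
#include <utility>
#include <vector>

using ScriptId = int;
using ScriptList = std::vector<ScriptId>;

// Returns the list that now owns the elements; may be a fresh allocation.
std::shared_ptr<ScriptList> Append(std::shared_ptr<ScriptList> list,
                                   ScriptId id) {
  if (list->size() == list->capacity()) {
    auto grown = std::make_shared<ScriptList>(*list);  // copy to new storage
    grown->reserve(list->capacity() * 2 + 1);
    list = std::move(grown);
  }
  list->push_back(id);
  return list;
}

int main() {
  auto root = std::make_shared<ScriptList>();
  root = Append(std::move(root), 42);  // store back: root may have changed
}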
Handle<Script> Factory::CloneScript(Handle<Script> script) {
Heap* heap = isolate()->heap();
- int script_id = isolate()->heap()->NextScriptId();
+ int script_id = isolate()->GetNextScriptId();
Handle<Script> new_script =
Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
new_script->set_source(script->source());
@@ -1540,50 +1296,6 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
return foreign;
}
-Handle<ByteArray> Factory::NewByteArray(int length, AllocationType allocation) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
- }
- int size = ByteArray::SizeFor(length);
- HeapObject result =
- AllocateRawWithImmortalMap(size, allocation, *byte_array_map());
- Handle<ByteArray> array(ByteArray::cast(result), isolate());
- array->set_length(length);
- array->clear_padding();
- return array;
-}
-
-Handle<BytecodeArray> Factory::NewBytecodeArray(
- int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
- Handle<FixedArray> constant_pool) {
- if (length < 0 || length > BytecodeArray::kMaxLength) {
- isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
- }
- // Bytecode array is AllocationType::kOld, so constant pool array should be
- // too.
- DCHECK(!Heap::InYoungGeneration(*constant_pool));
-
- int size = BytecodeArray::SizeFor(length);
- HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kOld,
- *bytecode_array_map());
- Handle<BytecodeArray> instance(BytecodeArray::cast(result), isolate());
- instance->set_length(length);
- instance->set_frame_size(frame_size);
- instance->set_parameter_count(parameter_count);
- instance->set_incoming_new_target_or_generator_register(
- interpreter::Register::invalid_value());
- instance->set_osr_loop_nesting_level(0);
- instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
- instance->set_constant_pool(*constant_pool);
- instance->set_handler_table(*empty_byte_array());
- instance->set_source_position_table(*undefined_value());
- CopyBytes(reinterpret_cast<byte*>(instance->GetFirstBytecodeAddress()),
- raw_bytecodes, length);
- instance->clear_padding();
-
- return instance;
-}
-
Handle<Cell> Factory::NewCell(Handle<Object> value) {
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
HeapObject result = AllocateRawWithImmortalMap(
@@ -1594,31 +1306,34 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
}
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
- HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
- AllocationType::kOld, *no_closures_cell_map());
+ HeapObject result =
+ AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
+ AllocationType::kOld, *no_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
- cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
+ cell->SetInitialInterruptBudget();
cell->clear_padding();
return cell;
}
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
- HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
- AllocationType::kOld, *one_closure_cell_map());
+ HeapObject result =
+ AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
+ AllocationType::kOld, *one_closure_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
- cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
+ cell->SetInitialInterruptBudget();
cell->clear_padding();
return cell;
}
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
- AllocationType::kOld, *many_closures_cell_map());
+ AllocationType::kOld,
+ *many_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
- cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
+ cell->SetInitialInterruptBudget();
cell->clear_padding();
return cell;
}
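The three FeedbackCell constructors above switch from cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget()) to cell->SetInitialInterruptBudget(). A minimal sketch of the refactor's shape (assumed, not V8 source): instead of every call site reading the initial budget and writing it back, the cell initializes itself, so the budget policy lives in exactly one place.

// Sketch with an illustrative budget value; V8's actual constant differs.
#include <iostream>

class FeedbackCell {
 public:
  void SetInitialInterruptBudget() { interrupt_budget_ = kInitialBudget; }
  int interrupt_budget() const { return interrupt_budget_; }

 private:
  static constexpr int kInitialBudget = 144;  // illustrative value
  int interrupt_budget_ = 0;
};

int main() {
  FeedbackCell cell;
  cell.SetInitialInterruptBudget();
  std::cout << cell.interrupt_budget() << "\n";
}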
@@ -1638,26 +1353,12 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
return cell;
}
-Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
- int slack) {
- int number_of_all_descriptors = number_of_descriptors + slack;
- // Zero-length case must be handled outside.
- DCHECK_LT(0, number_of_all_descriptors);
- int size = DescriptorArray::SizeFor(number_of_all_descriptors);
- HeapObject obj = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
- size, AllocationType::kYoung);
- obj.set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
- DescriptorArray array = DescriptorArray::cast(obj);
- array.Initialize(*empty_enum_cache(), *undefined_value(),
- number_of_descriptors, slack);
- return Handle<DescriptorArray>(array, isolate());
-}
-
Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
int slack) {
int capacity = TransitionArray::LengthFor(number_of_transitions + slack);
- Handle<TransitionArray> array = NewWeakFixedArrayWithMap<TransitionArray>(
- RootIndex::kTransitionArrayMap, capacity, AllocationType::kOld);
+ Handle<TransitionArray> array = Handle<TransitionArray>::cast(
+ NewWeakFixedArrayWithMap(read_only_roots().transition_array_map(),
+ capacity, AllocationType::kOld));
// Transition arrays are AllocationType::kOld. When black allocation is on we
// have to add the transition array to the list of
// encountered_transition_arrays.
@@ -2019,71 +1720,12 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
return result;
}
-template <AllocationType allocation>
-Handle<Object> Factory::NewNumber(double value) {
- // Materialize as a SMI if possible.
- int32_t int_value;
- if (DoubleToSmiInteger(value, &int_value)) {
- return handle(Smi::FromInt(int_value), isolate());
- }
- return NewHeapNumber<allocation>(value);
-}
-
-template Handle<Object> V8_EXPORT_PRIVATE
-Factory::NewNumber<AllocationType::kYoung>(double);
-template Handle<Object> V8_EXPORT_PRIVATE
-Factory::NewNumber<AllocationType::kOld>(double);
-template Handle<Object> V8_EXPORT_PRIVATE
-Factory::NewNumber<AllocationType::kReadOnly>(double);
-
-Handle<Object> Factory::NewNumberFromInt(int32_t value) {
- if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
- // Bypass NewNumber to avoid various redundant checks.
- return NewHeapNumber(FastI2D(value));
-}
-
-Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
- int32_t int32v = static_cast<int32_t>(value);
- if (int32v >= 0 && Smi::IsValid(int32v)) {
- return handle(Smi::FromInt(int32v), isolate());
- }
- return NewHeapNumber(FastUI2D(value));
-}
-
-template <AllocationType allocation>
-Handle<HeapNumber> Factory::NewHeapNumber() {
- STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
- Map map = *heap_number_map();
- HeapObject result = AllocateRawWithImmortalMap(HeapNumber::kSize, allocation,
- map, kDoubleUnaligned);
- return handle(HeapNumber::cast(result), isolate());
-}
-
-template Handle<HeapNumber> V8_EXPORT_PRIVATE
-Factory::NewHeapNumber<AllocationType::kYoung>();
-template Handle<HeapNumber> V8_EXPORT_PRIVATE
-Factory::NewHeapNumber<AllocationType::kOld>();
-template Handle<HeapNumber> V8_EXPORT_PRIVATE
-Factory::NewHeapNumber<AllocationType::kReadOnly>();
-
Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) {
return isolate()->heap()->CanAllocateInReadOnlySpace()
? NewHeapNumber<AllocationType::kReadOnly>(value)
: NewHeapNumber<AllocationType::kOld>(value);
}
-Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
- AllocationType allocation) {
- if (length < 0 || length > BigInt::kMaxLength) {
- isolate()->heap()->FatalProcessOutOfMemory("invalid BigInt length");
- }
- HeapObject result = AllocateRawWithImmortalMap(BigInt::SizeFor(length),
- allocation, *bigint_map());
- FreshlyAllocatedBigInt bigint = FreshlyAllocatedBigInt::cast(result);
- bigint.clear_padding();
- return handle(bigint, isolate());
-}
-
Handle<JSObject> Factory::NewError(Handle<JSFunction> constructor,
MessageTemplate template_index,
Handle<Object> arg0, Handle<Object> arg1,
@@ -2363,60 +2005,6 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
return result;
}
-Handle<ScopeInfo> Factory::NewScopeInfo(int length, AllocationType type) {
- DCHECK(type == AllocationType::kOld || type == AllocationType::kReadOnly);
- return NewFixedArrayWithMap<ScopeInfo>(RootIndex::kScopeInfoMap, length,
- type);
-}
-
-Handle<SourceTextModuleInfo> Factory::NewSourceTextModuleInfo() {
- return NewFixedArrayWithMap<SourceTextModuleInfo>(
- RootIndex::kModuleInfoMap, SourceTextModuleInfo::kLength,
- AllocationType::kOld);
-}
-
-Handle<PreparseData> Factory::NewPreparseData(int data_length,
- int children_length) {
- int size = PreparseData::SizeFor(data_length, children_length);
- Handle<PreparseData> result(
- PreparseData::cast(AllocateRawWithImmortalMap(size, AllocationType::kOld,
- *preparse_data_map())),
- isolate());
- result->set_data_length(data_length);
- result->set_children_length(children_length);
- MemsetTagged(result->inner_data_start(), *null_value(), children_length);
- result->clear_padding();
- return result;
-}
-
-Handle<UncompiledDataWithoutPreparseData>
-Factory::NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
- int32_t start_position,
- int32_t end_position) {
- Handle<UncompiledDataWithoutPreparseData> result(
- UncompiledDataWithoutPreparseData::cast(New(
- uncompiled_data_without_preparse_data_map(), AllocationType::kOld)),
- isolate());
-
- result->Init(*inferred_name, start_position, end_position);
- return result;
-}
-
-Handle<UncompiledDataWithPreparseData>
-Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
- int32_t start_position,
- int32_t end_position,
- Handle<PreparseData> preparse_data) {
- Handle<UncompiledDataWithPreparseData> result(
- UncompiledDataWithPreparseData::cast(
- New(uncompiled_data_with_preparse_data_map(), AllocationType::kOld)),
- isolate());
-
- result->Init(*inferred_name, start_position, end_position, *preparse_data);
-
- return result;
-}
-
Handle<JSObject> Factory::NewExternal(void* value) {
Handle<Foreign> foreign = NewForeign(reinterpret_cast<Address>(value));
Handle<JSObject> external = NewJSObjectFromMap(external_map());
@@ -3210,18 +2798,6 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
InitializeJSObjectFromMap(object, raw_properties_or_hash, map);
}
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
- FunctionLiteral* literal, Handle<Script> script, bool is_toplevel) {
- FunctionKind kind = literal->kind();
- Handle<SharedFunctionInfo> shared = NewSharedFunctionInfoForBuiltin(
- literal->name(), Builtins::kCompileLazy, kind);
- SharedFunctionInfo::InitFromFunctionLiteral(isolate(), shared, literal,
- is_toplevel);
- shared->SetScript(ReadOnlyRoots(isolate()), *script,
- literal->function_literal_id(), false);
- return shared;
-}
-
Handle<JSMessageObject> Factory::NewJSMessageObject(
MessageTemplate message, Handle<Object> argument, int start_position,
int end_position, Handle<SharedFunctionInfo> shared_info,
@@ -3251,7 +2827,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
DCHECK_EQ(bytecode_offset, -1);
} else {
message_obj->set_shared_info(*shared_info);
- DCHECK_GE(bytecode_offset, 0);
+ DCHECK_GE(bytecode_offset, kFunctionEntryBytecodeOffset);
}
}
@@ -3281,64 +2857,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForBuiltin(
return shared;
}
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- MaybeHandle<String> maybe_name, MaybeHandle<HeapObject> maybe_function_data,
- int maybe_builtin_index, FunctionKind kind) {
- Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo();
-
- // Function names are assumed to be flat elsewhere. Must flatten before
- // allocating SharedFunctionInfo to avoid GC seeing the uninitialized SFI.
- Handle<String> shared_name;
- bool has_shared_name = maybe_name.ToHandle(&shared_name);
- if (has_shared_name) {
- shared_name = String::Flatten(isolate(), shared_name, AllocationType::kOld);
- shared->set_name_or_scope_info(*shared_name);
- } else {
- DCHECK_EQ(shared->name_or_scope_info(),
- SharedFunctionInfo::kNoSharedNameSentinel);
- }
-
- Handle<HeapObject> function_data;
- if (maybe_function_data.ToHandle(&function_data)) {
- // If we pass function_data then we shouldn't pass a builtin index, and
- // the function_data should not be code with a builtin.
- DCHECK(!Builtins::IsBuiltinId(maybe_builtin_index));
- DCHECK_IMPLIES(function_data->IsCode(),
- !Code::cast(*function_data).is_builtin());
- shared->set_function_data(*function_data);
- } else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
- shared->set_builtin_id(maybe_builtin_index);
- } else {
- shared->set_builtin_id(Builtins::kIllegal);
- }
-
- shared->CalculateConstructAsBuiltin();
- shared->set_kind(kind);
-
-#ifdef VERIFY_HEAP
- shared->SharedFunctionInfoVerify(isolate());
-#endif // VERIFY_HEAP
- return shared;
-}
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo() {
- Handle<Map> map = shared_function_info_map();
-
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(New(map, AllocationType::kOld)), isolate());
- int unique_id = -1;
-#if V8_SFI_HAS_UNIQUE_ID
- unique_id = isolate()->GetNextUniqueSharedFunctionInfoId();
-#endif // V8_SFI_HAS_UNIQUE_ID
-
- shared->Init(ReadOnlyRoots(isolate()), unique_id);
-
-#ifdef VERIFY_HEAP
- shared->SharedFunctionInfoVerify(isolate());
-#endif // VERIFY_HEAP
- return shared;
-}
-
namespace {
inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi number) {
int mask = (cache->length() >> 1) - 1;
@@ -3478,14 +2996,6 @@ Handle<String> Factory::SizeToString(size_t value, bool check_cache) {
return result;
}
-Handle<ClassPositions> Factory::NewClassPositions(int start, int end) {
- Handle<ClassPositions> class_positions = Handle<ClassPositions>::cast(
- NewStruct(CLASS_POSITIONS_TYPE, AllocationType::kOld));
- class_positions->set_start(start);
- class_positions->set_end(end);
- return class_positions;
-}
-
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
DCHECK(!shared->HasDebugInfo());
Heap* heap = isolate()->heap();
@@ -3509,22 +3019,6 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
return debug_info;
}
-Handle<CoverageInfo> Factory::NewCoverageInfo(
- const ZoneVector<SourceRange>& slots) {
- const int slot_count = static_cast<int>(slots.size());
-
- const int length = CoverageInfo::FixedArrayLengthForSlotCount(slot_count);
- Handle<CoverageInfo> info =
- Handle<CoverageInfo>::cast(NewUninitializedFixedArray(length));
-
- for (int i = 0; i < slot_count; i++) {
- SourceRange range = slots[i];
- info->InitializeSlot(i, range.start, range.end);
- }
-
- return info;
-}
-
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
NewStruct(BREAK_POINT_INFO_TYPE, AllocationType::kOld));
@@ -3807,10 +3301,6 @@ Handle<Object> Factory::GlobalConstantFor(Handle<Name> name) {
return Handle<Object>::null();
}
-Handle<Object> Factory::ToBoolean(bool value) {
- return value ? true_value() : false_value();
-}
-
Handle<String> Factory::ToPrimitiveHintString(ToPrimitiveHint hint) {
switch (hint) {
case ToPrimitiveHint::kDefault:
@@ -4061,6 +3551,8 @@ bool Factory::EmptyStringRootIsInitialized() {
NewFunctionArgs NewFunctionArgs::ForWasm(
Handle<String> name,
Handle<WasmExportedFunctionData> exported_function_data, Handle<Map> map) {
+ DCHECK(name->IsFlat());
+
NewFunctionArgs args;
args.name_ = name;
args.maybe_map_ = map;
@@ -4075,6 +3567,8 @@ NewFunctionArgs NewFunctionArgs::ForWasm(
NewFunctionArgs NewFunctionArgs::ForWasm(
Handle<String> name, Handle<WasmJSFunctionData> js_function_data,
Handle<Map> map) {
+ DCHECK(name->IsFlat());
+
NewFunctionArgs args;
args.name_ = name;
args.maybe_map_ = map;
@@ -4089,6 +3583,7 @@ NewFunctionArgs NewFunctionArgs::ForWasm(
NewFunctionArgs NewFunctionArgs::ForBuiltin(Handle<String> name,
Handle<Map> map, int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK(name->IsFlat());
NewFunctionArgs args;
args.name_ = name;
@@ -4105,6 +3600,8 @@ NewFunctionArgs NewFunctionArgs::ForBuiltin(Handle<String> name,
// static
NewFunctionArgs NewFunctionArgs::ForFunctionWithoutCode(
Handle<String> name, Handle<Map> map, LanguageMode language_mode) {
+ DCHECK(name->IsFlat());
+
NewFunctionArgs args;
args.name_ = name;
args.maybe_map_ = map;
@@ -4123,6 +3620,7 @@ NewFunctionArgs NewFunctionArgs::ForBuiltinWithPrototype(
int instance_size, int inobject_properties, int builtin_id,
MutableMode prototype_mutability) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK(name->IsFlat());
NewFunctionArgs args;
args.name_ = name;
@@ -4145,6 +3643,7 @@ NewFunctionArgs NewFunctionArgs::ForBuiltinWithPrototype(
NewFunctionArgs NewFunctionArgs::ForBuiltinWithoutPrototype(
Handle<String> name, int builtin_id, LanguageMode language_mode) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK(name->IsFlat());
NewFunctionArgs args;
args.name_ = name;
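Each NewFunctionArgs factory above gains a DCHECK(name->IsFlat()), making explicit the precondition that names arrive already flattened (the removed NewSharedFunctionInfo performed the flattening itself, as its comment notes, to avoid a GC observing a half-initialized SharedFunctionInfo). A standalone sketch of what flattening means for a rope-style string, with illustrative types:

// Standalone sketch (not V8 source): a string built by concatenation may be
// a tree of pieces; flattening copies it into one contiguous buffer so later
// code can assume O(1) character access.
#include <iostream>
#include <memory>
#include <string>

struct ConsString {  // illustrative rope node
  std::shared_ptr<ConsString> left, right;
  std::string leaf;  // non-empty only for leaf nodes
  bool IsFlat() const { return !left && !right; }
};

std::string Flatten(const ConsString& s) {
  if (s.IsFlat()) return s.leaf;
  return Flatten(*s.left) + Flatten(*s.right);
}

int main() {
  auto a = std::make_shared<ConsString>();
  a->leaf = "foo";
  auto b = std::make_shared<ConsString>();
  b->leaf = "bar";
  ConsString cons{a, b, ""};
  std::cout << Flatten(cons) << "\n";  // "foobar"
}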
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 877e2f8972..81041f7f40 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -16,7 +16,6 @@
#include "src/heap/heap.h"
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
-#include "src/objects/function-kind.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
#include "src/objects/string.h"
@@ -55,26 +54,19 @@ class JSWeakMap;
class LoadHandler;
class NativeContext;
class NewFunctionArgs;
-class PreparseData;
class PromiseResolveThenableJobTask;
class RegExpMatchInfo;
class ScriptContextTable;
class SourceTextModule;
-class SourceTextModuleInfo;
class StackFrameInfo;
class StackTraceFrame;
class StoreHandler;
class SyntheticModule;
class TemplateObjectDescription;
-class UncompiledDataWithoutPreparseData;
-class UncompiledDataWithPreparseData;
class WasmCapiFunctionData;
class WasmExportedFunctionData;
class WasmJSFunctionData;
class WeakCell;
-struct SourceRange;
-template <typename T>
-class ZoneVector;
enum class SharedFlag : uint8_t;
enum class InitializedFlag : uint8_t;
@@ -106,24 +98,18 @@ enum FunctionMode {
kWithReadonlyPrototypeBit | kWithNameBit,
};
-class Factory;
-
-template <>
-struct FactoryTraits<Factory> {
- template <typename T>
- using HandleType = Handle<T>;
- template <typename T>
- using MaybeHandleType = v8::internal::MaybeHandle<T>;
-};
-
// Interface for handle based allocation.
class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
public:
inline ReadOnlyRoots read_only_roots();
+
template <typename T>
Handle<T> MakeHandle(T obj) {
return handle(obj, isolate());
}
+
+#include "torque-generated/factory-tq.inc"
+
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -131,29 +117,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Marks self references within code generation.
Handle<Oddball> NewSelfReferenceMarker();
- // Allocates a fixed array-like object with given map and initialized with
- // undefined values.
- template <typename T = FixedArray>
- Handle<T> NewFixedArrayWithMap(
- RootIndex map_root_index, int length,
- AllocationType allocation = AllocationType::kYoung);
-
- // Allocates a weak fixed array-like object with given map and initialized
- // with undefined values.
- template <typename T = WeakFixedArray>
- Handle<T> NewWeakFixedArrayWithMap(
- RootIndex map_root_index, int length,
- AllocationType allocation = AllocationType::kYoung);
-
- // Allocates a fixed array initialized with undefined values.
- Handle<FixedArray> NewFixedArray(
- int length, AllocationType allocation = AllocationType::kYoung);
-
- // Allocates a fixed array which may contain in-place weak references. The
- // array is initialized with undefined values
- Handle<WeakFixedArray> NewWeakFixedArray(
- int length, AllocationType allocation = AllocationType::kYoung);
-
// Allocates a property array initialized with undefined values.
Handle<PropertyArray> NewPropertyArray(int length);
// Tries allocating a fixed array initialized with undefined values.
@@ -165,10 +128,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
MaybeHandle<FixedArray> TryNewFixedArray(
int length, AllocationType allocation = AllocationType::kYoung);
- // Allocate a new fixed array with non-existing entries (the hole).
- Handle<FixedArray> NewFixedArrayWithHoles(
- int length, AllocationType allocation = AllocationType::kYoung);
-
// Allocates an uninitialized fixed array. It must be filled by the caller.
Handle<FixedArray> NewUninitializedFixedArray(int length);
@@ -185,24 +144,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Allocates a clean embedder data array with given capacity.
Handle<EmbedderDataArray> NewEmbedderDataArray(int length);
- // Allocates a fixed array for name-value pairs of boilerplate properties and
- // calculates the number of properties we need to store in the backing store.
- Handle<ObjectBoilerplateDescription> NewObjectBoilerplateDescription(
- int boilerplate, int all_properties, int index_keys, bool has_seen_proto);
-
- // Allocate a new uninitialized fixed double array.
- // The function returns a pre-allocated empty fixed array for length = 0,
- // so the return type must be the general fixed array class.
- Handle<FixedArrayBase> NewFixedDoubleArray(int length);
-
// Allocate a new fixed double array with hole values.
Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(int size);
- // Allocates a FeedbackMetadata object and zeroes the data section.
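(This declaration, like most removals in factory.h below, moves to the shared FactoryBase rather than being deleted outright.)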
- Handle<FeedbackMetadata> NewFeedbackMetadata(
- int slot_count, int feedback_cell_count,
- AllocationType allocation = AllocationType::kOld);
-
Handle<FrameArray> NewFrameArray(int number_of_frames);
Handle<OrderedHashSet> NewOrderedHashSet();
@@ -230,17 +174,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2,
AllocationType allocation);
- // Create a new ArrayBoilerplateDescription struct.
- Handle<ArrayBoilerplateDescription> NewArrayBoilerplateDescription(
- ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
-
- // Create a new TemplateObjectDescription struct.
- Handle<TemplateObjectDescription> NewTemplateObjectDescription(
- Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings);
-
- // Create a pre-tenured empty AccessorPair.
- Handle<AccessorPair> NewAccessorPair();
-
// Create a new PropertyDescriptorObject struct.
Handle<PropertyDescriptorObject> NewPropertyDescriptorObject();
@@ -417,16 +350,11 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Context> NewBuiltinContext(Handle<NativeContext> native_context,
int length);
- Handle<Struct> NewStruct(InstanceType type,
- AllocationType allocation = AllocationType::kYoung);
-
Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
int aliased_context_slot);
Handle<AccessorInfo> NewAccessorInfo();
- Handle<Script> NewScript(Handle<String> source);
- Handle<Script> NewScriptWithId(Handle<String> source, int script_id);
Handle<Script> CloneScript(Handle<Script> script);
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
@@ -448,13 +376,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr);
- Handle<ByteArray> NewByteArray(
- int length, AllocationType allocation = AllocationType::kYoung);
-
- Handle<BytecodeArray> NewBytecodeArray(int length, const byte* raw_bytecodes,
- int frame_size, int parameter_count,
- Handle<FixedArray> constant_pool);
-
Handle<Cell> NewCell(Handle<Object> value);
Handle<PropertyCell> NewPropertyCell(
@@ -464,8 +385,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
- Handle<DescriptorArray> NewDescriptorArray(int number_of_entries,
- int slack = 0);
Handle<TransitionArray> NewTransitionArray(int number_of_transitions,
int slack = 0);
@@ -534,36 +453,10 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<FixedDoubleArray> CopyFixedDoubleArray(Handle<FixedDoubleArray> array);
- // Numbers (e.g. literals) are pretenured by the parser.
- // The return value may be a smi or a heap number.
- template <AllocationType allocation = AllocationType::kYoung>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<Object> NewNumber(double value);
- Handle<Object> NewNumberFromInt(int32_t value);
- Handle<Object> NewNumberFromUint(uint32_t value);
- inline Handle<Object> NewNumberFromSize(size_t value);
- inline Handle<Object> NewNumberFromInt64(int64_t value);
- template <AllocationType allocation = AllocationType::kYoung>
- inline Handle<HeapNumber> NewHeapNumber(double value);
- template <AllocationType allocation = AllocationType::kYoung>
- inline Handle<HeapNumber> NewHeapNumberFromBits(uint64_t bits);
-
- // Creates heap number object with not yet set value field.
- template <AllocationType allocation = AllocationType::kYoung>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<HeapNumber> NewHeapNumber();
-
// Creates a new HeapNumber in read-only space if possible otherwise old
// space.
Handle<HeapNumber> NewHeapNumberForCodeAssembler(double value);
- inline Handle<HeapNumber> NewHeapNumberWithHoleNaN();
-
- // Allocates a new BigInt with {length} digits. Only to be used by
- // MutableBigInt::New*.
- Handle<FreshlyAllocatedBigInt> NewBigInt(
- int length, AllocationType allocation = AllocationType::kYoung);
-
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
// Allocates and initializes a new JavaScript object based on a
@@ -736,23 +629,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Map> map, Handle<SharedFunctionInfo> info, Handle<Context> context,
AllocationType allocation = AllocationType::kOld);
- // Create a serialized scope info.
- Handle<ScopeInfo> NewScopeInfo(int length,
- AllocationType type = AllocationType::kOld);
-
- Handle<SourceTextModuleInfo> NewSourceTextModuleInfo();
-
- Handle<PreparseData> NewPreparseData(int data_length, int children_length);
-
- Handle<UncompiledDataWithoutPreparseData>
- NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
- int32_t start_position,
- int32_t end_position);
-
- Handle<UncompiledDataWithPreparseData> NewUncompiledDataWithPreparseData(
- Handle<String> inferred_name, int32_t start_position,
- int32_t end_position, Handle<PreparseData>);
-
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
@@ -826,9 +702,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
MaybeHandle<String> name, int builtin_index,
FunctionKind kind = kNormalFunction);
- Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
- FunctionLiteral* literal, Handle<Script> script, bool is_toplevel);
-
static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
return (function_mode & kWithPrototypeBits) != 0;
}
@@ -859,11 +732,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
int end_position, Handle<SharedFunctionInfo> shared_info,
int bytecode_offset, Handle<Script> script, Handle<Object> stack_frames);
- Handle<ClassPositions> NewClassPositions(int start, int end);
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
- Handle<CoverageInfo> NewCoverageInfo(const ZoneVector<SourceRange>& slots);
-
// Return a map for given number of properties using the map cache in the
// native context.
Handle<Map> ObjectLiteralMapFromCache(Handle<NativeContext> native_context,
@@ -892,9 +762,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Returns a null handle when the given name is unknown.
Handle<Object> GlobalConstantFor(Handle<Name> name);
- // Converts the given boolean condition to JavaScript boolean value.
- Handle<Object> ToBoolean(bool value);
-
// Converts the given ToPrimitive hint to its string representation.
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
@@ -1003,12 +870,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Customization points for FactoryBase
HeapObject AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
- template <typename T>
- inline MaybeHandle<T> Throw(Handle<Object> exception);
- [[noreturn]] void FatalProcessOutOfHeapMemory(const char* location);
- bool CanAllocateInReadOnlySpace();
- bool EmptyStringRootIsInitialized();
- // ------
Isolate* isolate() {
// Downcast to the privately inherited sub-class using c-style casts to
@@ -1017,6 +878,13 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// NOLINTNEXTLINE (google-readability-casting)
return (Isolate*)this; // NOLINT(readability/casting)
}
+ bool CanAllocateInReadOnlySpace();
+ bool EmptyStringRootIsInitialized();
+
+ Handle<String> MakeOrFindTwoCharacterString(uint16_t c1, uint16_t c2);
+
+ void AddToScriptList(Handle<Script> shared);
+ // ------
HeapObject AllocateRawWithAllocationSite(
Handle<Map> map, AllocationType allocation,
@@ -1026,14 +894,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Map> map, Handle<FixedArrayBase> elements,
Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length);
- // Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
- HeapObject AllocateRawArray(int size, AllocationType allocation);
- HeapObject AllocateRawFixedArray(int length, AllocationType allocation);
- HeapObject AllocateRawWeakArrayList(int length, AllocationType allocation);
- Handle<FixedArray> NewFixedArrayWithFiller(RootIndex map_root_index,
- int length, Object filler,
- AllocationType allocation);
-
// Allocates new context with given map, sets length and initializes the
// after-header part with uninitialized values and leaves the context header
// uninitialized.
@@ -1065,8 +925,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
MaybeHandle<String> NewStringFromTwoByte(const uc16* string, int length,
AllocationType allocation);
- Handle<String> MakeOrFindTwoCharacterString(uint16_t c1, uint16_t c2);
-
// Attempt to find the number in a small cache. If we find it, return
// the string representation of the number. Otherwise return undefined.
Handle<Object> NumberToStringCacheGet(Object number, int hash);
@@ -1089,11 +947,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
ElementsKind elements_kind, int capacity,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
- Handle<SharedFunctionInfo> NewSharedFunctionInfo();
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
- int maybe_builtin_index, FunctionKind kind = kNormalFunction);
-
void InitializeAllocationMemento(AllocationMemento memento,
AllocationSite allocation_site);
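The factory.h changes delete the FactoryTraits specialization and many per-factory allocators because Factory now inherits them from FactoryBase<Factory>; the C-style downcast in isolate() shows the direction of the static dispatch. A minimal CRTP sketch of the assumed shape (not V8 source):

// FactoryBase implements shared construction logic and statically dispatches
// the raw-allocation customization point to the concrete factory, so the
// main-thread and off-thread factories share one code path with no virtual
// calls. Slot size and types are illustrative assumptions.
#include <cstdio>
#include <cstdlib>

template <typename Impl>
class FactoryBase {
 public:
  void* NewFixedArray(int length) {
    // Shared logic lives here; the allocation policy comes from Impl.
    return impl()->AllocateRaw(length * 8);  // 8 bytes per slot (assumed)
  }

 private:
  Impl* impl() { return static_cast<Impl*>(this); }
};

class Factory : public FactoryBase<Factory> {
 public:
  void* AllocateRaw(int size) {
    std::printf("main-thread allocation of %d bytes\n", size);
    return std::malloc(size);
  }
};

int main() {
  Factory factory;
  std::free(factory.NewFixedArray(4));
}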
diff --git a/deps/v8/src/heap/finalization-registry-cleanup-task.cc b/deps/v8/src/heap/finalization-registry-cleanup-task.cc
new file mode 100644
index 0000000000..c1868d4862
--- /dev/null
+++ b/deps/v8/src/heap/finalization-registry-cleanup-task.cc
@@ -0,0 +1,77 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/finalization-registry-cleanup-task.h"
+
+#include "src/execution/frames.h"
+#include "src/execution/interrupts-scope.h"
+#include "src/execution/stack-guard.h"
+#include "src/execution/v8threads.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/tracing/trace-event.h"
+
+namespace v8 {
+namespace internal {
+
+FinalizationRegistryCleanupTask::FinalizationRegistryCleanupTask(Heap* heap)
+ : CancelableTask(heap->isolate()), heap_(heap) {}
+
+void FinalizationRegistryCleanupTask::SlowAssertNoActiveJavaScript() {
+#ifdef ENABLE_SLOW_DCHECKS
+ class NoActiveJavaScript : public ThreadVisitor {
+ public:
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
+ for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ DCHECK(!it.frame()->is_java_script());
+ }
+ }
+ };
+ NoActiveJavaScript no_active_js_visitor;
+ Isolate* isolate = heap_->isolate();
+ no_active_js_visitor.VisitThread(isolate, isolate->thread_local_top());
+ isolate->thread_manager()->IterateArchivedThreads(&no_active_js_visitor);
+#endif // ENABLE_SLOW_DCHECKS
+}
+
+void FinalizationRegistryCleanupTask::RunInternal() {
+ Isolate* isolate = heap_->isolate();
+ DCHECK(!isolate->host_cleanup_finalization_group_callback());
+ SlowAssertNoActiveJavaScript();
+
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8",
+ "V8.FinalizationRegistryCleanupTask");
+
+ HandleScope handle_scope(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry;
+ // There may be no dirty FinalizationRegistries. When a context is disposed
+ // by the embedder, its FinalizationRegistries are removed from the dirty
+ // list.
+ if (!heap_->DequeueDirtyJSFinalizationRegistry().ToHandle(
+ &finalization_registry)) {
+ return;
+ }
+ finalization_registry->set_scheduled_for_cleanup(false);
+
+ // Since FinalizationRegistry cleanup callbacks are scheduled by V8, enter the
+ // FinalizationRegistry's context.
+ Handle<Context> context(
+ Context::cast(finalization_registry->native_context()), isolate);
+ Handle<Object> callback(finalization_registry->cleanup(), isolate);
+ v8::Context::Scope context_scope(v8::Utils::ToLocal(context));
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ catcher.SetVerbose(true);
+
+ // Exceptions are reported via the message handler. This is ensured by the
+ // verbose TryCatch.
+ InvokeFinalizationRegistryCleanupFromTask(context, finalization_registry,
+ callback);
+
+ // Repost if there are remaining dirty FinalizationRegistries.
+ heap_->set_is_finalization_registry_cleanup_task_posted(false);
+ heap_->PostFinalizationRegistryCleanupTaskIfNeeded();
+}
+
+} // namespace internal
+} // namespace v8
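The new task deliberately processes one dirty FinalizationRegistry per run and then reposts itself, which bounds the time spent in any single task and lets the embedder drain the dirty list (for example on context disposal) between runs. A standalone sketch of that drain-one-then-repost pattern, using stand-ins for the dirty list and task runner (not V8's actual APIs):

#include <functional>
#include <iostream>
#include <queue>
#include <utility>

std::queue<std::function<void()>> dirty;  // stand-in for the dirty list
std::queue<std::function<void()>> tasks;  // stand-in for the task runner
bool task_posted = false;

void PostCleanupTaskIfNeeded();

void CleanupTask() {
  if (dirty.empty()) return;  // the embedder may have drained the list
  auto cleanup = std::move(dirty.front());
  dirty.pop();
  cleanup();                  // one registry's callback per task
  task_posted = false;
  PostCleanupTaskIfNeeded();  // repost while work remains
}

void PostCleanupTaskIfNeeded() {
  if (!dirty.empty() && !task_posted) {
    task_posted = true;
    tasks.push(CleanupTask);
  }
}

int main() {
  for (int i = 0; i < 3; i++)
    dirty.push([i] { std::cout << "cleanup " << i << "\n"; });
  PostCleanupTaskIfNeeded();
  while (!tasks.empty()) {  // drive the fake task runner
    auto t = std::move(tasks.front());
    tasks.pop();
    t();
  }
}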
diff --git a/deps/v8/src/heap/finalization-registry-cleanup-task.h b/deps/v8/src/heap/finalization-registry-cleanup-task.h
new file mode 100644
index 0000000000..bb25c1abec
--- /dev/null
+++ b/deps/v8/src/heap/finalization-registry-cleanup-task.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_FINALIZATION_REGISTRY_CLEANUP_TASK_H_
+#define V8_HEAP_FINALIZATION_REGISTRY_CLEANUP_TASK_H_
+
+#include "src/objects/js-weak-refs.h"
+#include "src/tasks/cancelable-task.h"
+
+namespace v8 {
+namespace internal {
+
+// The GC schedules a cleanup task when the dirty FinalizationRegistry list is
+// non-empty. The task processes a single FinalizationRegistry and posts another
+// cleanup task if there are remaining dirty FinalizationRegistries on the list.
+class FinalizationRegistryCleanupTask : public CancelableTask {
+ public:
+ explicit FinalizationRegistryCleanupTask(Heap* heap);
+ ~FinalizationRegistryCleanupTask() override = default;
+
+ private:
+ FinalizationRegistryCleanupTask(const FinalizationRegistryCleanupTask&) =
+ delete;
+ void operator=(const FinalizationRegistryCleanupTask&) = delete;
+
+ void RunInternal() override;
+ void SlowAssertNoActiveJavaScript();
+
+ Heap* heap_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_FINALIZATION_REGISTRY_CLEANUP_TASK_H_
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 7b1defb935..ea42812585 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -12,10 +12,7 @@ namespace v8 {
namespace internal {
const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
-const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
-const size_t GCIdleTimeHandler::kMinTimeForOverApproximatingWeakClosureInMs = 1;
-
void GCIdleTimeHeapState::Print() {
PrintF("contexts_disposed=%d ", contexts_disposed);
@@ -39,18 +36,6 @@ size_t GCIdleTimeHandler::EstimateMarkingStepSize(
return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
}
-double GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
- size_t size_of_objects,
- double final_incremental_mark_compact_speed_in_bytes_per_ms) {
- if (final_incremental_mark_compact_speed_in_bytes_per_ms == 0) {
- final_incremental_mark_compact_speed_in_bytes_per_ms =
- kInitialConservativeFinalIncrementalMarkCompactSpeed;
- }
- double result =
- size_of_objects / final_incremental_mark_compact_speed_in_bytes_per_ms;
- return Min<double>(result, kMaxFinalIncrementalMarkCompactTimeInMs);
-}
-
bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
int contexts_disposed, double contexts_disposal_rate,
size_t size_of_objects) {
@@ -59,34 +44,13 @@ bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
size_of_objects <= kMaxHeapSizeForContextDisposalMarkCompact;
}
-bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
- double idle_time_in_ms, size_t size_of_objects,
- double final_incremental_mark_compact_speed_in_bytes_per_ms) {
- return idle_time_in_ms >=
- EstimateFinalIncrementalMarkCompactTime(
- size_of_objects,
- final_incremental_mark_compact_speed_in_bytes_per_ms);
-}
-
-bool GCIdleTimeHandler::ShouldDoOverApproximateWeakClosure(
- double idle_time_in_ms) {
- // TODO(jochen): Estimate the time it will take to build the object groups.
- return idle_time_in_ms >= kMinTimeForOverApproximatingWeakClosureInMs;
-}
-
-
// The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
// a full GC.
// (2) If the context disposal rate is high and we cannot perform a full GC,
// we do nothing until the context disposal rate becomes lower.
-// (3) If the new space is almost full and we can afford a scavenge or if the
-// next scavenge will very likely take long, then a scavenge is performed.
-// (4) If sweeping is in progress and we received a large enough idle time
-// request, we finalize sweeping here.
-// (5) If incremental marking is in progress, we perform a marking step. Note,
-// that this currently may trigger a full garbage collection.
+// (3) If incremental marking is in progress, we perform a marking step.
GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
GCIdleTimeHeapState heap_state) {
if (static_cast<int>(idle_time_in_ms) <= 0) {
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index a7ce5dafc7..f14330ea8a 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -42,42 +42,15 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
// idle_time_in_ms. Hence, we conservatively prune our workload estimate.
static const double kConservativeTimeRatio;
- // If we haven't recorded any mark-compact events yet, we use
- // conservative lower bound for the mark-compact speed.
- static const size_t kInitialConservativeMarkCompactSpeed = 2 * MB;
-
- // If we haven't recorded any final incremental mark-compact events yet, we
- // use conservative lower bound for the mark-compact speed.
- static const size_t kInitialConservativeFinalIncrementalMarkCompactSpeed =
- 2 * MB;
-
- // Maximum final incremental mark-compact time returned by
- // EstimateFinalIncrementalMarkCompactTime.
- static const size_t kMaxFinalIncrementalMarkCompactTimeInMs;
-
// This is the maximum scheduled idle time. Note that it can be more than
// 16.66 ms when there is currently no rendering going on.
static const size_t kMaxScheduledIdleTime = 50;
- // The maximum idle time when frames are rendered is 16.66ms.
- static const size_t kMaxFrameRenderingIdleTime = 17;
-
- static const int kMinBackgroundIdleTime = 900;
-
- // An allocation throughput below kLowAllocationThroughput bytes/ms is
- // considered low
- static const size_t kLowAllocationThroughput = 1000;
-
static const size_t kMaxHeapSizeForContextDisposalMarkCompact = 100 * MB;
// If contexts are disposed at a higher rate a full gc is triggered.
static const double kHighContextDisposalRate;
- // Incremental marking step time.
- static const size_t kIncrementalMarkingStepTimeInMs = 1;
-
- static const size_t kMinTimeForOverApproximatingWeakClosureInMs;
-
GCIdleTimeHandler() = default;
GCIdleTimeAction Compute(double idle_time_in_ms,
@@ -95,12 +68,6 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
double contexts_disposal_rate,
size_t size_of_objects);
- static bool ShouldDoFinalIncrementalMarkCompact(
- double idle_time_in_ms, size_t size_of_objects,
- double final_incremental_mark_compact_speed_in_bytes_per_ms);
-
- static bool ShouldDoOverApproximateWeakClosure(double idle_time_in_ms);
-
private:
DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
};
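The deleted heuristics estimated a final incremental mark-compact pause as heap size divided by measured throughput, falling back to a conservative speed before any samples existed and clamping the result. For reference while reading the removal, a standalone sketch of that shape, using the removed constants (2 MB/ms fallback, 1000 ms cap):

// Standalone sketch (not V8 source) of the removed
// EstimateFinalIncrementalMarkCompactTime heuristic.
#include <algorithm>
#include <cstddef>
#include <iostream>

constexpr double kFallbackSpeedBytesPerMs = 2.0 * 1024 * 1024;  // 2 MB/ms
constexpr double kMaxEstimateMs = 1000.0;

double EstimatePauseMs(size_t size_of_objects, double speed_bytes_per_ms) {
  if (speed_bytes_per_ms == 0) speed_bytes_per_ms = kFallbackSpeedBytesPerMs;
  return std::min(size_of_objects / speed_bytes_per_ms, kMaxEstimateMs);
}

int main() {
  std::cout << EstimatePauseMs(64 * 1024 * 1024, 0) << " ms\n";  // fallback
}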
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index b165b69882..f023bd7c74 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -518,11 +518,12 @@ void GCTracer::Print() const {
Output(
"[%d:%p] "
"%8.0f ms: "
- "%s %.1f (%.1f) -> %.1f (%.1f) MB, "
+ "%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
"%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
+ current_.reduce_memory ? " (reduce)" : "",
static_cast<double>(current_.start_object_size) / MB,
static_cast<double>(current_.start_memory_size) / MB,
static_cast<double>(current_.end_object_size) / MB,
@@ -931,6 +932,19 @@ void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
}
}
+void GCTracer::RecordTimeToIncrementalMarkingTask(double time_to_task) {
+ if (average_time_to_incremental_marking_task_ == 0.0) {
+ average_time_to_incremental_marking_task_ = time_to_task;
+ } else {
+ average_time_to_incremental_marking_task_ =
+ (average_time_to_incremental_marking_task_ + time_to_task) / 2;
+ }
+}
+
+double GCTracer::AverageTimeToIncrementalMarkingTask() const {
+ return average_time_to_incremental_marking_task_;
+}
+
void GCTracer::RecordEmbedderSpeed(size_t bytes, double duration) {
if (duration == 0 || bytes == 0) return;
double current_speed = bytes / duration;
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 454bb9ff17..6ff6e18a59 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -353,6 +353,11 @@ class V8_EXPORT_PRIVATE GCTracer {
void RecordEmbedderSpeed(size_t bytes, double duration);
+ // Returns the average time between scheduling and invocation of an
+ // incremental marking task.
+ double AverageTimeToIncrementalMarkingTask() const;
+ void RecordTimeToIncrementalMarkingTask(double time_to_task);
+
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
private:
@@ -446,6 +451,8 @@ class V8_EXPORT_PRIVATE GCTracer {
double recorded_incremental_marking_speed_;
+ double average_time_to_incremental_marking_task_ = 0.0;
+
double recorded_embedder_speed_ = 0.0;
// Incremental scopes carry more information than just the duration. The infos
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 0e5230f1e0..e618b91058 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -10,6 +10,7 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap other than src/heap/heap.h and its
// write barrier here!
+#include "src/base/atomicops.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/third-party/heap-api.h"
@@ -65,7 +66,14 @@ int64_t Heap::external_memory() {
}
void Heap::update_external_memory(int64_t delta) {
- isolate()->isolate_data()->external_memory_ += delta;
+ const int64_t amount = isolate()->isolate_data()->external_memory_ + delta;
+ isolate()->isolate_data()->external_memory_ = amount;
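+ // Track the low-water mark of external memory since the last mark-compact
+ // GC and re-anchor the soft limit to it, so that freeing external memory
+ // also lowers the threshold used to trigger the next external-memory GC.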
+ if (amount <
+ isolate()->isolate_data()->external_memory_low_since_mark_compact_) {
+ isolate()->isolate_data()->external_memory_low_since_mark_compact_ = amount;
+ isolate()->isolate_data()->external_memory_limit_ =
+ amount + kExternalAllocationSoftLimit;
+ }
}
void Heap::update_external_memory_concurrently_freed(uintptr_t freed) {
@@ -73,8 +81,8 @@ void Heap::update_external_memory_concurrently_freed(uintptr_t freed) {
}
void Heap::account_external_memory_concurrently_freed() {
- isolate()->isolate_data()->external_memory_ -=
- external_memory_concurrently_freed_;
+ update_external_memory(
+ -static_cast<int64_t>(external_memory_concurrently_freed_));
external_memory_concurrently_freed_ = 0;
}
@@ -155,6 +163,14 @@ size_t Heap::NewSpaceAllocationCounter() {
return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}
+inline const base::AddressRegion& Heap::code_range() {
+#ifdef V8_ENABLE_THIRD_PARTY_HEAP
+ return tp_heap_->GetCodeRange();
+#else
+ return memory_allocator_->code_range();
+#endif
+}
+
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
@@ -248,13 +264,13 @@ template <Heap::AllocationRetryMode mode>
HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
AllocationOrigin origin,
AllocationAlignment alignment) {
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
AllocationResult result = AllocateRaw(size, allocation, origin, alignment);
DCHECK(!result.IsRetry());
return result.ToObjectChecked();
}
- DCHECK(AllowHandleAllocation::IsAllowed());
- DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_EQ(gc_state_, NOT_IN_GC);
Heap* heap = isolate()->heap();
Address* top = heap->NewSpaceAllocationTopAddress();
@@ -286,7 +302,7 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
Address Heap::DeserializerAllocate(AllocationType type, int size_in_bytes) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
AllocationResult allocation = tp_heap_->Allocate(
- size_in_bytes, type, AllocationAlignment::kWordAligned);
+ size_in_bytes, type, AllocationAlignment::kDoubleAligned);
return allocation.ToObjectChecked().ptr();
} else {
UNIMPLEMENTED();
@@ -560,11 +576,27 @@ Oddball Heap::ToBoolean(bool condition) {
}
int Heap::NextScriptId() {
- int last_id = last_script_id().value();
- if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
- last_id++;
- set_last_script_id(Smi::FromInt(last_id));
- return last_id;
+ FullObjectSlot last_script_id_slot(&roots_table()[RootIndex::kLastScriptId]);
+ Smi last_id = Smi::cast(last_script_id_slot.Relaxed_Load());
+ Smi new_id, last_id_before_cas;
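+ // Relaxed atomics are sufficient here: the id only needs to be unique,
+ // there is no ordering requirement against other memory operations.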
+ do {
+ if (last_id.value() == Smi::kMaxValue) {
+ STATIC_ASSERT(v8::UnboundScript::kNoScriptId == 0);
+ new_id = Smi::FromInt(1);
+ } else {
+ new_id = Smi::FromInt(last_id.value() + 1);
+ }
+
+ // CAS returns the old value on success, and the current value in the slot
+ // on failure. Therefore, we want to break if the returned value matches the
+ // old value (last_id), and keep looping (with the new last_id value) if it
+ // doesn't.
+ last_id_before_cas = last_id;
+ last_id =
+ Smi::cast(last_script_id_slot.Relaxed_CompareAndSwap(last_id, new_id));
+ } while (last_id != last_id_before_cas);
+
+ return new_id.value();
}
int Heap::NextDebuggingId() {
@@ -608,17 +640,21 @@ void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
base::CheckedDecrement(&backing_store_bytes_, amount);
}
+bool Heap::HasDirtyJSFinalizationRegistries() {
+ return !dirty_js_finalization_registries_list().IsUndefined(isolate());
+}
+
AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
heap_->always_allocate_scope_count_++;
}
-AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
- : AlwaysAllocateScope(isolate->heap()) {}
-
AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_count_--;
}
+AlwaysAllocateScopeForTesting::AlwaysAllocateScopeForTesting(Heap* heap)
+ : scope_(heap) {}
+
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
: heap_(heap) {
if (heap_->write_protect_code_memory()) {
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 7b1438e6a3..8fec301fe7 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -61,6 +61,7 @@ struct MemoryChunk {
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<MemoryChunk*>(object.ptr() & ~kPageAlignmentMask);
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 28ff2e970d..518bbcf162 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -33,12 +33,14 @@
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
+#include "src/heap/finalization-registry-cleanup-task.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/local-heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-measurement.h"
@@ -48,6 +50,7 @@
#include "src/heap/objects-visiting.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
+#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/stress-marking-observer.h"
@@ -182,13 +185,13 @@ struct Heap::StrongRootsList {
StrongRootsList* next;
};
-class IdleScavengeObserver : public AllocationObserver {
+class ScavengeTaskObserver : public AllocationObserver {
public:
- IdleScavengeObserver(Heap* heap, intptr_t step_size)
+ ScavengeTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
- heap_->ScheduleIdleScavengeIfNeeded(bytes_allocated);
+ heap_->ScheduleScavengeTaskIfNeeded();
}
private:
@@ -199,12 +202,15 @@ Heap::Heap()
: isolate_(isolate()),
memory_pressure_level_(MemoryPressureLevel::kNone),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
+ safepoint_(new Safepoint(this)),
external_string_table_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
set_native_contexts_list(Smi::zero());
set_allocation_sites_list(Smi::zero());
+ set_dirty_js_finalization_registries_list(Smi::zero());
+ set_dirty_js_finalization_registries_list_tail(Smi::zero());
// Put a dummy entry in the remembered pages so we can find the list in the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(kNullAddress, false);
@@ -410,7 +416,6 @@ bool Heap::HasBeenSetUp() {
return new_space_ != nullptr;
}
-
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
@@ -817,12 +822,6 @@ void Heap::GarbageCollectionPrologue() {
{
AllowHeapAllocation for_the_first_part_of_prologue;
gc_count_++;
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
}
// Reset GC statistics.
@@ -865,6 +864,14 @@ size_t Heap::SizeOfObjects() {
return total;
}
+size_t Heap::TotalGlobalHandlesSize() {
+ return isolate_->global_handles()->TotalSize();
+}
+
+size_t Heap::UsedGlobalHandlesSize() {
+ return isolate_->global_handles()->UsedSize();
+}
+
// static
const char* Heap::GetSpaceName(AllocationSpace space) {
switch (space) {
@@ -1090,10 +1097,10 @@ void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
// performance issue, use a cache data structure in heap instead.
- ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) {
+ ForeachAllocationSite(allocation_sites_list(), [](AllocationSite site) {
if (site.deopt_dependent_code()) {
site.dependent_code().MarkCodeForDeoptimization(
- isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
+ DependentCode::kAllocationSiteTenuringChangedGroup);
site.set_deopt_dependent_code(false);
}
});
@@ -1108,12 +1115,6 @@ void Heap::GarbageCollectionEpilogue() {
ZapFromSpace();
}
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
-
AllowHeapAllocation for_the_rest_of_the_epilogue;
#ifdef DEBUG
@@ -1197,18 +1198,13 @@ void Heap::GarbageCollectionEpilogue() {
ReduceNewSpaceSize();
}
- if (FLAG_harmony_weak_refs) {
+ if (FLAG_harmony_weak_refs &&
+ isolate()->host_cleanup_finalization_group_callback()) {
HandleScope handle_scope(isolate());
- while (!isolate()->heap()->dirty_js_finalization_groups().IsUndefined(
- isolate())) {
- Handle<JSFinalizationGroup> finalization_group(
- JSFinalizationGroup::cast(
- isolate()->heap()->dirty_js_finalization_groups()),
- isolate());
- isolate()->heap()->set_dirty_js_finalization_groups(
- finalization_group->next());
- finalization_group->set_next(ReadOnlyRoots(isolate()).undefined_value());
- isolate()->RunHostCleanupFinalizationGroupCallback(finalization_group);
+ Handle<JSFinalizationRegistry> finalization_registry;
+ while (
+ DequeueDirtyJSFinalizationRegistry().ToHandle(&finalization_registry)) {
+ isolate()->RunHostCleanupFinalizationGroupCallback(finalization_registry);
}
}
}
@@ -1250,11 +1246,9 @@ void Heap::HandleGCRequest() {
}
}
-
-void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
- DCHECK(FLAG_idle_time_scavenge);
+void Heap::ScheduleScavengeTaskIfNeeded() {
DCHECK_NOT_NULL(scavenge_job_);
- scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
+ scavenge_job_->ScheduleTaskIfNeeded(this);
}
TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
@@ -1288,17 +1282,18 @@ TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
return isolate_->counters()->gc_scavenger();
- } else {
- if (!incremental_marking()->IsStopped()) {
- if (ShouldReduceMemory()) {
- return isolate_->counters()->gc_finalize_reduce_memory();
- } else {
- return isolate_->counters()->gc_finalize();
- }
- } else {
- return isolate_->counters()->gc_compactor();
- }
}
+ if (incremental_marking()->IsStopped()) {
+ return isolate_->counters()->gc_compactor();
+ }
+ if (ShouldReduceMemory()) {
+ return isolate_->counters()->gc_finalize_reduce_memory();
+ }
+ if (incremental_marking()->IsMarking() &&
+ incremental_marking()->marking_worklists()->IsPerContextMode()) {
+ return isolate_->counters()->gc_finalize_measure_memory();
+ }
+ return isolate_->counters()->gc_finalize();
}
void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
@@ -1452,7 +1447,7 @@ void Heap::ReportExternalMemoryPressure() {
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
kGCCallbackFlagCollectAllExternalMemory);
if (isolate()->isolate_data()->external_memory_ >
- (isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
+ (isolate()->isolate_data()->external_memory_low_since_mark_compact_ +
external_memory_hard_limit())) {
CollectAllGarbage(
kReduceMemoryFootprintMask,
@@ -1662,6 +1657,9 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
memory_reducer_->NotifyPossibleGarbage(event);
}
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
+ if (!isolate()->context().is_null()) {
+ RemoveDirtyFinalizationRegistriesOnContext(isolate()->raw_native_context());
+ }
number_of_disposed_maps_ = retained_maps().length();
tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
@@ -1845,8 +1843,13 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
DCHECK_EQ(0, reserved_size % Map::kSize);
int num_maps = reserved_size / Map::kSize;
for (int i = 0; i < num_maps; i++) {
- AllocationResult allocation =
- map_space()->AllocateRawUnaligned(Map::kSize);
+ AllocationResult allocation;
+#if V8_ENABLE_THIRD_PARTY_HEAP_BOOL
+ allocation = AllocateRaw(Map::kSize, AllocationType::kMap,
+ AllocationOrigin::kRuntime, kWordAligned);
+#else
+ allocation = map_space()->AllocateRawUnaligned(Map::kSize);
+#endif
HeapObject free_space;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
@@ -1873,12 +1876,24 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
DCHECK_LE(static_cast<size_t>(size),
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
static_cast<AllocationSpace>(space)));
+#if V8_ENABLE_THIRD_PARTY_HEAP_BOOL
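+ // Map each snapshot space onto an AllocationType the unified allocation
+ // path understands: code and read-only spaces keep their type, everything
+ // else is allocated as young.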
+ AllocationType type = (space == CODE_SPACE)
+ ? AllocationType::kCode
+ : (space == RO_SPACE)
+ ? AllocationType::kReadOnly
+ : AllocationType::kYoung;
+ AllocationAlignment align =
+ (space == CODE_SPACE) ? kCodeAligned : kWordAligned;
+ allocation =
+ AllocateRaw(size, type, AllocationOrigin::kRuntime, align);
+#else
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
} else {
// The deserializer will update the skip list.
allocation = paged_space(space)->AllocateRawUnaligned(size);
}
+#endif
HeapObject free_space;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
@@ -1995,6 +2010,13 @@ bool Heap::PerformGarbageCollection(
}
}
+ if (FLAG_local_heaps) safepoint()->Start();
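+ // With --local-heaps enabled, bring all background threads to a safepoint
+ // for the whole collection; heap verification now also runs inside this
+ // stopped-world window (see the matching End() below).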
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ Verify();
+ }
+#endif
+
EnsureFromSpaceIsCommitted();
size_t start_young_generation_size =
@@ -2068,6 +2090,13 @@ bool Heap::PerformGarbageCollection(
local_embedder_heap_tracer()->TraceEpilogue();
}
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ Verify();
+ }
+#endif
+ if (FLAG_local_heaps) safepoint()->End();
+
{
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
gc_post_processing_depth_++;
@@ -2143,7 +2172,7 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
- isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
+ isolate()->isolate_data()->external_memory_low_since_mark_compact_ =
isolate()->isolate_data()->external_memory_;
isolate()->isolate_data()->external_memory_limit_ =
isolate()->isolate_data()->external_memory_ +
@@ -2248,7 +2277,7 @@ void Heap::MinorMarkCompact() {
LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
- AlwaysAllocateScope always_allocate(isolate());
+ AlwaysAllocateScope always_allocate(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
@@ -2287,16 +2316,8 @@ void Heap::MarkCompactPrologue() {
void Heap::CheckNewSpaceExpansionCriteria() {
- if (FLAG_experimental_new_space_growth_heuristic) {
- if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
- survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
- // Grow the size of new space if there is room to grow, and more than 10%
- // have survived the last scavenge.
- new_space_->Grow();
- survived_since_last_expansion_ = 0;
- }
- } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_->TotalCapacity()) {
+ if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+ survived_since_last_expansion_ > new_space_->TotalCapacity()) {
// Grow the size of new space if there is room to grow, and enough data
// has survived scavenge since the last expansion.
new_space_->Grow();
@@ -2365,7 +2386,7 @@ void Heap::Scavenge() {
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
// trigger one during scavenge: scavenge allocations should always succeed.
- AlwaysAllocateScope scope(isolate());
+ AlwaysAllocateScope scope(this);
// Bump-pointer allocations done during scavenge are not real allocations.
// Pause the inline allocation steps.
@@ -2636,6 +2657,7 @@ void Heap::UpdateReferencesInExternalStringTable(
void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
ProcessNativeContexts(retainer);
ProcessAllocationSites(retainer);
+ ProcessDirtyJSFinalizationRegistries(retainer);
}
@@ -2657,9 +2679,24 @@ void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
set_allocation_sites_list(allocation_site_obj);
}
+void Heap::ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer* retainer) {
+ Object head = VisitWeakList<JSFinalizationRegistry>(
+ this, dirty_js_finalization_registries_list(), retainer);
+ set_dirty_js_finalization_registries_list(head);
+ // If the list is empty, set the tail to undefined. Otherwise the tail is set
+ // by WeakListVisitor<JSFinalizationRegistry>::VisitLiveObject.
+ if (head.IsUndefined(isolate())) {
+ set_dirty_js_finalization_registries_list_tail(head);
+ }
+}
+
void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
+ set_dirty_js_finalization_registries_list(
+ retainer->RetainAs(dirty_js_finalization_registries_list()));
+ set_dirty_js_finalization_registries_list_tail(
+ retainer->RetainAs(dirty_js_finalization_registries_list_tail()));
}
void Heap::ForeachAllocationSite(
@@ -2813,25 +2850,32 @@ HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
void* Heap::AllocateExternalBackingStore(
const std::function<void*(size_t)>& allocate, size_t byte_length) {
- size_t new_space_backing_store_bytes =
- new_space()->ExternalBackingStoreBytes();
- if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
- new_space_backing_store_bytes >= byte_length) {
- // Performing a young generation GC amortizes over the allocated backing
- // store bytes and may free enough external bytes for this allocation.
- CollectGarbage(NEW_SPACE, GarbageCollectionReason::kExternalMemoryPressure);
+ if (!always_allocate()) {
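+ // Under an AlwaysAllocateScope GCs must not be triggered, so the
+ // young-generation GC that normally amortizes external backing-store
+ // pressure is skipped and allocation is attempted directly.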
+ size_t new_space_backing_store_bytes =
+ new_space()->ExternalBackingStoreBytes();
+ if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
+ new_space_backing_store_bytes >= byte_length) {
+ // Performing a young generation GC amortizes over the allocated backing
+ // store bytes and may free enough external bytes for this allocation.
+ CollectGarbage(NEW_SPACE,
+ GarbageCollectionReason::kExternalMemoryPressure);
+ }
}
// TODO(ulan): Perform GCs proactively based on the byte_length and
// the current external backing store counters.
void* result = allocate(byte_length);
if (result) return result;
- for (int i = 0; i < 2; i++) {
- CollectGarbage(OLD_SPACE, GarbageCollectionReason::kExternalMemoryPressure);
- result = allocate(byte_length);
- if (result) return result;
+ if (!always_allocate()) {
+ for (int i = 0; i < 2; i++) {
+ CollectGarbage(OLD_SPACE,
+ GarbageCollectionReason::kExternalMemoryPressure);
+ result = allocate(byte_length);
+ if (result) return result;
+ }
+ isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ CollectAllAvailableGarbage(
+ GarbageCollectionReason::kExternalMemoryPressure);
}
- isolate()->counters()->gc_last_resort_from_handles()->Increment();
- CollectAllAvailableGarbage(GarbageCollectionReason::kExternalMemoryPressure);
return allocate(byte_length);
}
@@ -2941,6 +2985,23 @@ bool Heap::CanMoveObjectStart(HeapObject object) {
return Page::FromHeapObject(object)->SweepingDone();
}
+// static
+bool Heap::InOffThreadSpace(HeapObject heap_object) {
+#ifdef V8_ENABLE_THIRD_PARTY_HEAP
+ return false; // currently unsupported
+#else
+ Space* owner = MemoryChunk::FromHeapObject(heap_object)->owner();
+ if (owner->identity() == OLD_SPACE) {
+ // TODO(leszeks): Should we exclude compaction spaces here?
+ return static_cast<PagedSpace*>(owner)->is_off_thread_space();
+ }
+ if (owner->identity() == LO_SPACE) {
+ return static_cast<LargeObjectSpace*>(owner)->is_off_thread();
+ }
+ return false;
+#endif
+}
+
bool Heap::IsImmovable(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
// TODO(steveblackburn): For now all objects are immovable.
@@ -3909,8 +3970,8 @@ const char* Heap::GarbageCollectionReasonToString(
return "full hash-table";
case GarbageCollectionReason::kHeapProfiler:
return "heap profiler";
- case GarbageCollectionReason::kIdleTask:
- return "idle task";
+ case GarbageCollectionReason::kTask:
+ return "task";
case GarbageCollectionReason::kLastResort:
return "last resort";
case GarbageCollectionReason::kLowMemoryNotification:
@@ -4057,6 +4118,7 @@ void Heap::Verify() {
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
+ array_buffer_sweeper()->EnsureFinished();
VerifyPointersVisitor visitor(this);
IterateRoots(&visitor, VISIT_ONLY_STRONG);
@@ -4392,6 +4454,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kBootstrapper);
if (mode != VISIT_ONLY_STRONG_IGNORE_STACK) {
isolate_->Iterate(v);
+ isolate_->global_handles()->IterateStrongStackRoots(v);
v->Synchronize(VisitorSynchronization::kTop);
}
Relocatable::Iterate(isolate_, v);
@@ -4406,6 +4469,12 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
isolate_->handle_scope_implementer()->Iterate(v);
+
+ if (FLAG_local_heaps) {
+ safepoint_->Iterate(&left_trim_visitor);
+ safepoint_->Iterate(v);
+ }
+
isolate_->IterateDeferredHandles(&left_trim_visitor);
isolate_->IterateDeferredHandles(v);
v->Synchronize(VisitorSynchronization::kHandleScope);
@@ -4743,12 +4812,12 @@ size_t Heap::GlobalSizeOfObjects() {
uint64_t Heap::PromotedExternalMemorySize() {
IsolateData* isolate_data = isolate()->isolate_data();
if (isolate_data->external_memory_ <=
- isolate_data->external_memory_at_last_mark_compact_) {
+ isolate_data->external_memory_low_since_mark_compact_) {
return 0;
}
return static_cast<uint64_t>(
isolate_data->external_memory_ -
- isolate_data->external_memory_at_last_mark_compact_);
+ isolate_data->external_memory_low_since_mark_compact_);
}
bool Heap::AllocationLimitOvershotByLargeMargin() {
@@ -4871,7 +4940,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
double gained_since_last_gc =
PromotedSinceLastGC() +
(isolate()->isolate_data()->external_memory_ -
- isolate()->isolate_data()->external_memory_at_last_mark_compact_);
+ isolate()->isolate_data()->external_memory_low_since_mark_compact_);
double size_before_gc =
OldGenerationObjectsAndPromotedExternalMemorySize() -
gained_since_last_gc;
@@ -5006,7 +5075,7 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
isolate()->counters()->gc_last_resort_from_handles()->Increment();
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
{
- AlwaysAllocateScope scope(isolate());
+ AlwaysAllocateScope scope(this);
alloc = AllocateRaw(size, allocation, origin, alignment);
}
if (alloc.To(&result)) {
@@ -5040,7 +5109,7 @@ HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
isolate()->counters()->gc_last_resort_from_handles()->Increment();
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
{
- AlwaysAllocateScope scope(isolate());
+ AlwaysAllocateScope scope(this);
alloc = code_lo_space()->AllocateRaw(size);
}
if (alloc.To(&result)) {
@@ -5058,8 +5127,7 @@ void Heap::SetUp() {
#endif
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
- tp_heap_ =
- third_party_heap::Heap::New(reinterpret_cast<v8::Isolate*>(isolate()));
+ tp_heap_ = third_party_heap::Heap::New(isolate());
#endif
// Initialize heap spaces and initial maps and objects.
@@ -5151,12 +5219,10 @@ void Heap::SetUpSpaces() {
}
#endif // ENABLE_MINOR_MC
- if (FLAG_idle_time_scavenge) {
- scavenge_job_.reset(new ScavengeJob());
- idle_scavenge_observer_.reset(new IdleScavengeObserver(
- this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
- new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
- }
+ scavenge_job_.reset(new ScavengeJob());
+ scavenge_task_observer_.reset(new ScavengeTaskObserver(
+ this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
+ new_space()->AddAllocationObserver(scavenge_task_observer_.get());
SetGetExternallyAllocatedMemoryInBytesCallback(
DefaultGetExternallyAllocatedMemoryInBytesCallback);
@@ -5336,11 +5402,9 @@ void Heap::TearDown() {
}
}
- if (FLAG_idle_time_scavenge) {
- new_space()->RemoveAllocationObserver(idle_scavenge_observer_.get());
- idle_scavenge_observer_.reset();
- scavenge_job_.reset();
- }
+ new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
+ scavenge_task_observer_.reset();
+ scavenge_job_.reset();
if (FLAG_stress_marking > 0) {
RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
@@ -5966,6 +6030,16 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
+size_t Heap::YoungArrayBufferBytes() {
+ DCHECK(V8_ARRAY_BUFFER_EXTENSION_BOOL);
+ return array_buffer_sweeper()->YoungBytes();
+}
+
+size_t Heap::OldArrayBufferBytes() {
+ DCHECK(V8_ARRAY_BUFFER_EXTENSION_BOOL);
+ return array_buffer_sweeper()->OldBytes();
+}
+
void Heap::RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end) {
StrongRootsList* list = new StrongRootsList();
list->next = strong_roots_list_;
@@ -6002,23 +6076,96 @@ void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
set_interpreter_entry_trampoline_for_profiling(code);
}
-void Heap::AddDirtyJSFinalizationGroup(
- JSFinalizationGroup finalization_group,
+void Heap::PostFinalizationRegistryCleanupTaskIfNeeded() {
+ DCHECK(!isolate()->host_cleanup_finalization_group_callback());
+ // Only one cleanup task is posted at a time.
+ if (!HasDirtyJSFinalizationRegistries() ||
+ is_finalization_registry_cleanup_task_posted_) {
+ return;
+ }
+ auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(isolate()));
+ auto task = std::make_unique<FinalizationRegistryCleanupTask>(this);
+ taskrunner->PostNonNestableTask(std::move(task));
+ is_finalization_registry_cleanup_task_posted_ = true;
+}
+
+void Heap::EnqueueDirtyJSFinalizationRegistry(
+ JSFinalizationRegistry finalization_registry,
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot) {
- DCHECK(dirty_js_finalization_groups().IsUndefined(isolate()) ||
- dirty_js_finalization_groups().IsJSFinalizationGroup());
- DCHECK(finalization_group.next().IsUndefined(isolate()));
- DCHECK(!finalization_group.scheduled_for_cleanup());
- finalization_group.set_scheduled_for_cleanup(true);
- finalization_group.set_next(dirty_js_finalization_groups());
- gc_notify_updated_slot(
- finalization_group,
- finalization_group.RawField(JSFinalizationGroup::kNextOffset),
- dirty_js_finalization_groups());
- set_dirty_js_finalization_groups(finalization_group);
- // Roots are rescanned after objects are moved, so no need to record a slot
- // for the root pointing to the first JSFinalizationGroup.
+ // Add a FinalizationRegistry to the tail of the dirty list.
+ DCHECK(!HasDirtyJSFinalizationRegistries() ||
+ dirty_js_finalization_registries_list().IsJSFinalizationRegistry());
+ DCHECK(finalization_registry.next_dirty().IsUndefined(isolate()));
+ DCHECK(!finalization_registry.scheduled_for_cleanup());
+ finalization_registry.set_scheduled_for_cleanup(true);
+ if (dirty_js_finalization_registries_list_tail().IsUndefined(isolate())) {
+ DCHECK(dirty_js_finalization_registries_list().IsUndefined(isolate()));
+ set_dirty_js_finalization_registries_list(finalization_registry);
+ // dirty_js_finalization_registries_list_ is rescanned by
+ // ProcessWeakListRoots.
+ } else {
+ JSFinalizationRegistry tail = JSFinalizationRegistry::cast(
+ dirty_js_finalization_registries_list_tail());
+ tail.set_next_dirty(finalization_registry);
+ gc_notify_updated_slot(tail,
+ tail.RawField(
+ JSFinalizationRegistry::kNextDirtyOffset),
+ finalization_registry);
+ }
+ set_dirty_js_finalization_registries_list_tail(finalization_registry);
+ // dirty_js_finalization_registries_list_tail_ is rescanned by
+ // ProcessWeakListRoots.
+}
+
+MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
+ // Take a FinalizationRegistry from the head of the dirty list for fairness.
+ if (HasDirtyJSFinalizationRegistries()) {
+ Handle<JSFinalizationRegistry> head(
+ JSFinalizationRegistry::cast(dirty_js_finalization_registries_list()),
+ isolate());
+ set_dirty_js_finalization_registries_list(head->next_dirty());
+ head->set_next_dirty(ReadOnlyRoots(this).undefined_value());
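+ // If the head was also the tail, the list is now empty and the tail must
+ // be reset to undefined as well.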
+ if (*head == dirty_js_finalization_registries_list_tail()) {
+ set_dirty_js_finalization_registries_list_tail(
+ ReadOnlyRoots(this).undefined_value());
+ }
+ return head;
+ }
+ return {};
+}
+
+void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
+ if (!FLAG_harmony_weak_refs) return;
+ if (isolate()->host_cleanup_finalization_group_callback()) return;
+
+ DisallowHeapAllocation no_gc;
+
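+ // Walk the singly-linked dirty list once, unlinking every registry that
+ // belongs to {context}; {prev} tracks the last kept node so the tail can
+ // be fixed up after the loop.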
+ Isolate* isolate = this->isolate();
+ Object prev = ReadOnlyRoots(isolate).undefined_value();
+ Object current = dirty_js_finalization_registries_list();
+ while (!current.IsUndefined(isolate)) {
+ JSFinalizationRegistry finalization_registry =
+ JSFinalizationRegistry::cast(current);
+ if (finalization_registry.native_context() == context) {
+ if (prev.IsUndefined(isolate)) {
+ set_dirty_js_finalization_registries_list(
+ finalization_registry.next_dirty());
+ } else {
+ JSFinalizationRegistry::cast(prev).set_next_dirty(
+ finalization_registry.next_dirty());
+ }
+ finalization_registry.set_scheduled_for_cleanup(false);
+ current = finalization_registry.next_dirty();
+ finalization_registry.set_next_dirty(
+ ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ prev = current;
+ current = finalization_registry.next_dirty();
+ }
+ }
+ set_dirty_js_finalization_registries_list_tail(prev);
}
void Heap::KeepDuringJob(Handle<JSReceiver> target) {
@@ -6527,7 +6674,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
// Marking consistency.
- if (chunk->IsWritable()) {
+ if (chunk->IsWritable() && !Heap::InOffThreadSpace(object)) {
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 86dbab60a7..6d6eddf61a 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -70,8 +70,9 @@ class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
class Isolate;
-class JSFinalizationGroup;
+class JSFinalizationRegistry;
class LocalEmbedderHeapTracer;
+class LocalHeap;
class MemoryAllocator;
class MemoryMeasurement;
class MemoryReducer;
@@ -82,6 +83,7 @@ class Page;
class PagedSpace;
class ReadOnlyHeap;
class RootVisitor;
+class Safepoint;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
@@ -127,7 +129,7 @@ enum class GarbageCollectionReason {
kFinalizeMarkingViaTask = 9,
kFullHashtable = 10,
kHeapProfiler = 11,
- kIdleTask = 12,
+ kTask = 12,
kLastResort = 13,
kLowMemoryNotification = 14,
kMakeHeapIterable = 15,
@@ -502,6 +504,19 @@ class Heap {
}
Object allocation_sites_list() { return allocation_sites_list_; }
+ void set_dirty_js_finalization_registries_list(Object object) {
+ dirty_js_finalization_registries_list_ = object;
+ }
+ Object dirty_js_finalization_registries_list() {
+ return dirty_js_finalization_registries_list_;
+ }
+ void set_dirty_js_finalization_registries_list_tail(Object object) {
+ dirty_js_finalization_registries_list_tail_ = object;
+ }
+ Object dirty_js_finalization_registries_list_tail() {
+ return dirty_js_finalization_registries_list_tail_;
+ }
+
// Used in CreateAllocationSiteStub and the (de)serializer.
Address allocation_sites_list_address() {
return reinterpret_cast<Address>(&allocation_sites_list_);
@@ -605,6 +620,8 @@ class Heap {
void AppendArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension);
+ Safepoint* safepoint() { return safepoint_.get(); }
+
V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs();
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -644,6 +661,9 @@ class Heap {
V8_INLINE void update_external_memory_concurrently_freed(uintptr_t freed);
V8_INLINE void account_external_memory_concurrently_freed();
+ V8_EXPORT_PRIVATE size_t YoungArrayBufferBytes();
+ V8_EXPORT_PRIVATE size_t OldArrayBufferBytes();
+
size_t backing_store_bytes() const { return backing_store_bytes_; }
void CompactWeakArrayLists(AllocationType allocation);
@@ -763,6 +783,8 @@ class Heap {
return array_buffer_sweeper_.get();
}
+ const base::AddressRegion& code_range();
+
// ===========================================================================
// Root set access. ==========================================================
// ===========================================================================
@@ -795,12 +817,31 @@ class Heap {
// See also: FLAG_interpreted_frames_native_stack.
void SetInterpreterEntryTrampolineForProfiling(Code code);
- // Add finalization_group into the dirty_js_finalization_groups list.
- void AddDirtyJSFinalizationGroup(
- JSFinalizationGroup finalization_group,
+ void EnqueueDirtyJSFinalizationRegistry(
+ JSFinalizationRegistry finalization_registry,
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot);
+ MaybeHandle<JSFinalizationRegistry> DequeueDirtyJSFinalizationRegistry();
+
+ // Called from Heap::NotifyContextDisposed to remove all
+ // FinalizationRegistries with {context} from the dirty list when the
+ // context goes away, e.g. when it is navigated away from or detached. If
+ // the dirty list is empty afterwards, the cleanup task is aborted if
+ // needed.
+ void RemoveDirtyFinalizationRegistriesOnContext(NativeContext context);
+
+ inline bool HasDirtyJSFinalizationRegistries();
+
+ void PostFinalizationRegistryCleanupTaskIfNeeded();
+
+ void set_is_finalization_registry_cleanup_task_posted(bool posted) {
+ is_finalization_registry_cleanup_task_posted_ = posted;
+ }
+
+ bool is_finalization_registry_cleanup_task_posted() {
+ return is_finalization_registry_cleanup_task_posted_;
+ }
+
V8_EXPORT_PRIVATE void KeepDuringJob(Handle<JSReceiver> target);
void ClearKeptObjects();
@@ -1043,6 +1084,8 @@ class Heap {
static inline bool InToPage(MaybeObject object);
static inline bool InToPage(HeapObject heap_object);
+ V8_EXPORT_PRIVATE static bool InOffThreadSpace(HeapObject heap_object);
+
// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);
@@ -1156,9 +1199,15 @@ class Heap {
// all available bytes. Check MaxHeapObjectSize() instead.
size_t Available();
- // Returns of size of all objects residing in the heap.
+ // Returns size of all objects residing in the heap.
V8_EXPORT_PRIVATE size_t SizeOfObjects();
+ // Returns size of all global handles in the heap.
+ V8_EXPORT_PRIVATE size_t TotalGlobalHandlesSize();
+
+ // Returns size of all allocated/used global handles in the heap.
+ V8_EXPORT_PRIVATE size_t UsedGlobalHandlesSize();
+
void UpdateSurvivalStatistics(int start_new_space_size);
inline void IncrementPromotedObjectsSize(size_t object_size) {
@@ -1518,7 +1567,6 @@ class Heap {
static const int kOldSurvivalRateLowThreshold = 10;
static const int kMaxMarkCompactsInIdleRound = 7;
- static const int kIdleScavengeThreshold = 5;
static const int kInitialFeedbackCapacity = 256;
@@ -1709,6 +1757,7 @@ class Heap {
void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
void ProcessNativeContexts(WeakObjectRetainer* retainer);
void ProcessAllocationSites(WeakObjectRetainer* retainer);
+ void ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer* retainer);
void ProcessWeakListRoots(WeakObjectRetainer* retainer);
// ===========================================================================
@@ -1783,7 +1832,12 @@ class Heap {
// ===========================================================================
bool RecentIdleNotificationHappened();
- void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
+
+ // ===========================================================================
+ // GC Tasks. =================================================================
+ // ===========================================================================
+
+ void ScheduleScavengeTaskIfNeeded();
// ===========================================================================
// Allocation methods. =======================================================
@@ -2013,6 +2067,9 @@ class Heap {
// List heads are initialized lazily and contain the undefined_value at start.
Object native_contexts_list_;
Object allocation_sites_list_;
+ Object dirty_js_finalization_registries_list_;
+ // Weak list tails.
+ Object dirty_js_finalization_registries_list_tail_;
std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
std::vector<GCCallbackTuple> gc_prologue_callbacks_;
@@ -2062,7 +2119,7 @@ class Heap {
std::unique_ptr<ObjectStats> live_object_stats_;
std::unique_ptr<ObjectStats> dead_object_stats_;
std::unique_ptr<ScavengeJob> scavenge_job_;
- std::unique_ptr<AllocationObserver> idle_scavenge_observer_;
+ std::unique_ptr<AllocationObserver> scavenge_task_observer_;
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
StrongRootsList* strong_roots_list_ = nullptr;
@@ -2109,6 +2166,8 @@ class Heap {
GCCallbackFlags current_gc_callback_flags_ =
GCCallbackFlags::kNoGCCallbackFlags;
+ std::unique_ptr<Safepoint> safepoint_;
+
bool is_current_gc_forced_ = false;
ExternalStringTable external_string_table_;
@@ -2149,6 +2208,8 @@ class Heap {
std::vector<HeapObjectAllocationTracker*> allocation_trackers_;
+ bool is_finalization_registry_cleanup_task_posted_ = false;
+
std::unique_ptr<third_party_heap::Heap> tp_heap_;
// Classes in "heap" can be friends.
@@ -2159,7 +2220,7 @@ class Heap {
friend class GCCallbacksScope;
friend class GCTracer;
friend class HeapObjectIterator;
- friend class IdleScavengeObserver;
+ friend class ScavengeTaskObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class OldLargeObjectSpace;
@@ -2227,16 +2288,33 @@ class HeapStats {
intptr_t* end_marker; // 27
};
+// Disables GC for all allocations. It should not be used outside of the
+// heap, the deserializer, and isolate bootstrapping.
+// Use AlwaysAllocateScopeForTesting in tests.
class AlwaysAllocateScope {
public:
- explicit inline AlwaysAllocateScope(Heap* heap);
- explicit inline AlwaysAllocateScope(Isolate* isolate);
inline ~AlwaysAllocateScope();
private:
+ friend class AlwaysAllocateScopeForTesting;
+ friend class Deserializer;
+ friend class DeserializerAllocator;
+ friend class Evacuator;
+ friend class Heap;
+ friend class Isolate;
+
+ explicit inline AlwaysAllocateScope(Heap* heap);
Heap* heap_;
};
+class AlwaysAllocateScopeForTesting {
+ public:
+ explicit inline AlwaysAllocateScopeForTesting(Heap* heap);
+
+ private:
+ AlwaysAllocateScope scope_;
+};
+
// The CodeSpaceMemoryModificationScope can only be used by the main thread.
class CodeSpaceMemoryModificationScope {
public:
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 1f924ff139..63e47ca313 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/embedder-tracing.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
@@ -18,8 +19,7 @@ namespace internal {
class IncrementalMarkingJob::Task : public CancelableTask {
public:
- static StepResult Step(Heap* heap,
- EmbedderHeapTracer::EmbedderStackState stack_state);
+ static StepResult Step(Heap* heap);
Task(Isolate* isolate, IncrementalMarkingJob* job,
EmbedderHeapTracer::EmbedderStackState stack_state, TaskType task_type)
@@ -52,48 +52,39 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
SetTaskPending(task_type, true);
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
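+ // Non-nestable tasks are only run from the outermost message loop, i.e.
+ // with no JS frames on the stack, so the embedder stack state is known to
+ // be empty; for potentially nested tasks it is unknown.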
+ const EmbedderHeapTracer::EmbedderStackState stack_state =
+ taskrunner->NonNestableTasksEnabled()
+ ? EmbedderHeapTracer::EmbedderStackState::kEmpty
+ : EmbedderHeapTracer::EmbedderStackState::kUnknown;
+ auto task =
+ std::make_unique<Task>(heap->isolate(), this, stack_state, task_type);
if (task_type == TaskType::kNormal) {
+ scheduled_time_ = heap->MonotonicallyIncreasingTimeInMs();
if (taskrunner->NonNestableTasksEnabled()) {
- taskrunner->PostNonNestableTask(std::make_unique<Task>(
- heap->isolate(), this,
- EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type));
+ taskrunner->PostNonNestableTask(std::move(task));
} else {
- taskrunner->PostTask(std::make_unique<Task>(
- heap->isolate(), this,
- EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type));
+ taskrunner->PostTask(std::move(task));
}
} else {
if (taskrunner->NonNestableDelayedTasksEnabled()) {
- taskrunner->PostNonNestableDelayedTask(
- std::make_unique<Task>(
- heap->isolate(), this,
- EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type),
- kDelayInSeconds);
+ taskrunner->PostNonNestableDelayedTask(std::move(task),
+ kDelayInSeconds);
} else {
- taskrunner->PostDelayedTask(
- std::make_unique<Task>(
- heap->isolate(), this,
- EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type),
- kDelayInSeconds);
+ taskrunner->PostDelayedTask(std::move(task), kDelayInSeconds);
}
}
}
}
-StepResult IncrementalMarkingJob::Task::Step(
- Heap* heap, EmbedderHeapTracer::EmbedderStackState stack_state) {
+StepResult IncrementalMarkingJob::Task::Step(Heap* heap) {
const int kIncrementalMarkingDelayMs = 1;
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
StepResult result = heap->incremental_marking()->AdvanceWithDeadline(
deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kTask);
- {
- EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(),
- stack_state);
- heap->FinalizeIncrementalMarkingIfComplete(
- GarbageCollectionReason::kFinalizeMarkingViaTask);
- }
+ heap->FinalizeIncrementalMarkingIfComplete(
+ GarbageCollectionReason::kFinalizeMarkingViaTask);
return result;
}
@@ -102,12 +93,19 @@ void IncrementalMarkingJob::Task::RunInternal() {
TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
Heap* heap = isolate()->heap();
+ EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(),
+ stack_state_);
+ if (task_type_ == TaskType::kNormal) {
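+ // Record how long the task sat in the queue; the tracer averages this,
+ // and MarkingComplete uses the estimate to decide whether delaying
+ // finalization until the next task is affordable.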
+ heap->tracer()->RecordTimeToIncrementalMarkingTask(
+ heap->MonotonicallyIncreasingTimeInMs() - job_->scheduled_time_);
+ job_->scheduled_time_ = 0.0;
+ }
IncrementalMarking* incremental_marking = heap->incremental_marking();
if (incremental_marking->IsStopped()) {
if (heap->IncrementalMarkingLimitReached() !=
Heap::IncrementalMarkingLimit::kNoLimit) {
heap->StartIncrementalMarking(heap->GCFlagsForIncrementalMarking(),
- GarbageCollectionReason::kIdleTask,
+ GarbageCollectionReason::kTask,
kGCCallbackScheduleIdleGarbageCollection);
}
}
@@ -117,14 +115,23 @@ void IncrementalMarkingJob::Task::RunInternal() {
job_->SetTaskPending(task_type_, false);
if (!incremental_marking->IsStopped()) {
- StepResult step_result = Step(heap, stack_state_);
+ StepResult step_result = Step(heap);
if (!incremental_marking->IsStopped()) {
- job_->ScheduleTask(heap, step_result == StepResult::kNoImmediateWork
- ? TaskType::kDelayed
- : TaskType::kNormal);
+ const TaskType task_type =
+ incremental_marking->finalize_marking_completed() ||
+ step_result != StepResult::kNoImmediateWork
+ ? TaskType::kNormal
+ : TaskType::kDelayed;
+ job_->ScheduleTask(heap, task_type);
}
}
}
+double IncrementalMarkingJob::CurrentTimeToTask(Heap* heap) const {
+ if (scheduled_time_ == 0.0) return 0.0;
+
+ return heap->MonotonicallyIncreasingTimeInMs() - scheduled_time_;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index 145f1dca64..ed133e88e5 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -16,7 +16,7 @@ class Isolate;
// The incremental marking job uses platform tasks to perform incremental
// marking steps. The job posts a foreground task that makes a small (~1ms)
// step and posts another task until the marking is completed.
-class IncrementalMarkingJob {
+class IncrementalMarkingJob final {
public:
enum class TaskType { kNormal, kDelayed };
@@ -26,14 +26,17 @@ class IncrementalMarkingJob {
void ScheduleTask(Heap* heap, TaskType task_type = TaskType::kNormal);
- private:
- class Task;
- static constexpr double kDelayInSeconds = 10.0 / 1000.0;
+ double CurrentTimeToTask(Heap* heap) const;
- bool IsTaskPending(TaskType task_type) {
+ bool IsTaskPending(TaskType task_type) const {
return task_type == TaskType::kNormal ? normal_task_pending_
: delayed_task_pending_;
}
+
+ private:
+ class Task;
+ static constexpr double kDelayInSeconds = 10.0 / 1000.0;
+
void SetTaskPending(TaskType task_type, bool value) {
if (task_type == TaskType::kNormal) {
normal_task_pending_ = value;
@@ -42,6 +45,7 @@ class IncrementalMarkingJob {
}
}
+ double scheduled_time_ = 0.0;
bool normal_task_pending_ = false;
bool delayed_task_pending_ = false;
};
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 29faad471c..76fdbc80c8 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -29,6 +29,7 @@
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/tracing/trace-event.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -50,17 +51,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap,
: heap_(heap),
collector_(heap->mark_compact_collector()),
weak_objects_(weak_objects),
- initial_old_generation_size_(0),
- bytes_marked_(0),
- scheduled_bytes_to_mark_(0),
- schedule_update_time_ms_(0),
- bytes_marked_concurrently_(0),
- is_compacting_(false),
- should_hurry_(false),
- was_activated_(false),
- black_allocation_(false),
- finalize_marking_completed_(false),
- request_type_(NONE),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
SetState(STOPPED);
@@ -285,13 +275,13 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
heap_->tracer()->NotifyIncrementalMarkingStart();
start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+ time_to_force_completion_ = 0.0;
initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
bytes_marked_ = 0;
scheduled_bytes_to_mark_ = 0;
schedule_update_time_ms_ = start_time_ms_;
bytes_marked_concurrently_ = 0;
- should_hurry_ = false;
was_activated_ = true;
{
@@ -680,18 +670,25 @@ void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
}
}
-StepResult IncrementalMarking::EmbedderStep(double duration_ms) {
- if (!ShouldDoEmbedderStep()) return StepResult::kNoImmediateWork;
+StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
+ double* duration_ms) {
+ if (!ShouldDoEmbedderStep()) {
+ *duration_ms = 0.0;
+ return StepResult::kNoImmediateWork;
+ }
constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
- double deadline = heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
+ LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
+ const double start = heap_->MonotonicallyIncreasingTimeInMs();
+ const double deadline = start + expected_duration_ms;
+ double current;
bool empty_worklist;
+ bool remote_tracing_done = false;
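+ // Alternate between draining the local embedder worklist and letting the
+ // embedder trace; stop once both report no more work or the deadline has
+ // passed, and report the actual time spent back to the caller.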
do {
{
- LocalEmbedderHeapTracer::ProcessingScope scope(
- heap_->local_embedder_heap_tracer());
+ LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
HeapObject object;
size_t cnt = 0;
empty_worklist = true;
@@ -704,20 +701,17 @@ StepResult IncrementalMarking::EmbedderStep(double duration_ms) {
}
}
}
- heap_->local_embedder_heap_tracer()->Trace(deadline);
- } while (!empty_worklist &&
- (heap_->MonotonicallyIncreasingTimeInMs() < deadline));
- heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist);
- return empty_worklist ? StepResult::kNoImmediateWork
- : StepResult::kMoreWorkRemaining;
+ remote_tracing_done = local_tracer->Trace(deadline);
+ current = heap_->MonotonicallyIncreasingTimeInMs();
+ } while (!empty_worklist && !remote_tracing_done && (current < deadline));
+ local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
+ *duration_ms = current - start;
+ return (empty_worklist && remote_tracing_done)
+ ? StepResult::kNoImmediateWork
+ : StepResult::kMoreWorkRemaining;
}
void IncrementalMarking::Hurry() {
- // A scavenge may have pushed new objects on the marking deque (due to black
- // allocation) even in COMPLETE state. This may happen if scavenges are
- // forced e.g. in tests. It should not happen when COMPLETE was set when
- // incremental marking finished and a regular GC was triggered after that
- // because should_hurry_ will force a full GC.
if (!marking_worklists()->IsEmpty()) {
double start = 0.0;
if (FLAG_trace_incremental_marking) {
@@ -765,7 +759,6 @@ void IncrementalMarking::Stop() {
}
}
- IncrementalMarking::set_should_hurry(false);
heap_->isolate()->stack_guard()->ClearGC();
SetState(STOPPED);
is_compacting_ = false;
@@ -792,15 +785,66 @@ void IncrementalMarking::FinalizeMarking(CompletionAction action) {
}
}
+double IncrementalMarking::CurrentTimeToMarkingTask() const {
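+ // Estimates how long it will take until the next marking task runs: the
+ // larger of the recorded average task latency and the time the currently
+ // scheduled task has already been waiting.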
+ const double recorded_time_to_marking_task =
+ heap_->tracer()->AverageTimeToIncrementalMarkingTask();
+ const double current_time_to_marking_task =
+ incremental_marking_job_.CurrentTimeToTask(heap_);
+ if (recorded_time_to_marking_task == 0.0) return 0.0;
+ return Max(recorded_time_to_marking_task, current_time_to_marking_task);
+}
void IncrementalMarking::MarkingComplete(CompletionAction action) {
+ // Allowed overshoot percentage of incremental marking wall time.
+ constexpr double kAllowedOvershoot = 0.1;
+ // Minimum overshoot in ms. This is used to allow moving away from the
+ // stack when marking was fast.
+ constexpr double kMinOvershootMs = 50;
+
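+ // When completion is requested from the stack guard, prefer to finish in
+ // a task instead: a task runs with no JS on the stack, so the embedder
+ // does not have to scan the stack conservatively. Delay only if the next
+ // marking task is expected to run within the allowed overshoot window;
+ // otherwise complete right away.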
+ if (action == GC_VIA_STACK_GUARD) {
+ if (time_to_force_completion_ == 0.0) {
+ const double now = heap_->MonotonicallyIncreasingTimeInMs();
+ const double overshoot_ms =
+ Max(kMinOvershootMs, (now - start_time_ms_) * kAllowedOvershoot);
+ const double time_to_marking_task = CurrentTimeToMarkingTask();
+ if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
+ if (FLAG_trace_incremental_marking) {
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Not delaying marking completion. time to "
+ "task: %fms allowed overshoot: %fms\n",
+ time_to_marking_task, overshoot_ms);
+ }
+ } else {
+ time_to_force_completion_ = now + overshoot_ms;
+ if (FLAG_trace_incremental_marking) {
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Delaying GC via stack guard. time to task: "
+ "%fms "
+ "allowed overshoot: %fms\n",
+ time_to_marking_task, overshoot_ms);
+ }
+ incremental_marking_job_.ScheduleTask(
+ heap(), IncrementalMarkingJob::TaskType::kNormal);
+ return;
+ }
+ }
+ if (heap()->MonotonicallyIncreasingTimeInMs() < time_to_force_completion_) {
+ if (FLAG_trace_incremental_marking) {
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Delaying GC via stack guard. time left: "
+ "%fms\n",
+ time_to_force_completion_ -
+ heap_->MonotonicallyIncreasingTimeInMs());
+ }
+ return;
+ }
+ }
+
SetState(COMPLETE);
// We will set the stack guard to request a GC now. This will mean the rest
// of the GC gets performed as soon as possible (we can't do a GC here in a
// record-write context). If a few things get allocated between now and then
- // that shouldn't make us do a scavenge and keep being incremental, so we set
- // the should-hurry flag to indicate that there can't be much work left to do.
- set_should_hurry(true);
+ // that shouldn't make us do a scavenge and keep being incremental.
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Complete (normal).\n");
@@ -811,7 +855,6 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
}
}
-
void IncrementalMarking::Epilogue() {
was_activated_ = false;
finalize_marking_completed_ = false;
@@ -863,12 +906,11 @@ void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
namespace {
StepResult CombineStepResults(StepResult a, StepResult b) {
+ DCHECK_NE(StepResult::kWaitingForFinalization, a);
+ DCHECK_NE(StepResult::kWaitingForFinalization, b);
if (a == StepResult::kMoreWorkRemaining ||
b == StepResult::kMoreWorkRemaining)
return StepResult::kMoreWorkRemaining;
- if (a == StepResult::kWaitingForFinalization ||
- b == StepResult::kWaitingForFinalization)
- return StepResult::kWaitingForFinalization;
return StepResult::kNoImmediateWork;
}
} // anonymous namespace
@@ -884,22 +926,7 @@ StepResult IncrementalMarking::AdvanceWithDeadline(
ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
FastForwardScheduleIfCloseToFinalization();
-
- double remaining_time_in_ms = 0.0;
- StepResult result;
- do {
- StepResult v8_result =
- V8Step(kStepSizeInMs / 2, completion_action, step_origin);
- remaining_time_in_ms =
- deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
- StepResult embedder_result =
- EmbedderStep(Min(kStepSizeInMs, remaining_time_in_ms));
- result = CombineStepResults(v8_result, embedder_result);
- remaining_time_in_ms =
- deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
- } while (remaining_time_in_ms >= kStepSizeInMs &&
- result == StepResult::kMoreWorkRemaining);
- return result;
+ return Step(kStepSizeInMs, completion_action, step_origin);
}
void IncrementalMarking::FinalizeSweeping() {
@@ -1015,13 +1042,12 @@ void IncrementalMarking::AdvanceOnAllocation() {
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
ScheduleBytesToMarkBasedOnAllocation();
- V8Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
+ Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
-StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
- CompletionAction action,
- StepOrigin step_origin) {
- StepResult result = StepResult::kMoreWorkRemaining;
+StepResult IncrementalMarking::Step(double max_step_size_in_ms,
+ CompletionAction action,
+ StepOrigin step_origin) {
double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) {
@@ -1029,7 +1055,11 @@ StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
FinalizeSweeping();
}
- size_t bytes_processed = 0, bytes_to_process = 0;
+ StepResult combined_result = StepResult::kMoreWorkRemaining;
+ size_t bytes_to_process = 0;
+ size_t v8_bytes_processed = 0;
+ double embedder_duration = 0.0;
+ double embedder_deadline = 0.0;
if (state_ == MARKING) {
if (FLAG_concurrent_marking) {
heap_->new_space()->ResetOriginalTop();
@@ -1054,34 +1084,42 @@ StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
}
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
+ const double marking_speed =
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
- max_step_size_in_ms,
- heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ max_step_size_in_ms, marking_speed);
bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
- if (bytes_to_process == 0) {
- result = StepResult::kNoImmediateWork;
+ bytes_to_process = Max(bytes_to_process, kMinStepSizeInBytes);
+
+ // Perform a single V8 and a single embedder step. In case both have been
+ // observed as empty back to back, we can finalize.
+ //
+ // This ignores the case where the embedder finds new V8-side objects. The
+ // assumption is that large graphs are well connected and can mostly be
+ // processed on their own. For small graphs, helping is not necessary.
+ v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
+ StepResult v8_result = marking_worklists()->IsEmpty()
+ ? StepResult::kNoImmediateWork
+ : StepResult::kMoreWorkRemaining;
+ StepResult embedder_result = StepResult::kNoImmediateWork;
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ embedder_deadline =
+ Min(max_step_size_in_ms,
+ static_cast<double>(bytes_to_process) / marking_speed);
+ embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
}
-
- bytes_processed = collector_->ProcessMarkingWorklist(
- Max(bytes_to_process, kMinStepSizeInBytes));
-
- bytes_marked_ += bytes_processed;
-
- if (marking_worklists()->IsEmpty()) {
- result = StepResult::kNoImmediateWork;
- if (heap_->local_embedder_heap_tracer()
- ->ShouldFinalizeIncrementalMarking()) {
- if (!finalize_marking_completed_) {
- FinalizeMarking(action);
- FastForwardSchedule();
- result = StepResult::kWaitingForFinalization;
- incremental_marking_job()->Start(heap_);
- } else {
- MarkingComplete(action);
- result = StepResult::kWaitingForFinalization;
- }
+ bytes_marked_ += v8_bytes_processed;
+ combined_result = CombineStepResults(v8_result, embedder_result);
+
+ if (combined_result == StepResult::kNoImmediateWork) {
+ if (!finalize_marking_completed_) {
+ FinalizeMarking(action);
+ FastForwardSchedule();
+ combined_result = StepResult::kWaitingForFinalization;
+ incremental_marking_job()->Start(heap_);
} else {
- heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
+ MarkingComplete(action);
+ combined_result = StepResult::kWaitingForFinalization;
}
}
if (FLAG_concurrent_marking) {
@@ -1089,22 +1127,22 @@ StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
}
-
- double end = heap_->MonotonicallyIncreasingTimeInMs();
- double duration = (end - start);
if (state_ == MARKING) {
- // Note that we report zero bytes here when sweeping was in progress or
- // when we just started incremental marking. In these cases we did not
- // process the marking deque.
- heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+ // Note that we do not report any marked bytes when the step only finished
+ // sweeping, as we did not process the marking worklist.
+ const double v8_duration =
+ heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
+ heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
}
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Step %s %zuKB (%zuKB) in %.1f\n",
+ "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms (%fms) "
+ "in %.1f\n",
step_origin == StepOrigin::kV8 ? "in v8" : "in task",
- bytes_processed / KB, bytes_to_process / KB, duration);
+ v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
+ embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
}
- return result;
+ return combined_result;
}
} // namespace internal
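Note on the rewritten step logic above: AdvanceWithDeadline() no longer loops over V8Step(); the unified Step() performs one V8 marking step plus one embedder step and combines the two outcomes, finalizing only when both report no immediate work. A minimal standalone sketch of that combination (plain C++; the speed and byte figures are illustrative assumptions, not V8's API):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    enum class StepResult {
      kNoImmediateWork,
      kMoreWorkRemaining,
      kWaitingForFinalization
    };

    // Mirrors CombineStepResults(): the inputs may only say whether immediate
    // work remains; kWaitingForFinalization is decided by the caller afterwards.
    StepResult Combine(StepResult a, StepResult b) {
      assert(a != StepResult::kWaitingForFinalization);
      assert(b != StepResult::kWaitingForFinalization);
      if (a == StepResult::kMoreWorkRemaining ||
          b == StepResult::kMoreWorkRemaining)
        return StepResult::kMoreWorkRemaining;
      return StepResult::kNoImmediateWork;
    }

    int main() {
      // Embedder deadline derivation as in Step(): the time budget implied by
      // the V8 byte budget at the current marking speed, capped at step size.
      const double max_step_size_in_ms = 1.0;
      const double marking_speed = 512.0 * 1024.0;  // bytes/ms, assumed
      const size_t bytes_to_process = 256 * 1024;   // assumed budget
      const double embedder_deadline =
          std::min(max_step_size_in_ms, bytes_to_process / marking_speed);
      std::printf("embedder deadline: %.2fms\n", embedder_deadline);

      // One V8 step + one embedder step; both empty means we may finalize.
      if (Combine(StepResult::kNoImmediateWork, StepResult::kNoImmediateWork) ==
          StepResult::kNoImmediateWork)
        std::puts("both worklists drained; finalize marking");
      return 0;
    }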
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index bd2b93efe2..04000be352 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -26,7 +26,7 @@ enum class StepResult {
kWaitingForFinalization
};
-class V8_EXPORT_PRIVATE IncrementalMarking {
+class V8_EXPORT_PRIVATE IncrementalMarking final {
public:
enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
@@ -105,9 +105,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
return state_;
}
- bool should_hurry() const { return should_hurry_; }
- void set_should_hurry(bool val) { should_hurry_ = val; }
-
bool finalize_marking_completed() const {
return finalize_marking_completed_;
}
@@ -172,11 +169,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeSweeping();
- StepResult V8Step(double max_step_size_in_ms, CompletionAction action,
- StepOrigin step_origin);
+ StepResult Step(double max_step_size_in_ms, CompletionAction action,
+ StepOrigin step_origin);
bool ShouldDoEmbedderStep();
- StepResult EmbedderStep(double duration);
+ StepResult EmbedderStep(double expected_duration_ms, double* duration_ms);
V8_INLINE void RestartIfNotMarking();
@@ -301,32 +298,34 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
heap_->SetIsMarkingFlag(s >= MARKING);
}
+ double CurrentTimeToMarkingTask() const;
+
Heap* const heap_;
MarkCompactCollector* const collector_;
WeakObjects* weak_objects_;
- double start_time_ms_;
- size_t initial_old_generation_size_;
- size_t old_generation_allocation_counter_;
- size_t bytes_marked_;
- size_t scheduled_bytes_to_mark_;
- double schedule_update_time_ms_;
+ double start_time_ms_ = 0.0;
+ double time_to_force_completion_ = 0.0;
+ size_t initial_old_generation_size_ = 0;
+ size_t old_generation_allocation_counter_ = 0;
+ size_t bytes_marked_ = 0;
+ size_t scheduled_bytes_to_mark_ = 0;
+ double schedule_update_time_ms_ = 0.0;
// A sample of concurrent_marking()->TotalMarkedBytes() at the last
// incremental marking step. It is used for updating
// bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
- size_t bytes_marked_concurrently_;
+ size_t bytes_marked_concurrently_ = 0;
// Must use SetState() above to update state_
State state_;
- bool is_compacting_;
- bool should_hurry_;
- bool was_activated_;
- bool black_allocation_;
- bool finalize_marking_completed_;
+ bool is_compacting_ = false;
+ bool was_activated_ = false;
+ bool black_allocation_ = false;
+ bool finalize_marking_completed_ = false;
IncrementalMarkingJob incremental_marking_job_;
- GCRequestType request_type_;
+ GCRequestType request_type_ = NONE;
Observer new_generation_observer_;
Observer old_generation_observer_;
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
new file mode 100644
index 0000000000..392b343236
--- /dev/null
+++ b/deps/v8/src/heap/local-heap.cc
@@ -0,0 +1,71 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/local-heap.h"
+#include "src/handles/local-handles.h"
+#include "src/heap/heap.h"
+#include "src/heap/safepoint.h"
+
+namespace v8 {
+namespace internal {
+
+LocalHeap::LocalHeap(Heap* heap)
+ : heap_(heap),
+ state_(ThreadState::Running),
+ safepoint_requested_(false),
+ prev_(nullptr),
+ next_(nullptr),
+ handles_(new LocalHandles) {
+ heap_->safepoint()->AddLocalHeap(this);
+}
+
+LocalHeap::~LocalHeap() {
+ // Park thread since removing the local heap could block.
+ EnsureParkedBeforeDestruction();
+
+ heap_->safepoint()->RemoveLocalHeap(this);
+}
+
+void LocalHeap::Park() {
+ base::MutexGuard guard(&state_mutex_);
+ CHECK(state_ == ThreadState::Running);
+ state_ = ThreadState::Parked;
+ state_change_.NotifyAll();
+}
+
+void LocalHeap::Unpark() {
+ base::MutexGuard guard(&state_mutex_);
+ CHECK(state_ == ThreadState::Parked);
+ state_ = ThreadState::Running;
+}
+
+void LocalHeap::EnsureParkedBeforeDestruction() {
+ base::MutexGuard guard(&state_mutex_);
+ state_ = ThreadState::Parked;
+ state_change_.NotifyAll();
+}
+
+void LocalHeap::RequestSafepoint() {
+ safepoint_requested_.store(true, std::memory_order_relaxed);
+}
+
+bool LocalHeap::IsSafepointRequested() {
+ return safepoint_requested_.load(std::memory_order_relaxed);
+}
+
+void LocalHeap::Safepoint() {
+ if (IsSafepointRequested()) {
+ ClearSafepointRequested();
+ EnterSafepoint();
+ }
+}
+
+void LocalHeap::ClearSafepointRequested() {
+ safepoint_requested_.store(false, std::memory_order_relaxed);
+}
+
+void LocalHeap::EnterSafepoint() { heap_->safepoint()->EnterFromThread(this); }
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
new file mode 100644
index 0000000000..a6eed1d928
--- /dev/null
+++ b/deps/v8/src/heap/local-heap.h
@@ -0,0 +1,89 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_LOCAL_HEAP_H_
+#define V8_HEAP_LOCAL_HEAP_H_
+
+#include <atomic>
+#include <memory>
+
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Safepoint;
+class LocalHandles;
+
+class LocalHeap {
+ public:
+ V8_EXPORT_PRIVATE explicit LocalHeap(Heap* heap);
+ V8_EXPORT_PRIVATE ~LocalHeap();
+
+ // Invoked by the main thread to signal this thread that it needs to halt
+ // in a safepoint.
+ void RequestSafepoint();
+
+ // Frequently invoked by the local thread to check whether a safepoint was
+ // requested from the main thread.
+ V8_EXPORT_PRIVATE void Safepoint();
+
+ LocalHandles* handles() { return handles_.get(); }
+
+ private:
+ enum class ThreadState {
+ // Threads in this state need to be stopped in a safepoint.
+ Running,
+ // Thread was parked, which means that the thread is not allowed to access
+ // or manipulate the heap in any way.
+ Parked,
+ // Thread was stopped in a safepoint.
+ Safepoint
+ };
+
+ V8_EXPORT_PRIVATE void Park();
+ V8_EXPORT_PRIVATE void Unpark();
+ void EnsureParkedBeforeDestruction();
+
+ bool IsSafepointRequested();
+ void ClearSafepointRequested();
+
+ void EnterSafepoint();
+
+ Heap* heap_;
+
+ base::Mutex state_mutex_;
+ base::ConditionVariable state_change_;
+ ThreadState state_;
+
+ std::atomic<bool> safepoint_requested_;
+
+ LocalHeap* prev_;
+ LocalHeap* next_;
+
+ std::unique_ptr<LocalHandles> handles_;
+
+ friend class Heap;
+ friend class Safepoint;
+ friend class ParkedScope;
+};
+
+class ParkedScope {
+ public:
+ explicit ParkedScope(LocalHeap* local_heap) : local_heap_(local_heap) {
+ local_heap_->Park();
+ }
+
+ ~ParkedScope() { local_heap_->Unpark(); }
+
+ private:
+ LocalHeap* local_heap_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_LOCAL_HEAP_H_
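The LocalHeap states above implement a simple contract: a background thread either runs (and must poll Safepoint() regularly) or parks itself before blocking, so the main thread never waits on it during a pause. A standalone sketch of the handshake, with std:: primitives standing in for the v8::base ones (toy names, not V8's API):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    class ToyLocalHeap {
     public:
      // Thread promises not to touch the heap until Unpark(); the notification
      // lets a main thread waiting in StopThreads() proceed without it.
      void Park() {
        std::lock_guard<std::mutex> guard(mutex_);
        state_ = State::kParked;
        cv_.notify_all();
      }
      void Unpark() {
        std::lock_guard<std::mutex> guard(mutex_);
        state_ = State::kRunning;
      }
      // Polled frequently from the worker; cheap when nothing is pending.
      void Safepoint() {
        if (requested_.load(std::memory_order_relaxed)) {
          requested_.store(false, std::memory_order_relaxed);
          // The real EnterSafepoint() blocks here until the GC pause ends.
        }
      }
      void RequestSafepoint() {
        requested_.store(true, std::memory_order_relaxed);
      }

     private:
      enum class State { kRunning, kParked };
      std::mutex mutex_;
      std::condition_variable cv_;
      State state_ = State::kRunning;
      std::atomic<bool> requested_{false};
    };

    // RAII parking, mirroring ParkedScope.
    class ToyParkedScope {
     public:
      explicit ToyParkedScope(ToyLocalHeap* heap) : heap_(heap) {
        heap_->Park();
      }
      ~ToyParkedScope() { heap_->Unpark(); }

     private:
      ToyLocalHeap* heap_;
    };

    int main() {
      ToyLocalHeap heap;
      {
        ToyParkedScope parked(&heap);  // e.g. around a blocking wait
      }
      heap.RequestSafepoint();
      heap.Safepoint();
      return 0;
    }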
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 3c90344e68..295982beb5 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -205,8 +205,11 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
- map = Map::cast(ObjectSlot(addr).Acquire_Load());
+ Object map_object = ObjectSlot(addr).Acquire_Load();
+ CHECK(map_object.IsMap());
+ map = Map::cast(map_object);
size = black_object.SizeFromMap(map);
+ CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
// One word filler objects do not borrow the second mark bit. We have
// to jump over the advancing and clearing part.
@@ -232,9 +235,12 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
object = black_object;
}
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
- map = Map::cast(ObjectSlot(addr).Acquire_Load());
+ Object map_object = ObjectSlot(addr).Acquire_Load();
+ CHECK(map_object.IsMap());
+ map = Map::cast(map_object);
object = HeapObject::FromAddress(addr);
size = object.SizeFromMap(map);
+ CHECK_LE(addr + size, chunk_->area_end());
}
// We found a live object.
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 2113f89ba6..7a87adb5f6 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -387,8 +387,9 @@ int NumberOfAvailableCores() {
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
DCHECK_GT(pages, 0);
- int tasks =
- FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
+ int tasks = FLAG_parallel_compaction ? Min(NumberOfAvailableCores(),
+ pages / (MB / Page::kPageSize) + 1)
+ : 1;
if (!heap_->CanExpandOldGeneration(
static_cast<size_t>(tasks * Page::kPageSize))) {
// Optimize for memory usage near the heap limit.
@@ -2217,7 +2218,7 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// Initialize the uncompiled data.
UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
- uncompiled_data.Init(
+ uncompiled_data.InitAfterBytecodeFlush(
inferred_name, start_position, end_position,
[](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
@@ -2493,18 +2494,18 @@ void MarkCompactCollector::ClearJSWeakRefs() {
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
DCHECK(!target.IsUndefined());
// The value of the WeakCell is dead.
- JSFinalizationGroup finalization_group =
- JSFinalizationGroup::cast(weak_cell.finalization_group());
- if (!finalization_group.scheduled_for_cleanup()) {
- heap()->AddDirtyJSFinalizationGroup(finalization_group,
- gc_notify_updated_slot);
+ JSFinalizationRegistry finalization_registry =
+ JSFinalizationRegistry::cast(weak_cell.finalization_registry());
+ if (!finalization_registry.scheduled_for_cleanup()) {
+ heap()->EnqueueDirtyJSFinalizationRegistry(finalization_registry,
+ gc_notify_updated_slot);
}
- // We're modifying the pointers in WeakCell and JSFinalizationGroup during
- // GC; thus we need to record the slots it writes. The normal write
+ // We're modifying the pointers in WeakCell and JSFinalizationRegistry
+ // during GC; thus we need to record the slots it writes. The normal write
// barrier is not enough, since it's disabled before GC.
weak_cell.Nullify(isolate(), gc_notify_updated_slot);
- DCHECK(finalization_group.NeedsCleanup());
- DCHECK(finalization_group.scheduled_for_cleanup());
+ DCHECK(finalization_registry.NeedsCleanup());
+ DCHECK(finalization_registry.scheduled_for_cleanup());
} else {
// The value of the WeakCell is alive.
ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
@@ -2520,9 +2521,9 @@ void MarkCompactCollector::ClearJSWeakRefs() {
// WeakCell. Like above, we're modifying pointers during GC, so record the
// slots.
HeapObject undefined = ReadOnlyRoots(isolate()).undefined_value();
- JSFinalizationGroup finalization_group =
- JSFinalizationGroup::cast(weak_cell.finalization_group());
- finalization_group.RemoveUnregisterToken(
+ JSFinalizationRegistry finalization_registry =
+ JSFinalizationRegistry::cast(weak_cell.finalization_registry());
+ finalization_registry.RemoveUnregisterToken(
JSReceiver::cast(unregister_token), isolate(),
[undefined](WeakCell matched_cell) {
matched_cell.set_unregister_token(undefined);
@@ -2534,6 +2535,9 @@ void MarkCompactCollector::ClearJSWeakRefs() {
RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
}
+ if (!isolate()->host_cleanup_finalization_group_callback()) {
+ heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
+ }
}
void MarkCompactCollector::AbortWeakObjects() {
@@ -2936,7 +2940,7 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
{
- AlwaysAllocateScope always_allocate(heap()->isolate());
+ AlwaysAllocateScope always_allocate(heap());
TimedScope timed_scope(&evacuation_time);
RawEvacuatePage(chunk, &saved_live_bytes);
}
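The compaction-task change above replaces one-task-per-page scaling with roughly one task per megabyte of pages, still capped by the available cores. A small sketch of the heuristic, assuming 256 KB pages (the value of Page::kPageSize at the time; illustrative here):

    #include <algorithm>
    #include <cstdio>

    // Heuristic from NumberOfParallelCompactionTasks(): roughly one task per
    // megabyte of pages, capped by the core count.
    int NumberOfCompactionTasks(int pages, int cores, bool parallel_compaction) {
      constexpr int kPageSize = 256 * 1024;
      constexpr int kPagesPerMB = (1024 * 1024) / kPageSize;  // 4
      return parallel_compaction ? std::min(cores, pages / kPagesPerMB + 1) : 1;
    }

    int main() {
      // Three pages of work no longer fan out across all cores:
      std::printf("%d\n", NumberOfCompactionTasks(3, 8, true));   // 1
      std::printf("%d\n", NumberOfCompactionTasks(40, 8, true));  // 8, core-capped
      return 0;
    }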
diff --git a/deps/v8/src/heap/marking-worklist.h b/deps/v8/src/heap/marking-worklist.h
index ffa2a15fa1..d4659b0ac7 100644
--- a/deps/v8/src/heap/marking-worklist.h
+++ b/deps/v8/src/heap/marking-worklist.h
@@ -81,6 +81,10 @@ class V8_EXPORT_PRIVATE MarkingWorklistsHolder {
on_hold_.Update(callback);
embedder_.Update(callback);
for (auto cw : context_worklists_) {
+ if (cw.context == kSharedContext) {
+ // The shared context was updated above.
+ continue;
+ }
cw.worklist->Update(callback);
}
}
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index 3d2c794ca2..2a59ac5a4d 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -13,6 +13,7 @@
#include "src/heap/factory.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-worklist.h"
+#include "src/logging/counters.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-promise.h"
#include "src/tasks/task-utils.h"
@@ -178,8 +179,12 @@ bool MemoryMeasurement::EnqueueRequest(
}
Handle<WeakFixedArray> global_weak_contexts =
isolate_->global_handles()->Create(*weak_contexts);
- Request request = {std::move(delegate), global_weak_contexts,
- std::vector<size_t>(length), 0u};
+ Request request = {std::move(delegate),
+ global_weak_contexts,
+ std::vector<size_t>(length),
+ 0u,
+ {}};
+ request.timer.Start();
received_.push_back(std::move(request));
ScheduleGCTask(execution);
return true;
@@ -303,6 +308,8 @@ void MemoryMeasurement::ReportResults() {
sizes.push_back(std::make_pair(context, request.sizes[i]));
}
request.delegate->MeasurementComplete(sizes, request.shared);
+ isolate_->counters()->measure_memory_delay_ms()->AddSample(
+ static_cast<int>(request.timer.Elapsed().InMilliseconds()));
}
}
diff --git a/deps/v8/src/heap/memory-measurement.h b/deps/v8/src/heap/memory-measurement.h
index eb12cb152d..d72dd1eba9 100644
--- a/deps/v8/src/heap/memory-measurement.h
+++ b/deps/v8/src/heap/memory-measurement.h
@@ -8,6 +8,7 @@
#include <list>
#include <unordered_map>
+#include "src/base/platform/elapsed-timer.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/objects/map.h"
@@ -34,12 +35,13 @@ class MemoryMeasurement {
Handle<JSPromise> promise, v8::MeasureMemoryMode mode);
private:
- static const int kGCTaskDelayInSeconds = 60;
+ static const int kGCTaskDelayInSeconds = 10;
struct Request {
std::unique_ptr<v8::MeasureMemoryDelegate> delegate;
Handle<WeakFixedArray> contexts;
std::vector<size_t> sizes;
size_t shared;
+ base::ElapsedTimer timer;
};
void ScheduleReportingTask();
void ReportResults();
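The two memory-measurement changes above shorten the GC task delay from 60 s to 10 s and record, per request, the wall-clock time from enqueueing to reporting. A sketch of the timing pattern with std::chrono standing in for base::ElapsedTimer (the histogram name is reproduced from the code; everything else is illustrative):

    #include <chrono>
    #include <cstdio>

    int main() {
      // request.timer.Start() at enqueue time...
      const auto start = std::chrono::steady_clock::now();

      // ... the measurement GC runs at some later point ...

      // ... and ReportResults() samples the elapsed milliseconds:
      const auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::steady_clock::now() - start);
      std::printf("measure_memory_delay_ms sample: %d\n",
                  static_cast<int>(elapsed.count()));
      return 0;
    }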
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 28ef967c5c..fe27095ee5 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -94,7 +94,7 @@ class ObjectStats {
public:
static const size_t kNoOverAllocation = 0;
- explicit ObjectStats(Heap* heap) : heap_(heap) { ClearObjectStats(); }
+ explicit ObjectStats(Heap* heap) : heap_(heap) { ClearObjectStats(true); }
// See description on VIRTUAL_INSTANCE_TYPE_LIST.
enum VirtualInstanceType {
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index d4d6d9375c..03df9a175d 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -18,6 +18,7 @@
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
#include "src/wasm/wasm-objects.h"
+#include "torque-generated/objects-body-descriptors-tq-inl.h"
namespace v8 {
namespace internal {
@@ -43,6 +44,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
return visitor->Visit##TypeName( \
map, ConcreteVisitor::template Cast<TypeName>(object));
TYPED_VISITOR_ID_LIST(CASE)
+ TORQUE_VISITOR_ID_LIST(CASE)
#undef CASE
case kVisitShortcutCandidate:
return visitor->VisitShortcutCandidate(
@@ -96,6 +98,7 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
return static_cast<ResultType>(size); \
}
TYPED_VISITOR_ID_LIST(VISIT)
+TORQUE_VISITOR_ID_LIST(VISIT)
#undef VISIT
template <typename ResultType, typename ConcreteVisitor>
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index ec494715ba..218a7a03c9 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -185,10 +185,37 @@ struct WeakListVisitor<AllocationSite> {
static void VisitPhantomObject(Heap*, AllocationSite) {}
};
+template <>
+struct WeakListVisitor<JSFinalizationRegistry> {
+ static void SetWeakNext(JSFinalizationRegistry obj, Object next) {
+ obj.set_next_dirty(next, UPDATE_WEAK_WRITE_BARRIER);
+ }
+
+ static Object WeakNext(JSFinalizationRegistry obj) {
+ return obj.next_dirty();
+ }
+
+ static HeapObject WeakNextHolder(JSFinalizationRegistry obj) { return obj; }
+
+ static int WeakNextOffset() {
+ return JSFinalizationRegistry::kNextDirtyOffset;
+ }
+
+ static void VisitLiveObject(Heap* heap, JSFinalizationRegistry obj,
+ WeakObjectRetainer*) {
+ heap->set_dirty_js_finalization_registries_list_tail(obj);
+ }
+
+ static void VisitPhantomObject(Heap*, JSFinalizationRegistry) {}
+};
+
template Object VisitWeakList<Context>(Heap* heap, Object list,
WeakObjectRetainer* retainer);
template Object VisitWeakList<AllocationSite>(Heap* heap, Object list,
WeakObjectRetainer* retainer);
+
+template Object VisitWeakList<JSFinalizationRegistry>(
+ Heap* heap, Object list, WeakObjectRetainer* retainer);
} // namespace internal
} // namespace v8
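The new WeakListVisitor&lt;JSFinalizationRegistry&gt; specialization above plugs the dirty-registry list into the generic VisitWeakList() machinery: the list is threaded through the objects themselves, and the trait supplies the next-pointer accessors plus live/phantom callbacks. A toy standalone version of the pattern (stand-in types; V8's real version also threads through write barriers and a retainer):

    #include <cstdio>

    struct Registry {
      Registry* next_dirty = nullptr;
      bool alive = true;
    };

    template <class T>
    struct WeakListVisitor;  // primary template, specialized per element type

    template <>
    struct WeakListVisitor<Registry> {
      static void SetWeakNext(Registry* obj, Registry* next) {
        obj->next_dirty = next;
      }
      static Registry* WeakNext(Registry* obj) { return obj->next_dirty; }
      static void VisitLiveObject(Registry*) { std::puts("retained"); }
      static void VisitPhantomObject(Registry*) { std::puts("dropped"); }
    };

    // Keeps live elements linked, skips dead ones, returns the new list head.
    template <class T>
    T* VisitWeakList(T* list) {
      T* head = nullptr;
      T* prev = nullptr;
      for (T* cur = list; cur != nullptr;
           cur = WeakListVisitor<T>::WeakNext(cur)) {
        if (cur->alive) {
          if (prev) {
            WeakListVisitor<T>::SetWeakNext(prev, cur);
          } else {
            head = cur;
          }
          prev = cur;
          WeakListVisitor<T>::VisitLiveObject(cur);
        } else {
          WeakListVisitor<T>::VisitPhantomObject(cur);
        }
      }
      if (prev) WeakListVisitor<T>::SetWeakNext(prev, nullptr);
      return head;
    }

    int main() {
      Registry a, b, c;
      a.next_dirty = &b;
      b.next_dirty = &c;
      b.alive = false;    // b is dead and gets unlinked
      VisitWeakList(&a);  // prints: retained, dropped, retained
      return 0;
    }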
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index d197bb4fd2..ea70f36ac0 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -23,6 +23,7 @@ namespace internal {
V(CodeDataContainer) \
V(ConsString) \
V(Context) \
+ V(CoverageInfo) \
V(DataHandler) \
V(DescriptorArray) \
V(EmbedderDataArray) \
@@ -67,6 +68,7 @@ namespace internal {
#define FORWARD_DECLARE(TypeName) class TypeName;
TYPED_VISITOR_ID_LIST(FORWARD_DECLARE)
+TORQUE_VISITOR_ID_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE
// The base class for visitors that need to dispatch on object type. The default
@@ -102,6 +104,7 @@ class HeapVisitor : public ObjectVisitor {
#define VISIT(TypeName) \
V8_INLINE ResultType Visit##TypeName(Map map, TypeName object);
TYPED_VISITOR_ID_LIST(VISIT)
+ TORQUE_VISITOR_ID_LIST(VISIT)
#undef VISIT
V8_INLINE ResultType VisitShortcutCandidate(Map map, ConsString object);
V8_INLINE ResultType VisitDataObject(Map map, HeapObject object);
@@ -143,8 +146,6 @@ class WeakObjectRetainer;
// access the next-element pointers.
template <class T>
Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer);
-template <class T>
-Object VisitWeakList2(Heap* heap, Object list, WeakObjectRetainer* retainer);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/off-thread-factory-inl.h b/deps/v8/src/heap/off-thread-factory-inl.h
index 8352565ad5..75f35d94c4 100644
--- a/deps/v8/src/heap/off-thread-factory-inl.h
+++ b/deps/v8/src/heap/off-thread-factory-inl.h
@@ -7,18 +7,29 @@
#include "src/heap/off-thread-factory.h"
+#include "src/heap/factory-base-inl.h"
#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
-#define ROOT_ACCESSOR(Type, name, CamelName) \
- OffThreadHandle<Type> OffThreadFactory::name() { \
- return OffThreadHandle<Type>(read_only_roots().name()); \
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Handle<Type> OffThreadFactory::name() { \
+ return read_only_roots().name##_handle(); \
}
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
+#define ACCESSOR_INFO_ACCESSOR(Type, name, CamelName) \
+ Handle<Type> OffThreadFactory::name() { \
+ /* Do a bit of handle location magic to cast the Handle without having */ \
+ /* to pull in Type::cast. We know the type is right by construction. */ \
+ return Handle<Type>( \
+ isolate()->isolate_->root_handle(RootIndex::k##CamelName).location()); \
+ }
+ACCESSOR_INFO_ROOT_LIST(ACCESSOR_INFO_ACCESSOR)
+#undef ACCESSOR_INFO_ACCESSOR
+
#endif // V8_HEAP_OFF_THREAD_FACTORY_INL_H_
} // namespace internal
diff --git a/deps/v8/src/heap/off-thread-factory.cc b/deps/v8/src/heap/off-thread-factory.cc
index 66aca9261c..d8cab5df69 100644
--- a/deps/v8/src/heap/off-thread-factory.cc
+++ b/deps/v8/src/heap/off-thread-factory.cc
@@ -16,10 +16,12 @@
#include "src/objects/heap-object.h"
#include "src/objects/map-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
#include "src/objects/visitors.h"
#include "src/roots/roots-inl.h"
#include "src/roots/roots.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -106,98 +108,138 @@ void OffThreadFactory::Publish(Isolate* isolate) {
// structure off-thread and merge it into the current handle scope all in one
// go (DeferredHandles maybe?).
std::vector<Handle<HeapObject>> heap_object_handles;
- heap_object_handles.reserve(string_slots_.size());
- for (RelativeSlot relative_slot : string_slots_) {
- // TODO(leszeks): Group slots in the same parent object to avoid creating
- // multiple duplicate handles.
- heap_object_handles.push_back(handle(
- HeapObject::cast(Object(relative_slot.object_address)), isolate));
-
- // De-internalize the string so that we can re-internalize it later.
- ObjectSlot slot(relative_slot.object_address + relative_slot.slot_offset);
- String string = String::cast(slot.Acquire_Load());
- bool one_byte = string.IsOneByteRepresentation();
- Map map = one_byte ? read_only_roots().one_byte_string_map()
- : read_only_roots().string_map();
- string.set_map_no_write_barrier(map);
+ std::vector<Handle<Script>> script_handles;
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.CollectHandles");
+ heap_object_handles.reserve(string_slots_.size());
+ for (RelativeSlot relative_slot : string_slots_) {
+ // TODO(leszeks): Group slots in the same parent object to avoid creating
+ // multiple duplicate handles.
+ heap_object_handles.push_back(handle(
+ HeapObject::cast(Object(relative_slot.object_address)), isolate));
+
+ // De-internalize the string so that we can re-internalize it later.
+ ObjectSlot slot(relative_slot.object_address + relative_slot.slot_offset);
+ String string = String::cast(slot.Acquire_Load());
+ bool one_byte = string.IsOneByteRepresentation();
+ Map map = one_byte ? read_only_roots().one_byte_string_map()
+ : read_only_roots().string_map();
+ string.set_map_no_write_barrier(map);
+ }
+
+ script_handles.reserve(script_list_.size());
+ for (Script script : script_list_) {
+ script_handles.push_back(handle(script, isolate));
+ }
}
// Then merge the spaces. At this point, we are allowed to point between (no
// longer) off-thread pages and main-thread heap pages, and objects in the
// previously off-thread page can move.
- isolate->heap()->old_space()->MergeLocalSpace(&space_);
- isolate->heap()->lo_space()->MergeOffThreadSpace(&lo_space_);
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.Merge");
+ isolate->heap()->old_space()->MergeLocalSpace(&space_);
+ isolate->heap()->lo_space()->MergeOffThreadSpace(&lo_space_);
+ }
// Iterate the string slots, as an offset from the holders we have handles to.
- for (size_t i = 0; i < string_slots_.size(); ++i) {
- int slot_offset = string_slots_[i].slot_offset;
-
- // There's currently no cases where the holder object could have been
- // resized.
- DCHECK_LT(slot_offset, heap_object_handles[i]->Size());
-
- ObjectSlot slot(heap_object_handles[i]->ptr() + slot_offset);
-
- String string = String::cast(slot.Acquire_Load());
- if (string.IsThinString()) {
- // We may have already internalized this string via another slot.
- slot.Release_Store(ThinString::cast(string).GetUnderlying());
- } else {
- HandleScope handle_scope(isolate);
-
- Handle<String> string_handle = handle(string, isolate);
- Handle<String> internalized_string =
- isolate->factory()->InternalizeString(string_handle);
-
- // Recalculate the slot in case there was GC and the holder moved.
- ObjectSlot slot(heap_object_handles[i]->ptr() +
- string_slots_[i].slot_offset);
-
- DCHECK(string_handle->IsThinString() ||
- string_handle->IsInternalizedString());
- if (*string_handle != *internalized_string) {
- slot.Release_Store(*internalized_string);
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.UpdateHandles");
+ for (size_t i = 0; i < string_slots_.size(); ++i) {
+ int slot_offset = string_slots_[i].slot_offset;
+
+ // There are currently no cases where the holder object could have been
+ // resized.
+ DCHECK_LT(slot_offset, heap_object_handles[i]->Size());
+
+ ObjectSlot slot(heap_object_handles[i]->ptr() + slot_offset);
+
+ String string = String::cast(slot.Acquire_Load());
+ if (string.IsThinString()) {
+ // We may have already internalized this string via another slot.
+ slot.Release_Store(ThinString::cast(string).GetUnderlying());
+ } else {
+ HandleScope handle_scope(isolate);
+
+ Handle<String> string_handle = handle(string, isolate);
+ Handle<String> internalized_string =
+ isolate->factory()->InternalizeString(string_handle);
+
+ // Recalculate the slot in case there was GC and the holder moved.
+ ObjectSlot slot(heap_object_handles[i]->ptr() + slot_offset);
+
+ DCHECK(string_handle->IsThinString() ||
+ string_handle->IsInternalizedString());
+ if (*string_handle != *internalized_string) {
+ slot.Release_Store(*internalized_string);
+ }
}
}
- }
-}
-OffThreadHandle<Object> OffThreadFactory::NewInvalidStringLengthError() {
- // TODO(leszeks): Implement.
- UNREACHABLE();
+ // Merge the recorded scripts into the isolate's script list.
+ // This for loop may seem expensive, but in practice there is unlikely to
+ // be more than one script in the OffThreadFactory.
+ Handle<WeakArrayList> scripts = isolate->factory()->script_list();
+ for (Handle<Script> script_handle : script_handles) {
+ scripts = WeakArrayList::Append(isolate, scripts,
+ MaybeObjectHandle::Weak(script_handle));
+ }
+ isolate->heap()->SetRootScriptList(*scripts);
+ }
}
// Hacky method for creating a simple object with a slot pointing to a string.
// TODO(leszeks): Remove once we have full FixedArray support.
-OffThreadHandle<FixedArray> OffThreadFactory::StringWrapperForTest(
- OffThreadHandle<String> string) {
+Handle<FixedArray> OffThreadFactory::StringWrapperForTest(
+ Handle<String> string) {
HeapObject wrapper =
AllocateRaw(FixedArray::SizeFor(1), AllocationType::kOld);
wrapper.set_map_after_allocation(read_only_roots().fixed_array_map());
FixedArray array = FixedArray::cast(wrapper);
array.set_length(1);
array.data_start().Relaxed_Store(*string);
- return OffThreadHandle<FixedArray>(array);
+ return handle(array, isolate());
}
-OffThreadHandle<String> OffThreadFactory::MakeOrFindTwoCharacterString(
- uint16_t c1, uint16_t c2) {
+Handle<String> OffThreadFactory::MakeOrFindTwoCharacterString(uint16_t c1,
+ uint16_t c2) {
// TODO(leszeks): Do some real caching here. Also, these strings should be
// internalized.
if ((c1 | c2) <= unibrow::Latin1::kMaxChar) {
- OffThreadHandle<SeqOneByteString> ret =
- NewRawOneByteString(2, AllocationType::kOld);
+ Handle<SeqOneByteString> ret =
+ NewRawOneByteString(2, AllocationType::kOld).ToHandleChecked();
ret->SeqOneByteStringSet(0, c1);
ret->SeqOneByteStringSet(1, c2);
return ret;
}
- OffThreadHandle<SeqTwoByteString> ret =
- NewRawTwoByteString(2, AllocationType::kOld);
+ Handle<SeqTwoByteString> ret =
+ NewRawTwoByteString(2, AllocationType::kOld).ToHandleChecked();
ret->SeqTwoByteStringSet(0, c1);
ret->SeqTwoByteStringSet(1, c2);
return ret;
}
+Handle<String> OffThreadFactory::InternalizeString(
+ const Vector<const uint8_t>& string) {
+ uint32_t hash = StringHasher::HashSequentialString(
+ string.begin(), string.length(), HashSeed(read_only_roots()));
+ return NewOneByteInternalizedString(string, hash);
+}
+
+Handle<String> OffThreadFactory::InternalizeString(
+ const Vector<const uint16_t>& string) {
+ uint32_t hash = StringHasher::HashSequentialString(
+ string.begin(), string.length(), HashSeed(read_only_roots()));
+ return NewTwoByteInternalizedString(string, hash);
+}
+
+void OffThreadFactory::AddToScriptList(Handle<Script> shared) {
+ script_list_.push_back(*shared);
+}
+
HeapObject OffThreadFactory::AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment) {
DCHECK(!is_finished);
@@ -212,10 +254,5 @@ HeapObject OffThreadFactory::AllocateRaw(int size, AllocationType allocation,
return result.ToObjectChecked();
}
-void OffThreadFactory::FatalProcessOutOfHeapMemory(const char* location) {
- // TODO(leszeks): Do something reasonable.
- UNREACHABLE();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/off-thread-factory.h b/deps/v8/src/heap/off-thread-factory.h
index 244e04189d..f297bd30c6 100644
--- a/deps/v8/src/heap/off-thread-factory.h
+++ b/deps/v8/src/heap/off-thread-factory.h
@@ -7,6 +7,7 @@
#include <map>
#include <vector>
+#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/heap/factory-base.h"
@@ -14,7 +15,9 @@
#include "src/heap/read-only-heap.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
+#include "src/objects/map.h"
#include "src/objects/objects.h"
+#include "src/objects/shared-function-info.h"
#include "src/roots/roots.h"
namespace v8 {
@@ -23,16 +26,7 @@ namespace internal {
class AstValueFactory;
class AstRawString;
class AstConsString;
-
-class OffThreadFactory;
-
-template <>
-struct FactoryTraits<OffThreadFactory> {
- template <typename T>
- using HandleType = OffThreadHandle<T>;
- template <typename T>
- using MaybeHandleType = OffThreadHandle<T>;
-};
+class OffThreadIsolate;
struct RelativeSlot {
RelativeSlot() = default;
@@ -50,18 +44,29 @@ class V8_EXPORT_PRIVATE OffThreadFactory
ReadOnlyRoots read_only_roots() const { return roots_; }
-#define ROOT_ACCESSOR(Type, name, CamelName) \
- inline OffThreadHandle<Type> name();
+#define ROOT_ACCESSOR(Type, name, CamelName) inline Handle<Type> name();
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
+ // AccessorInfos appear mutable, but they're actually not mutated once they
+ // finish initializing. In particular, the root accessors are not mutated and
+ // are safe to access (as long as the off-thread job doesn't try to mutate
+ // them).
+ ACCESSOR_INFO_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
+ Handle<String> InternalizeString(const Vector<const uint8_t>& string);
+ Handle<String> InternalizeString(const Vector<const uint16_t>& string);
+
void FinishOffThread();
void Publish(Isolate* isolate);
- OffThreadHandle<Object> NewInvalidStringLengthError();
+ // The parser shouldn't allow the OffThreadFactory to get into a state where
+ // it generates errors.
+ Handle<Object> NewInvalidStringLengthError() { UNREACHABLE(); }
+ Handle<Object> NewRangeError(MessageTemplate template_index) {
+ UNREACHABLE();
+ }
- OffThreadHandle<FixedArray> StringWrapperForTest(
- OffThreadHandle<String> string);
+ Handle<FixedArray> StringWrapperForTest(Handle<String> string);
private:
friend class FactoryBase<OffThreadFactory>;
@@ -70,23 +75,28 @@ class V8_EXPORT_PRIVATE OffThreadFactory
// Customization points for FactoryBase.
HeapObject AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
- template <typename T>
- OffThreadHandle<T> Throw(OffThreadHandle<Object> exception) {
- // TODO(leszeks): Figure out what to do here.
- return OffThreadHandle<T>();
+
+ OffThreadIsolate* isolate() {
+ // Downcast to the privately inherited sub-class using c-style casts to
+ // avoid undefined behavior (as static_cast cannot cast across private
+ // bases).
+ // NOLINTNEXTLINE (google-readability-casting)
+ return (OffThreadIsolate*)this; // NOLINT(readability/casting)
}
- [[noreturn]] void FatalProcessOutOfHeapMemory(const char* location);
inline bool CanAllocateInReadOnlySpace() { return false; }
inline bool EmptyStringRootIsInitialized() { return true; }
// ------
- OffThreadHandle<String> MakeOrFindTwoCharacterString(uint16_t c1,
- uint16_t c2);
+ Handle<String> MakeOrFindTwoCharacterString(uint16_t c1, uint16_t c2);
+
+ void AddToScriptList(Handle<Script> shared);
+ // ------
ReadOnlyRoots roots_;
OffThreadSpace space_;
OffThreadLargeObjectSpace lo_space_;
std::vector<RelativeSlot> string_slots_;
+ std::vector<Script> script_list_;
bool is_finished = false;
};
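The isolate() accessor above relies on a subtle property of C-style casts: unlike static_cast, they may perform a base-to-derived pointer conversion even when the base is private, and the conversion is well-defined as long as the object really is of the derived type. A toy demonstration (names are illustrative, not V8's):

    #include <cstdio>

    class Factory {
     public:
      void Hello() { std::puts("factory"); }
    };

    // Privately inherits, like OffThreadIsolate : private OffThreadFactory.
    class ToyIsolate : private Factory {
     public:
      Factory* factory() { return this; }  // upcast is accessible in-class
    };

    int main() {
      ToyIsolate isolate;
      Factory* f = isolate.factory();
      // static_cast<ToyIsolate*>(f) would not compile: the base is
      // inaccessible. A C-style cast may ignore access control for this
      // conversion, and it is defined because the object is a ToyIsolate.
      ToyIsolate* back = (ToyIsolate*)f;  // NOLINT, mirrors the code above
      back->factory()->Hello();
      return 0;
    }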
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 2050d3729b..5b0e29bf12 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -52,9 +52,12 @@ void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
#ifdef DEBUG
const base::Optional<uint32_t> last_checksum =
shared_ro_heap_->read_only_blob_checksum_;
- if (last_checksum || des_checksum) {
+ if (last_checksum) {
// The read-only heap was set up from a snapshot. Make sure it's always
// the same snapshot.
+ CHECK_WITH_MSG(des_checksum,
+ "Attempt to create the read-only heap after "
+ "already creating from a snapshot.");
CHECK_EQ(last_checksum, des_checksum);
} else {
// The read-only heap objects were created. Make sure this happens only
@@ -144,9 +147,7 @@ bool ReadOnlyHeap::Contains(Address address) {
// static
bool ReadOnlyHeap::Contains(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- // read only includes both TPH and the snapshot, so need both checks
- return third_party_heap::Heap::InReadOnlySpace(object.address()) ||
- MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
+ return third_party_heap::Heap::InReadOnlySpace(object.address());
} else {
return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
}
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
new file mode 100644
index 0000000000..f524b30e74
--- /dev/null
+++ b/deps/v8/src/heap/safepoint.cc
@@ -0,0 +1,134 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/safepoint.h"
+
+#include "src/handles/local-handles.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap.h"
+
+namespace v8 {
+namespace internal {
+
+Safepoint::Safepoint(Heap* heap) : heap_(heap), local_heaps_head_(nullptr) {}
+
+void Safepoint::Start() { StopThreads(); }
+
+void Safepoint::End() { ResumeThreads(); }
+
+void Safepoint::StopThreads() {
+ local_heaps_mutex_.Lock();
+
+ barrier_.Arm();
+
+ for (LocalHeap* current = local_heaps_head_; current;
+ current = current->next_) {
+ current->RequestSafepoint();
+ }
+
+ for (LocalHeap* current = local_heaps_head_; current;
+ current = current->next_) {
+ current->state_mutex_.Lock();
+
+ while (current->state_ == LocalHeap::ThreadState::Running) {
+ current->state_change_.Wait(&current->state_mutex_);
+ }
+ }
+}
+
+void Safepoint::ResumeThreads() {
+ for (LocalHeap* current = local_heaps_head_; current;
+ current = current->next_) {
+ current->state_mutex_.Unlock();
+ }
+
+ barrier_.Disarm();
+
+ local_heaps_mutex_.Unlock();
+}
+
+void Safepoint::EnterFromThread(LocalHeap* local_heap) {
+ {
+ base::MutexGuard guard(&local_heap->state_mutex_);
+ local_heap->state_ = LocalHeap::ThreadState::Safepoint;
+ local_heap->state_change_.NotifyAll();
+ }
+
+ barrier_.Wait();
+
+ {
+ base::MutexGuard guard(&local_heap->state_mutex_);
+ local_heap->state_ = LocalHeap::ThreadState::Running;
+ }
+}
+
+void Safepoint::Barrier::Arm() {
+ base::MutexGuard guard(&mutex_);
+ CHECK(!armed_);
+ armed_ = true;
+}
+
+void Safepoint::Barrier::Disarm() {
+ base::MutexGuard guard(&mutex_);
+ CHECK(armed_);
+ armed_ = false;
+ cond_.NotifyAll();
+}
+
+void Safepoint::Barrier::Wait() {
+ base::MutexGuard guard(&mutex_);
+ while (armed_) {
+ cond_.Wait(&mutex_);
+ }
+}
+
+SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
+ safepoint_->StopThreads();
+}
+
+SafepointScope::~SafepointScope() { safepoint_->ResumeThreads(); }
+
+void Safepoint::AddLocalHeap(LocalHeap* local_heap) {
+ base::MutexGuard guard(&local_heaps_mutex_);
+ if (local_heaps_head_) local_heaps_head_->prev_ = local_heap;
+ local_heap->prev_ = nullptr;
+ local_heap->next_ = local_heaps_head_;
+ local_heaps_head_ = local_heap;
+}
+
+void Safepoint::RemoveLocalHeap(LocalHeap* local_heap) {
+ base::MutexGuard guard(&local_heaps_mutex_);
+ if (local_heap->next_) local_heap->next_->prev_ = local_heap->prev_;
+ if (local_heap->prev_)
+ local_heap->prev_->next_ = local_heap->next_;
+ else
+ local_heaps_head_ = local_heap->next_;
+}
+
+bool Safepoint::ContainsLocalHeap(LocalHeap* local_heap) {
+ base::MutexGuard guard(&local_heaps_mutex_);
+ LocalHeap* current = local_heaps_head_;
+
+ while (current) {
+ if (current == local_heap) return true;
+ current = current->next_;
+ }
+
+ return false;
+}
+
+bool Safepoint::ContainsAnyLocalHeap() {
+ base::MutexGuard guard(&local_heaps_mutex_);
+ return local_heaps_head_ != nullptr;
+}
+
+void Safepoint::Iterate(RootVisitor* visitor) {
+ for (LocalHeap* current = local_heaps_head_; current;
+ current = current->next_) {
+ current->handles()->Iterate(visitor);
+ }
+}
+
+} // namespace internal
+} // namespace v8
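The barrier above is the heart of the stop-the-world handshake: StopThreads() arms it and waits for every LocalHeap to leave the Running state, each worker blocks in Wait() when it enters its safepoint, and Disarm() releases them all at once. A compact sketch with std:: primitives in place of the v8::base ones:

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    class Barrier {
     public:
      void Arm() {
        std::lock_guard<std::mutex> guard(mutex_);
        armed_ = true;
      }
      void Disarm() {
        std::lock_guard<std::mutex> guard(mutex_);
        armed_ = false;
        cond_.notify_all();  // release every worker waiting in the safepoint
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cond_.wait(lock, [this] { return !armed_; });
      }

     private:
      std::mutex mutex_;
      std::condition_variable cond_;
      bool armed_ = false;
    };

    int main() {
      Barrier barrier;
      barrier.Arm();  // main thread starts the pause
      std::thread worker([&] {
        barrier.Wait();  // worker reached its safepoint, blocks until disarmed
        std::puts("worker resumed");
      });
      barrier.Disarm();  // pause over
      worker.join();
      return 0;
    }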
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
new file mode 100644
index 0000000000..4b0036c047
--- /dev/null
+++ b/deps/v8/src/heap/safepoint.h
@@ -0,0 +1,77 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SAFEPOINT_H_
+#define V8_HEAP_SAFEPOINT_H_
+
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class LocalHeap;
+class RootVisitor;
+
+class Safepoint {
+ public:
+ explicit Safepoint(Heap* heap);
+
+ // Enter the safepoint from a thread
+ void EnterFromThread(LocalHeap* local_heap);
+
+ V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
+ V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
+
+ // Iterate handles in local heaps
+ void Iterate(RootVisitor* visitor);
+
+ // Use these methods for now instead of the more intrusive SafepointScope
+ void Start();
+ void End();
+
+ private:
+ class Barrier {
+ base::Mutex mutex_;
+ base::ConditionVariable cond_;
+ bool armed_;
+
+ public:
+ Barrier() : armed_(false) {}
+
+ void Arm();
+ void Disarm();
+ void Wait();
+ };
+
+ void StopThreads();
+ void ResumeThreads();
+
+ void AddLocalHeap(LocalHeap* local_heap);
+ void RemoveLocalHeap(LocalHeap* local_heap);
+
+ Barrier barrier_;
+ Heap* heap_;
+
+ base::Mutex local_heaps_mutex_;
+ LocalHeap* local_heaps_head_;
+
+ friend class SafepointScope;
+ friend class LocalHeap;
+};
+
+class SafepointScope {
+ public:
+ V8_EXPORT_PRIVATE explicit SafepointScope(Heap* heap);
+ V8_EXPORT_PRIVATE ~SafepointScope();
+
+ private:
+ Safepoint* safepoint_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_SAFEPOINT_H_
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index 3730bfeecb..a731e37be0 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -7,112 +7,59 @@
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
-#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
-#include "src/heap/spaces.h"
#include "src/init/v8.h"
namespace v8 {
namespace internal {
+class ScavengeJob::Task : public CancelableTask {
+ public:
+ Task(Isolate* isolate, ScavengeJob* job)
+ : CancelableTask(isolate), isolate_(isolate), job_(job) {}
-const double ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace = 0.8;
+ // CancelableTask overrides.
+ void RunInternal() override;
-void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
- VMState<GC> state(isolate());
- TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
- Heap* heap = isolate()->heap();
- double deadline_in_ms =
- deadline_in_seconds *
- static_cast<double>(base::Time::kMillisecondsPerSecond);
- double start_ms = heap->MonotonicallyIncreasingTimeInMs();
- double idle_time_in_ms = deadline_in_ms - start_ms;
- double scavenge_speed_in_bytes_per_ms =
- heap->tracer()->ScavengeSpeedInBytesPerMillisecond();
- size_t new_space_size = heap->new_space()->Size();
- size_t new_space_capacity = heap->new_space()->Capacity();
+ Isolate* isolate() const { return isolate_; }
- job_->NotifyIdleTask();
+ private:
+ Isolate* const isolate_;
+ ScavengeJob* const job_;
+};
- if (ReachedIdleAllocationLimit(scavenge_speed_in_bytes_per_ms, new_space_size,
- new_space_capacity)) {
- if (EnoughIdleTimeForScavenge(
- idle_time_in_ms, scavenge_speed_in_bytes_per_ms, new_space_size)) {
- heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kIdleTask);
- } else {
- // Immediately request another idle task that can get larger idle time.
- job_->RescheduleIdleTask(heap);
- }
- }
+size_t ScavengeJob::YoungGenerationTaskTriggerSize(Heap* heap) {
+ static constexpr double kTaskTriggerFactor = 0.8;
+ return heap->new_space()->Capacity() * kTaskTriggerFactor;
}
-bool ScavengeJob::ReachedIdleAllocationLimit(
- double scavenge_speed_in_bytes_per_ms, size_t new_space_size,
- size_t new_space_capacity) {
- if (scavenge_speed_in_bytes_per_ms == 0) {
- scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
- }
-
- // Set the allocation limit to the number of bytes we can scavenge in an
- // average idle task.
- double allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
-
- // Keep the limit smaller than the new space capacity.
- allocation_limit =
- Min<double>(allocation_limit,
- new_space_capacity * kMaxAllocationLimitAsFractionOfNewSpace);
- // Adjust the limit to take into account bytes that will be allocated until
- // the next check and keep the limit large enough to avoid scavenges in tiny
- // new space.
- allocation_limit =
- Max<double>(allocation_limit - kBytesAllocatedBeforeNextIdleTask,
- kMinAllocationLimit);
-
- return allocation_limit <= new_space_size;
+bool ScavengeJob::YoungGenerationSizeTaskTriggerReached(Heap* heap) {
+ return heap->new_space()->Size() >= YoungGenerationTaskTriggerSize(heap);
}
-bool ScavengeJob::EnoughIdleTimeForScavenge(
- double idle_time_in_ms, double scavenge_speed_in_bytes_per_ms,
- size_t new_space_size) {
- if (scavenge_speed_in_bytes_per_ms == 0) {
- scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
+void ScavengeJob::ScheduleTaskIfNeeded(Heap* heap) {
+ if (!task_pending_ && !heap->IsTearingDown() &&
+ YoungGenerationSizeTaskTriggerReached(heap)) {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+ auto taskrunner =
+ V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
+ taskrunner->PostTask(std::make_unique<Task>(heap->isolate(), this));
+ task_pending_ = true;
}
- return new_space_size <= idle_time_in_ms * scavenge_speed_in_bytes_per_ms;
}
+void ScavengeJob::Task::RunInternal() {
+ VMState<GC> state(isolate());
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
-void ScavengeJob::RescheduleIdleTask(Heap* heap) {
- // Make sure that we don't reschedule more than one time.
- // Otherwise, we might spam the scheduler with idle tasks.
- if (!idle_task_rescheduled_) {
- ScheduleIdleTask(heap);
- idle_task_rescheduled_ = true;
+ if (ScavengeJob::YoungGenerationSizeTaskTriggerReached(isolate()->heap())) {
+ isolate()->heap()->CollectGarbage(NEW_SPACE,
+ GarbageCollectionReason::kTask);
}
-}
-
-void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
- bytes_allocated_since_the_last_task_ += bytes_allocated;
- if (bytes_allocated_since_the_last_task_ >=
- static_cast<int>(kBytesAllocatedBeforeNextIdleTask)) {
- ScheduleIdleTask(heap);
- bytes_allocated_since_the_last_task_ = 0;
- idle_task_rescheduled_ = false;
- }
+ job_->set_task_pending(false);
}
-
-void ScavengeJob::ScheduleIdleTask(Heap* heap) {
- if (!idle_task_pending_ && !heap->IsTearingDown()) {
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
- if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
- idle_task_pending_ = true;
- auto task = std::make_unique<IdleTask>(heap->isolate(), this);
- V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate)->PostIdleTask(
- std::move(task));
- }
- }
-}
} // namespace internal
} // namespace v8
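The rewritten ScavengeJob drops the idle-task speed estimates entirely: a regular foreground task is posted once the young generation passes a fixed fraction of its capacity. A trivial sketch of the trigger arithmetic (the 16 MB capacity is an assumed example; the 0.8 factor is kTaskTriggerFactor from the code):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t capacity = 16u * 1024 * 1024;  // assumed new-space capacity
      const size_t trigger =
          static_cast<size_t>(capacity * 0.8);    // kTaskTriggerFactor
      const size_t size = 14u * 1024 * 1024;      // current young-gen size
      if (size >= trigger) {
        std::puts("post task -> CollectGarbage(NEW_SPACE, kTask)");
      }
      return 0;
    }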
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index 2b35ccbb18..815f1b8d09 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -5,7 +5,6 @@
#ifndef V8_HEAP_SCAVENGE_JOB_H_
#define V8_HEAP_SCAVENGE_JOB_H_
-#include "src/common/globals.h"
#include "src/tasks/cancelable-task.h"
namespace v8 {
@@ -14,67 +13,24 @@ namespace internal {
class Heap;
class Isolate;
-// This class posts idle tasks and performs scavenges in the idle tasks.
-class V8_EXPORT_PRIVATE ScavengeJob {
+// The scavenge job posts a foreground platform task that performs a young
+// generation (Scavenge) garbage collection.
+class ScavengeJob {
public:
- class IdleTask : public CancelableIdleTask {
- public:
- explicit IdleTask(Isolate* isolate, ScavengeJob* job)
- : CancelableIdleTask(isolate), isolate_(isolate), job_(job) {}
- // CancelableIdleTask overrides.
- void RunInternal(double deadline_in_seconds) override;
+ ScavengeJob() V8_NOEXCEPT = default;
- Isolate* isolate() { return isolate_; }
+ void ScheduleTaskIfNeeded(Heap* heap);
- private:
- Isolate* isolate_;
- ScavengeJob* job_;
- };
+ static size_t YoungGenerationTaskTriggerSize(Heap* heap);
- ScavengeJob()
- : idle_task_pending_(false),
- idle_task_rescheduled_(false),
- bytes_allocated_since_the_last_task_(0) {}
-
- // Posts an idle task if the cumulative bytes allocated since the last
- // idle task exceed kBytesAllocatedBeforeNextIdleTask.
- void ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated);
-
- // Posts an idle task ignoring the bytes allocated, but makes sure
- // that the new idle task cannot reschedule again.
- // This prevents infinite rescheduling.
- void RescheduleIdleTask(Heap* heap);
-
- bool IdleTaskPending() { return idle_task_pending_; }
- void NotifyIdleTask() { idle_task_pending_ = false; }
- bool IdleTaskRescheduled() { return idle_task_rescheduled_; }
-
- static bool ReachedIdleAllocationLimit(double scavenge_speed_in_bytes_per_ms,
- size_t new_space_size,
- size_t new_space_capacity);
+ private:
+ class Task;
- static bool EnoughIdleTimeForScavenge(double idle_time_ms,
- double scavenge_speed_in_bytes_per_ms,
- size_t new_space_size);
+ static bool YoungGenerationSizeTaskTriggerReached(Heap* heap);
- // If we haven't recorded any scavenger events yet, we use a conservative
- // lower bound for the scavenger speed.
- static const int kInitialScavengeSpeedInBytesPerMs = 256 * KB;
- // Estimate of the average idle time that an idle task gets.
- static const int kAverageIdleTimeMs = 5;
- // The number of bytes to be allocated in new space before the next idle
- // task is posted.
- static const size_t kBytesAllocatedBeforeNextIdleTask = 1024 * KB;
- // The minimum size of allocated new space objects to trigger a scavenge.
- static const size_t kMinAllocationLimit = 512 * KB;
- // The allocation limit cannot exceed this fraction of the new space capacity.
- static const double kMaxAllocationLimitAsFractionOfNewSpace;
+ void set_task_pending(bool value) { task_pending_ = value; }
- private:
- void ScheduleIdleTask(Heap* heap);
- bool idle_task_pending_;
- bool idle_task_rescheduled_;
- int bytes_allocated_since_the_last_task_;
+ bool task_pending_ = false;
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 58929c6691..8c9d8cd456 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -67,6 +67,10 @@ bool Heap::CreateHeapObjects() {
set_native_contexts_list(ReadOnlyRoots(this).undefined_value());
set_allocation_sites_list(ReadOnlyRoots(this).undefined_value());
+ set_dirty_js_finalization_registries_list(
+ ReadOnlyRoots(this).undefined_value());
+ set_dirty_js_finalization_registries_list_tail(
+ ReadOnlyRoots(this).undefined_value());
return true;
}
@@ -399,6 +403,18 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_NAME_DICTIONARY_TYPE,
small_ordered_name_dictionary)
+#define TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR(V, NAME, Name, name) \
+ ALLOCATE_MAP(NAME, Name::kSize, name)
+ TORQUE_INTERNAL_FIXED_CLASS_LIST_GENERATOR(
+ TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR, _);
+#undef TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR
+
+#define TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR(V, NAME, Name, name) \
+ ALLOCATE_VARSIZE_MAP(NAME, name)
+ TORQUE_INTERNAL_VARSIZE_CLASS_LIST_GENERATOR(
+ TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR, _);
+#undef TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR
+
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
@@ -452,6 +468,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
object_boilerplate_description)
+ ALLOCATE_VARSIZE_MAP(COVERAGE_INFO_TYPE, coverage_info);
+
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
side_effect_call_handler_info)
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
@@ -618,7 +636,6 @@ void Heap::CreateInitialObjects() {
// There's no "current microtask" in the beginning.
set_current_microtask(roots.undefined_value());
- set_dirty_js_finalization_groups(roots.undefined_value());
set_weak_refs_keep_during_job(roots.undefined_value());
// Allocate cache for single character one byte strings.
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index bfa568dd29..a7aec2ff1f 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -14,6 +14,7 @@
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
+#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -155,7 +156,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
code_page_allocator_ = page_allocator;
if (requested == 0) {
- if (!kRequiresCodeRange) return;
+ if (!isolate_->RequiresCodeRange()) return;
// When a target requires the code range feature, we put all code objects
// in a kMaximalCodeRangeSize range of virtual address space, so that
// they can call each other with near calls.
@@ -172,7 +173,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
// alignments is not supported (requires re-implementation).
DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
}
- DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+ DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
Address hint =
RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
@@ -213,7 +214,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
requested));
- heap_reservation_ = std::move(reservation);
+ code_reservation_ = std::move(reservation);
code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
page_allocator, aligned_base, size,
static_cast<size_t>(MemoryChunk::kAlignment));
@@ -761,7 +762,8 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocationStatistics();
- page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->SetOldGenerationPageFlags(!is_off_thread_space() &&
+ heap()->incremental_marking()->IsMarking());
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
page->list_node().Initialize();
@@ -1118,7 +1120,7 @@ size_t Page::ShrinkToHighWaterMark() {
void Page::CreateBlackArea(Address start, Address end) {
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
- DCHECK_NE(start, end);
+ DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
@@ -1130,7 +1132,7 @@ void Page::CreateBlackArea(Address start, Address end) {
void Page::DestroyBlackArea(Address start, Address end) {
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
- DCHECK_NE(start, end);
+ DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
@@ -1743,6 +1745,7 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
if (merging_from_off_thread) {
DCHECK_NULL(p->sweeping_slot_set());
+ p->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
if (heap()->incremental_marking()->black_allocation()) {
p->CreateBlackArea(p->area_start(), p->HighWaterMark());
}
@@ -1911,7 +1914,7 @@ int PagedSpace::CountTotalPages() {
void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
- if (top != kNullAddress && top != limit &&
+ if (top != kNullAddress && top != limit && !is_off_thread_space() &&
heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
}
@@ -1995,7 +1998,8 @@ void PagedSpace::FreeLinearAllocationArea() {
return;
}
- if (heap()->incremental_marking()->black_allocation()) {
+ if (!is_off_thread_space() &&
+ heap()->incremental_marking()->black_allocation()) {
Page* page = Page::FromAllocationAreaAddress(current_top);
// Clear the bits in the unused black area.
@@ -2199,7 +2203,9 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = PerIsolateAccountingLength(array_buffer);
+ size_t size =
+ ArrayBufferTracker::Lookup(isolate->heap(), array_buffer)
+ ->PerIsolateAccountingLength();
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -2211,10 +2217,20 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
}
}
for (int i = 0; i < kNumTypes; i++) {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ i == ExternalBackingStoreType::kArrayBuffer)
+ continue;
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
}
CHECK(allocation_pointer_found_in_space);
+
+ if (identity() == OLD_SPACE && V8_ARRAY_BUFFER_EXTENSION_BOOL) {
+ size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
+ CHECK_EQ(bytes,
+ ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
+
#ifdef DEBUG
VerifyCountersAfterSweeping(isolate->heap());
#endif
@@ -2687,7 +2703,8 @@ void NewSpace::Verify(Isolate* isolate) {
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = PerIsolateAccountingLength(array_buffer);
+ size_t size = ArrayBufferTracker::Lookup(heap(), array_buffer)
+ ->PerIsolateAccountingLength();
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -2701,10 +2718,19 @@ void NewSpace::Verify(Isolate* isolate) {
}
for (int i = 0; i < kNumTypes; i++) {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ i == ExternalBackingStoreType::kArrayBuffer)
+ continue;
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
}
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
+ size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
+ CHECK_EQ(bytes,
+ ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
+
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
CHECK_EQ(to_space_.id(), kToSpace);
@@ -4487,7 +4513,12 @@ void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
}
OffThreadLargeObjectSpace::OffThreadLargeObjectSpace(Heap* heap)
- : LargeObjectSpace(heap, LO_SPACE) {}
+ : LargeObjectSpace(heap, LO_SPACE) {
+#ifdef V8_ENABLE_THIRD_PARTY_HEAP
+ // OffThreadLargeObjectSpace doesn't work with third-party heap.
+ UNREACHABLE();
+#endif
+}
AllocationResult OffThreadLargeObjectSpace::AllocateRaw(int object_size) {
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 9d0de53f82..8d587443e3 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_
+#include <atomic>
#include <list>
#include <map>
#include <memory>
@@ -641,10 +642,12 @@ class MemoryChunk : public BasicMemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
}
@@ -658,12 +661,11 @@ class MemoryChunk : public BasicMemoryChunk {
// to another chunk. See the comment on Page::FromAllocationAreaAddress.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
- intptr_t old_mark = 0;
- do {
- old_mark = chunk->high_water_mark_;
- } while (
- (new_mark > old_mark) &&
- !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
+ intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
+ while ((new_mark > old_mark) &&
+ !chunk->high_water_mark_.compare_exchange_weak(
+ old_mark, new_mark, std::memory_order_acq_rel)) {
+ }
}
static inline void MoveExternalBackingStoreBytes(
@@ -1451,15 +1453,14 @@ class MemoryAllocator {
// The use of atomic primitives does not by itself guarantee the desired
// semantics. The loops below retry the compare-and-swap until either the
// stored bound already subsumes the new value or the exchange succeeds.
- Address ptr = kNullAddress;
- do {
- ptr = lowest_ever_allocated_;
- } while ((low < ptr) &&
- !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
- do {
- ptr = highest_ever_allocated_;
- } while ((high > ptr) &&
- !highest_ever_allocated_.compare_exchange_weak(ptr, high));
+ Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
+ while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
+ ptr, low, std::memory_order_acq_rel)) {
+ }
+ ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
+ while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
+ ptr, high, std::memory_order_acq_rel)) {
+ }
}
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
@@ -1476,14 +1477,9 @@ class MemoryAllocator {
Isolate* isolate_;
- // This object controls virtual space reserved for V8 heap instance.
- // Depending on the configuration it may contain the following:
- // - no reservation (on 32-bit architectures)
- // - code range reservation used by bounded code page allocator (on 64-bit
- // architectures without pointers compression in V8 heap)
- // - data + code range reservation (on 64-bit architectures with pointers
- // compression in V8 heap)
- VirtualMemory heap_reservation_;
+ // This object controls the virtual space reserved for code on the V8 heap.
+ // It is only valid on 64-bit architectures where kRequiresCodeRange is true.
+ VirtualMemory code_reservation_;
// Page allocator used for allocating data pages. Depending on the
// configuration it may be a page allocator instance provided by v8::Platform
@@ -1497,7 +1493,7 @@ class MemoryAllocator {
// can be used for call and jump instructions).
v8::PageAllocator* code_page_allocator_;
- // A part of the |heap_reservation_| that may contain executable code
+ // A part of the |code_reservation_| that may contain executable code
// including reserved page with read-write access in the beginning.
// See details below.
base::AddressRegion code_range_;
@@ -1506,9 +1502,9 @@ class MemoryAllocator {
// that controls executable pages allocation. It does not control the
// optionally existing page in the beginning of the |code_range_|.
// So, summarizing all of the above, the following conditions hold:
- // 1) |heap_reservation_| >= |code_range_|
+ // 1) |code_reservation_| >= |code_range_|
// 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
- // 3) |heap_reservation_| is AllocatePageSize()-aligned
+ // 3) |code_reservation_| is AllocatePageSize()-aligned
// 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
// 5) |code_range_| is CommitPageSize()-aligned
std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
@@ -2869,6 +2865,9 @@ class V8_EXPORT_PRIVATE NewSpace
}
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ type == ExternalBackingStoreType::kArrayBuffer)
+ return heap()->YoungArrayBufferBytes();
DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
return to_space_.ExternalBackingStoreBytes(type);
}
@@ -3133,6 +3132,13 @@ class OldSpace : public PagedSpace {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
MemoryChunkLayout::ObjectStartOffsetInDataPage();
}
+
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ type == ExternalBackingStoreType::kArrayBuffer)
+ return heap()->OldArrayBufferBytes();
+ return external_backing_store_bytes_[type];
+ }
};
// -----------------------------------------------------------------------------
@@ -3184,7 +3190,12 @@ class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
public:
explicit OffThreadSpace(Heap* heap)
: LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
- LocalSpaceKind::kOffThreadSpace) {}
+ LocalSpaceKind::kOffThreadSpace) {
+#ifdef V8_ENABLE_THIRD_PARTY_HEAP
+ // OffThreadSpace doesn't work with third-party heap.
+ UNREACHABLE();
+#endif
+ }
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
@@ -3289,6 +3300,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+ virtual bool is_off_thread() const { return false; }
+
#ifdef VERIFY_HEAP
virtual void Verify(Isolate* isolate);
#endif
@@ -3393,6 +3406,8 @@ class V8_EXPORT_PRIVATE OffThreadLargeObjectSpace : public LargeObjectSpace {
void FreeUnmarkedObjects() override;
+ bool is_off_thread() const override { return true; }
+
protected:
// OldLargeObjectSpace can mess with OffThreadLargeObjectSpace during merging.
friend class OldLargeObjectSpace;
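// Illustrative sketch (not V8 code): both rewritten loops above -- the
// high-water mark and the lowest/highest-ever-allocated bounds -- follow the
// standard lock-free "atomic max" pattern: one relaxed load, then
// compare_exchange_weak until either the stored value already wins or the
// exchange succeeds. A minimal standalone form (the min variant is the
// mirror image):
#include <atomic>
#include <cstdint>

inline void RelaxedUpdateMax(std::atomic<intptr_t>& slot, intptr_t candidate) {
  intptr_t current = slot.load(std::memory_order_relaxed);
  // On failure, compare_exchange_weak reloads |current|, so every retry
  // compares against the freshest stored value; spurious failures just loop.
  while (candidate > current &&
         !slot.compare_exchange_weak(current, candidate,
                                     std::memory_order_acq_rel)) {
  }
}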
diff --git a/deps/v8/src/heap/third-party/heap-api.h b/deps/v8/src/heap/third-party/heap-api.h
index 69a84114ee..5eaae847a7 100644
--- a/deps/v8/src/heap/third-party/heap-api.h
+++ b/deps/v8/src/heap/third-party/heap-api.h
@@ -6,6 +6,7 @@
#define V8_HEAP_THIRD_PARTY_HEAP_API_H_
#include "include/v8.h"
+#include "src/base/address-region.h"
#include "src/heap/heap.h"
namespace v8 {
@@ -14,15 +15,17 @@ namespace third_party_heap {
class Heap {
public:
- static std::unique_ptr<Heap> New(v8::Isolate* isolate);
+ static std::unique_ptr<Heap> New(v8::internal::Isolate* isolate);
- static v8::Isolate* GetIsolate(Address address);
+ static v8::internal::Isolate* GetIsolate(Address address);
AllocationResult Allocate(size_t size_in_bytes, AllocationType type,
AllocationAlignment align);
Address GetObjectFromInnerPointer(Address inner_pointer);
+ const base::AddressRegion& GetCodeRange();
+
static bool InCodeSpace(Address address);
static bool InReadOnlySpace(Address address);
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
index 82a278a042..0f5f13cdf7 100644
--- a/deps/v8/src/heap/worklist.h
+++ b/deps/v8/src/heap/worklist.h
@@ -148,6 +148,9 @@ class Worklist {
private_push_segment(task_id)->Size();
}
+ // Thread-safe but may return an outdated result.
+ size_t GlobalPoolSize() const { return global_pool_.Size(); }
+
// Clears all segments. Frees the global segment pool.
//
// Assumes that no other tasks are running.
@@ -201,8 +204,7 @@ class Worklist {
}
void MergeGlobalPool(Worklist* other) {
- auto pair = other->global_pool_.Extract();
- global_pool_.MergeList(pair.first, pair.second);
+ global_pool_.Merge(&other->global_pool_);
}
private:
@@ -282,17 +284,23 @@ class Worklist {
Segment* temp = top_;
set_top(other.top_);
other.set_top(temp);
+ size_t other_size = other.size_.exchange(
+ size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ size_.store(other_size, std::memory_order_relaxed);
}
V8_INLINE void Push(Segment* segment) {
base::MutexGuard guard(&lock_);
segment->set_next(top_);
set_top(segment);
+ size_.fetch_add(1, std::memory_order_relaxed);
}
V8_INLINE bool Pop(Segment** segment) {
base::MutexGuard guard(&lock_);
if (top_ != nullptr) {
+ DCHECK_LT(0U, size_);
+ size_.fetch_sub(1, std::memory_order_relaxed);
*segment = top_;
set_top(top_->next());
return true;
@@ -304,8 +312,16 @@ class Worklist {
return base::AsAtomicPointer::Relaxed_Load(&top_) == nullptr;
}
+ V8_INLINE size_t Size() const {
+ // It is safe to read |size_| without a lock since this variable is
+ // atomic, keeping in mind that threads may not immediately see the new
+ // value when it is updated.
+ return size_.load(std::memory_order_relaxed);
+ }
+
void Clear() {
base::MutexGuard guard(&lock_);
+ size_.store(0, std::memory_order_relaxed);
Segment* current = top_;
while (current != nullptr) {
Segment* tmp = current;
@@ -321,9 +337,12 @@ class Worklist {
base::MutexGuard guard(&lock_);
Segment* prev = nullptr;
Segment* current = top_;
+ size_t num_deleted = 0;
while (current != nullptr) {
current->Update(callback);
if (current->IsEmpty()) {
+ DCHECK_LT(0U, size_);
+ ++num_deleted;
if (prev == nullptr) {
top_ = current->next();
} else {
@@ -337,6 +356,7 @@ class Worklist {
current = current->next();
}
}
+ size_.fetch_sub(num_deleted, std::memory_order_relaxed);
}
// See Worklist::Iterate.
@@ -349,25 +369,28 @@ class Worklist {
}
}
- std::pair<Segment*, Segment*> Extract() {
+ void Merge(GlobalPool* other) {
Segment* top = nullptr;
+ size_t other_size = 0;
{
- base::MutexGuard guard(&lock_);
- if (top_ == nullptr) return std::make_pair(nullptr, nullptr);
- top = top_;
- set_top(nullptr);
+ base::MutexGuard guard(&other->lock_);
+ if (!other->top_) return;
+ top = other->top_;
+ other_size = other->size_.load(std::memory_order_relaxed);
+ other->size_.store(0, std::memory_order_relaxed);
+ other->set_top(nullptr);
}
+
+ // It's safe to iterate through these segments because the top was
+ // extracted from |other|.
Segment* end = top;
- while (end->next() != nullptr) end = end->next();
- return std::make_pair(top, end);
- }
+ while (end->next()) end = end->next();
- void MergeList(Segment* start, Segment* end) {
- if (start == nullptr) return;
{
base::MutexGuard guard(&lock_);
+ size_.fetch_add(other_size, std::memory_order_relaxed);
end->set_next(top_);
- set_top(start);
+ set_top(top);
}
}
@@ -378,6 +401,7 @@ class Worklist {
base::Mutex lock_;
Segment* top_;
+ std::atomic<size_t> size_{0};
};
V8_INLINE Segment*& private_push_segment(int task_id) {
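// Illustrative sketch (hypothetical names, not V8's API): the GlobalPool
// changes above pair a mutex-protected intrusive stack with a relaxed
// std::atomic<size_t>, so the size can be polled without taking the lock at
// the cost of possibly stale reads -- the same trade-off GlobalPoolSize()
// documents. A minimal equivalent:
#include <atomic>
#include <cstddef>
#include <mutex>

struct Node {
  Node* next = nullptr;
};

class CountedPool {
 public:
  void Push(Node* n) {
    std::lock_guard<std::mutex> guard(lock_);
    n->next = top_;
    top_ = n;
    size_.fetch_add(1, std::memory_order_relaxed);
  }

  Node* Pop() {
    std::lock_guard<std::mutex> guard(lock_);
    if (top_ == nullptr) return nullptr;
    Node* n = top_;
    top_ = n->next;
    size_.fetch_sub(1, std::memory_order_relaxed);
    return n;
  }

  // Thread-safe but may lag behind concurrent pushes and pops.
  std::size_t Size() const { return size_.load(std::memory_order_relaxed); }

 private:
  std::mutex lock_;
  Node* top_ = nullptr;
  std::atomic<std::size_t> size_{0};
};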
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 1fd29fe169..d3731a7465 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -64,8 +64,9 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
}
TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase(
- TNode<Smi> slot, TNode<FeedbackVector> vector, TNode<Map> receiver_map,
- Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss) {
+ TNode<TaggedIndex> slot, TNode<FeedbackVector> vector,
+ TNode<Map> receiver_map, Label* if_handler,
+ TVariable<MaybeObject>* var_handler, Label* if_miss) {
Comment("TryMonomorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
@@ -225,10 +226,9 @@ void AccessorAssembler::HandleLoadAccessor(
Goto(&load);
BIND(&load);
- Callable callable = CodeFactory::CallApiCallback(isolate());
TNode<IntPtrT> argc = IntPtrConstant(0);
- exit_point->Return(CallStub(callable, context, callback, argc, data,
- api_holder.value(), p->receiver()));
+ exit_point->Return(CallApiCallback(context, callback, argc, data,
+ api_holder.value(), p->receiver()));
}
void AccessorAssembler::HandleLoadField(TNode<JSObject> holder,
@@ -563,8 +563,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(getter)));
- Callable callable = CodeFactory::Call(isolate());
- exit_point->Return(CallJS(callable, p->context(), getter, p->receiver()));
+ exit_point->Return(Call(p->context(), getter, p->receiver()));
}
BIND(&native_data_property);
@@ -1496,7 +1495,7 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(setter)));
Callable callable = CodeFactory::Call(isolate());
- Return(CallJS(callable, p->context(), setter, p->receiver(), p->value()));
+ Return(Call(p->context(), setter, p->receiver(), p->value()));
}
void AccessorAssembler::HandleStoreICProtoHandler(
@@ -1674,10 +1673,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Goto(&store);
BIND(&store);
- Callable callable = CodeFactory::CallApiCallback(isolate());
TNode<IntPtrT> argc = IntPtrConstant(1);
- Return(CallStub(callable, context, callback, argc, data,
- api_holder.value(), p->receiver(), p->value()));
+ Return(CallApiCallback(context, callback, argc, data, api_holder.value(),
+ p->receiver(), p->value()));
}
BIND(&if_store_global_proxy);
@@ -1989,7 +1987,7 @@ TNode<PropertyArray> AccessorAssembler::ExtendPropertiesBackingStore(
mode));
TNode<PropertyArray> new_properties =
- CAST(AllocatePropertyArray(new_capacity, mode));
+ AllocatePropertyArray(new_capacity, mode);
var_new_properties = new_properties;
FillPropertyArrayWithUndefined(new_properties, var_length.value(),
@@ -2831,8 +2829,7 @@ void AccessorAssembler::LoadIC_NoFeedback(const LoadICParameters* p,
}
void AccessorAssembler::LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector,
- const LazyNode<Smi>& lazy_smi_slot,
- const LazyNode<UintPtrT>& lazy_slot,
+ const LazyNode<TaggedIndex>& lazy_slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name,
TypeofMode typeof_mode,
@@ -2842,24 +2839,27 @@ void AccessorAssembler::LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector,
GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback);
{
- TNode<FeedbackVector> vector = CAST(maybe_feedback_vector);
- TNode<UintPtrT> slot = lazy_slot();
- LoadGlobalIC_TryPropertyCellCase(vector, slot, lazy_context, exit_point,
- &try_handler, &miss);
+ TNode<TaggedIndex> slot = lazy_slot();
- BIND(&try_handler);
- LoadGlobalIC_TryHandlerCase(vector, slot, lazy_smi_slot, lazy_context,
- lazy_name, typeof_mode, exit_point, &miss);
- }
+ {
+ TNode<FeedbackVector> vector = CAST(maybe_feedback_vector);
+ LoadGlobalIC_TryPropertyCellCase(vector, slot, lazy_context, exit_point,
+ &try_handler, &miss);
- BIND(&miss);
- {
- Comment("LoadGlobalIC_MissCase");
- TNode<Context> context = lazy_context();
- TNode<Name> name = lazy_name();
- exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name,
- lazy_smi_slot(), maybe_feedback_vector,
- SmiConstant(typeof_mode));
+ BIND(&try_handler);
+ LoadGlobalIC_TryHandlerCase(vector, slot, lazy_context, lazy_name,
+ typeof_mode, exit_point, &miss);
+ }
+
+ BIND(&miss);
+ {
+ Comment("LoadGlobalIC_MissCase");
+ TNode<Context> context = lazy_context();
+ TNode<Name> name = lazy_name();
+ exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name,
+ slot, maybe_feedback_vector,
+ SmiConstant(typeof_mode));
+ }
}
BIND(&no_feedback);
@@ -2875,7 +2875,7 @@ void AccessorAssembler::LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector,
}
void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
- TNode<FeedbackVector> vector, TNode<UintPtrT> slot,
+ TNode<FeedbackVector> vector, TNode<TaggedIndex> slot,
const LazyNode<Context>& lazy_context, ExitPoint* exit_point,
Label* try_handler, Label* miss) {
Comment("LoadGlobalIC_TryPropertyCellCase");
@@ -2912,10 +2912,9 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
}
void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
- TNode<FeedbackVector> vector, TNode<UintPtrT> slot,
- const LazyNode<Smi>& lazy_smi_slot, const LazyNode<Context>& lazy_context,
- const LazyNode<Name>& lazy_name, TypeofMode typeof_mode,
- ExitPoint* exit_point, Label* miss) {
+ TNode<FeedbackVector> vector, TNode<TaggedIndex> slot,
+ const LazyNode<Context>& lazy_context, const LazyNode<Name>& lazy_name,
+ TypeofMode typeof_mode, ExitPoint* exit_point, Label* miss) {
Comment("LoadGlobalIC_TryHandlerCase");
Label call_handler(this), non_smi(this);
@@ -2937,7 +2936,7 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
LoadContextElement(native_context, Context::EXTENSION_INDEX);
LazyLoadICParameters p([=] { return context; }, receiver, lazy_name,
- lazy_smi_slot, vector, holder);
+ [=] { return slot; }, vector, holder);
HandleLoadICHandlerCase(&p, handler, miss, exit_point, ICMode::kGlobalIC,
on_nonexistent);
@@ -3233,7 +3232,7 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
TNode<Map> receiver_map = LoadReceiverMap(receiver);
TNode<Name> name = CAST(p->name());
TNode<FeedbackVector> vector = CAST(p->vector());
- TNode<Smi> slot = p->slot();
+ TNode<TaggedIndex> slot = p->slot();
TNode<Context> context = p->context();
// When we get here, we know that the {name} matches the recorded
@@ -3634,7 +3633,7 @@ void AccessorAssembler::GenerateLoadIC() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3647,7 +3646,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3659,8 +3658,13 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
&if_handler, &var_handler, &miss);
BIND(&if_handler);
- LazyLoadICParameters p([=] { return context; }, receiver,
- [=] { return name; }, [=] { return slot; }, vector);
+ LazyLoadICParameters p(
+ // lazy_context
+ [=] { return context; }, receiver,
+ // lazy_name
+ [=] { return name; },
+ // lazy_slot
+ [=] { return slot; }, vector);
HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit);
BIND(&miss);
@@ -3673,7 +3677,7 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3709,10 +3713,9 @@ void AccessorAssembler::GenerateLoadIC_NoFeedback() {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Smi> ic_kind = CAST(Parameter(Descriptor::kICKind));
- LoadICParameters p(
- context, receiver, name,
- SmiConstant(static_cast<int>(FeedbackSlot::Invalid().ToInt())),
- UndefinedConstant());
+ LoadICParameters p(context, receiver, name,
+ TaggedIndexConstant(FeedbackSlot::Invalid().ToInt()),
+ UndefinedConstant());
LoadIC_NoFeedback(&p, ic_kind);
}
@@ -3721,7 +3724,7 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3733,7 +3736,7 @@ void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3755,17 +3758,15 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
using Descriptor = LoadGlobalWithVectorDescriptor;
TNode<Name> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
LoadGlobalIC(
vector,
- // lazy_smi_slot
- [=] { return slot; },
// lazy_slot
- [=] { return Unsigned(SmiUntag(slot)); },
+ [=] { return slot; },
// lazy_context
[=] { return context; },
// lazy_name
@@ -3776,7 +3777,7 @@ void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
using Descriptor = LoadGlobalDescriptor;
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3790,7 +3791,7 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3803,7 +3804,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3816,7 +3817,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3829,7 +3830,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3842,7 +3843,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3855,7 +3856,7 @@ void AccessorAssembler::GenerateStoreGlobalIC() {
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3868,7 +3869,7 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3881,7 +3882,7 @@ void AccessorAssembler::GenerateStoreIC() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3895,7 +3896,7 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3909,7 +3910,7 @@ void AccessorAssembler::GenerateKeyedStoreIC() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3923,7 +3924,7 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
@@ -3937,7 +3938,7 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
TNode<Object> array = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> index = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -4007,7 +4008,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
using Descriptor = CloneObjectWithVectorDescriptor;
TNode<Object> source = CAST(Parameter(Descriptor::kSource));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> maybe_vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TVARIABLE(MaybeObject, var_handler);
@@ -4056,17 +4057,18 @@ void AccessorAssembler::GenerateCloneObjectIC() {
GotoIf(IsEmptyFixedArray(source_properties), &allocate_object);
// This IC requires that the source object has fast properties.
- CSA_SLOW_ASSERT(this, IsPropertyArray(CAST(source_properties)));
- TNode<IntPtrT> length = LoadPropertyArrayLength(
- UncheckedCast<PropertyArray>(source_properties));
+ TNode<PropertyArray> source_property_array = CAST(source_properties);
+
+ TNode<IntPtrT> length = LoadPropertyArrayLength(source_property_array);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &allocate_object);
auto mode = INTPTR_PARAMETERS;
- var_properties = CAST(AllocatePropertyArray(length, mode));
- FillPropertyArrayWithUndefined(var_properties.value(), IntPtrConstant(0),
- length, mode);
- CopyPropertyArrayValues(source_properties, var_properties.value(), length,
+ TNode<PropertyArray> property_array = AllocatePropertyArray(length, mode);
+ FillPropertyArrayWithUndefined(property_array, IntPtrConstant(0), length,
+ mode);
+ CopyPropertyArrayValues(source_property_array, property_array, length,
SKIP_WRITE_BARRIER, mode, DestroySource::kNo);
+ var_properties = property_array;
}
Goto(&allocate_object);
@@ -4171,7 +4173,7 @@ void AccessorAssembler::GenerateKeyedHasIC() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -4195,7 +4197,7 @@ void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 3fb9a533ca..49b6694d6d 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
struct LoadICParameters {
LoadICParameters(TNode<Context> context,
base::Optional<TNode<Object>> receiver, TNode<Object> name,
- TNode<Smi> slot, TNode<HeapObject> vector,
+ TNode<TaggedIndex> slot, TNode<HeapObject> vector,
base::Optional<TNode<Object>> holder = base::nullopt)
: context_(context),
receiver_(receiver),
@@ -91,7 +91,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Context> context() const { return context_; }
TNode<Object> receiver() const { return receiver_.value(); }
TNode<Object> name() const { return name_; }
- TNode<Smi> slot() const { return slot_; }
+ TNode<TaggedIndex> slot() const { return slot_; }
TNode<HeapObject> vector() const { return vector_; }
TNode<Object> holder() const { return holder_.value(); }
bool receiver_is_null() const { return !receiver_.has_value(); }
@@ -100,14 +100,14 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Context> context_;
base::Optional<TNode<Object>> receiver_;
TNode<Object> name_;
- TNode<Smi> slot_;
+ TNode<TaggedIndex> slot_;
TNode<HeapObject> vector_;
base::Optional<TNode<Object>> holder_;
};
struct LazyLoadICParameters {
LazyLoadICParameters(LazyNode<Context> context, TNode<Object> receiver,
- LazyNode<Object> name, LazyNode<Smi> slot,
+ LazyNode<Object> name, LazyNode<TaggedIndex> slot,
TNode<HeapObject> vector,
base::Optional<TNode<Object>> holder = base::nullopt)
: context_(context),
@@ -129,7 +129,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Context> context() const { return context_(); }
TNode<Object> receiver() const { return receiver_; }
TNode<Object> name() const { return name_(); }
- TNode<Smi> slot() const { return slot_(); }
+ TNode<TaggedIndex> slot() const { return slot_(); }
TNode<HeapObject> vector() const { return vector_; }
TNode<Object> holder() const { return holder_; }
@@ -137,14 +137,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
LazyNode<Context> context_;
TNode<Object> receiver_;
LazyNode<Object> name_;
- LazyNode<Smi> slot_;
+ LazyNode<TaggedIndex> slot_;
TNode<HeapObject> vector_;
TNode<Object> holder_;
};
void LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector,
- const LazyNode<Smi>& lazy_smi_slot,
- const LazyNode<UintPtrT>& lazy_slot,
+ const LazyNode<TaggedIndex>& lazy_slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name, TypeofMode typeof_mode,
ExitPoint* exit_point);
@@ -162,8 +161,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
struct StoreICParameters : public LoadICParameters {
StoreICParameters(TNode<Context> context,
base::Optional<TNode<Object>> receiver,
- TNode<Object> name, TNode<Object> value, TNode<Smi> slot,
- TNode<HeapObject> vector)
+ TNode<Object> name, TNode<Object> value,
+ TNode<TaggedIndex> slot, TNode<HeapObject> vector)
: LoadICParameters(context, receiver, name, slot, vector),
value_(value) {}
@@ -243,9 +242,12 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// IC dispatcher behavior.
// Checks monomorphic case. Returns {feedback} entry of the vector.
- TNode<MaybeObject> TryMonomorphicCase(
- TNode<Smi> slot, TNode<FeedbackVector> vector, TNode<Map> receiver_map,
- Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss);
+ TNode<MaybeObject> TryMonomorphicCase(TNode<TaggedIndex> slot,
+ TNode<FeedbackVector> vector,
+ TNode<Map> receiver_map,
+ Label* if_handler,
+ TVariable<MaybeObject>* var_handler,
+ Label* if_miss);
void HandlePolymorphicCase(TNode<Map> receiver_map,
TNode<WeakFixedArray> feedback, Label* if_handler,
TVariable<MaybeObject>* var_handler,
@@ -309,14 +311,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// LoadGlobalIC implementation.
void LoadGlobalIC_TryPropertyCellCase(TNode<FeedbackVector> vector,
- TNode<UintPtrT> slot,
+ TNode<TaggedIndex> slot,
const LazyNode<Context>& lazy_context,
ExitPoint* exit_point,
Label* try_handler, Label* miss);
void LoadGlobalIC_TryHandlerCase(TNode<FeedbackVector> vector,
- TNode<UintPtrT> slot,
- const LazyNode<Smi>& lazy_smi_slot,
+ TNode<TaggedIndex> slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name,
TypeofMode typeof_mode,
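// Illustrative sketch (not V8's actual encoding): the Smi -> TaggedIndex
// migration above changes how feedback-slot indices travel through the IC
// code. The underlying idea of a tagged small integer -- payload shifted
// left past a tag bit, so untagging is a plain arithmetic shift -- in
// miniature, with an assumed one-bit tag:
#include <cstdint>

constexpr intptr_t kTagBits = 1;  // illustrative; a clear low bit marks "integer"

constexpr intptr_t EncodeIndex(intptr_t value) { return value << kTagBits; }
constexpr intptr_t DecodeIndex(intptr_t tagged) { return tagged >> kTagBits; }

static_assert(DecodeIndex(EncodeIndex(42)) == 42, "index round-trips");
static_assert((EncodeIndex(42) & 1) == 0, "tag bit stays clear");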
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 5f6ff322d2..b508a20f80 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -96,10 +96,7 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state) {
if (V8_LIKELY(!TracingFlags::is_ic_stats_enabled())) return;
- Map map;
- if (!receiver_map().is_null()) {
- map = *receiver_map();
- }
+ Handle<Map> map = receiver_map(); // Might be empty.
const char* modifier = "";
if (state() == NO_FEEDBACK) {
@@ -116,7 +113,7 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
if (!(TracingFlags::ic_stats.load(std::memory_order_relaxed) &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- LOG(isolate(), ICEvent(type, keyed_prefix, map, *name,
+ LOG(isolate(), ICEvent(type, keyed_prefix, map, name,
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state), modifier,
slow_stub_reason_));
@@ -125,6 +122,8 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
JavaScriptFrameIterator it(isolate());
JavaScriptFrame* frame = it.frame();
+
+ DisallowHeapAllocation no_gc;
JSFunction function = frame->function();
ICStats::instance()->Begin();
@@ -150,11 +149,13 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ic_info.state += TransitionMarkFromState(new_state);
ic_info.state += modifier;
ic_info.state += ")";
- ic_info.map = reinterpret_cast<void*>(map.ptr());
if (!map.is_null()) {
- ic_info.is_dictionary_map = map.is_dictionary_map();
- ic_info.number_of_own_descriptors = map.NumberOfOwnDescriptors();
- ic_info.instance_type = std::to_string(map.instance_type());
+ ic_info.map = reinterpret_cast<void*>(map->ptr());
+ ic_info.is_dictionary_map = map->is_dictionary_map();
+ ic_info.number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ ic_info.instance_type = std::to_string(map->instance_type());
+ } else {
+ ic_info.map = nullptr;
}
// TODO(lpy) Add name as key field in ICStats.
ICStats::instance()->End();
@@ -426,6 +427,14 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name,
if (name->IsPrivateName() && !it.IsFound()) {
Handle<String> name_string(
String::cast(Symbol::cast(*name).description()), isolate());
+ if (name->IsPrivateBrand()) {
+ Handle<String> class_name =
+ (name_string->length() == 0)
+ ? isolate()->factory()->anonymous_string()
+ : name_string;
+ return TypeError(MessageTemplate::kInvalidPrivateBrand, object,
+ class_name);
+ }
return TypeError(MessageTemplate::kInvalidPrivateMemberRead, object,
name_string);
}
@@ -1485,11 +1494,10 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
if (MigrateDeprecated(isolate(), object)) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::SetProperty(isolate(), object, name, value),
- Object);
- return result;
+ LookupIterator::Key key(isolate(), name);
+ LookupIterator it(isolate(), object, key);
+ MAYBE_RETURN_NULL(Object::SetProperty(&it, value, StoreOrigin::kNamed));
+ return value;
}
bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic;
@@ -2238,7 +2246,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> receiver = args.at(0);
Handle<Name> key = args.at<Name>(1);
- Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(2);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
@@ -2290,7 +2298,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<JSGlobalObject> global = isolate->global_object();
Handle<String> name = args.at<String>(0);
- Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
CONVERT_INT32_ARG_CHECKED(typeof_value, 3);
TypeofMode typeof_mode = static_cast<TypeofMode>(typeof_value);
@@ -2318,7 +2326,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
@@ -2335,7 +2343,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> receiver = args.at(0);
Handle<Object> key = args.at(1);
- Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(2);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
@@ -2354,7 +2362,7 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
Handle<Object> receiver = args.at(3);
Handle<Name> key = args.at<Name>(4);
@@ -2384,7 +2392,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Name> key = args.at<Name>(3);
@@ -2420,7 +2428,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
#ifdef DEBUG
{
- Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
@@ -2468,7 +2476,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
@@ -2510,7 +2518,7 @@ RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Miss) {
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
- Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
@@ -2558,7 +2566,7 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
Handle<Object> key = args.at(1);
Handle<Object> value = args.at(2);
Handle<Map> map = args.at<Map>(3);
- Handle<Smi> slot = args.at<Smi>(4);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(4);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(5);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
@@ -2681,10 +2689,11 @@ RUNTIME_FUNCTION(Runtime_CloneObjectIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
Handle<Object> source = args.at<Object>(0);
- int flags = args.smi_at(1);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
if (!MigrateDeprecated(isolate, source)) {
- FeedbackSlot slot = FeedbackVector::ToSlot(args.smi_at(2));
+ CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 2);
+ FeedbackSlot slot = FeedbackVector::ToSlot(index);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
if (maybe_vector->IsFeedbackVector()) {
FeedbackNexus nexus(Handle<FeedbackVector>::cast(maybe_vector), slot);
@@ -2768,7 +2777,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
if (it.IsFound()) return *result;
- Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(3);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(4);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
@@ -2859,7 +2868,7 @@ RUNTIME_FUNCTION(Runtime_KeyedHasIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> receiver = args.at(0);
Handle<Object> key = args.at(1);
- Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TaggedIndex> slot = args.at<TaggedIndex>(2);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
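// Illustrative sketch (hypothetical names, not V8's API): the TraceIC change
// above keeps |map| behind a Handle, which stays valid across GC, and only
// touches raw object words under a DisallowHeapAllocation scope. The RAII
// idea in miniature:
#include <cassert>
#include <cstddef>
#include <new>

struct NoAllocScope {
  static inline int depth = 0;  // per-thread in a real VM; simplified here
  NoAllocScope() { ++depth; }
  ~NoAllocScope() { --depth; }
};

inline void* VmAllocate(std::size_t bytes) {
  // Any allocation (and hence any GC) while a scope is open is a bug.
  assert(NoAllocScope::depth == 0 && "allocation inside no-alloc scope");
  return ::operator new(bytes);
}

// Usage: { NoAllocScope scope; /* raw-pointer work, no VmAllocate calls */ }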
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 5d8ea71bcd..88a66e1760 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -617,9 +617,9 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
Label found(this), found_fast(this), found_dict(this), found_global(this);
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_entry);
- TryLookupProperty(CAST(holder), holder_map, instance_type, name,
- &found_fast, &found_dict, &found_global,
- &var_meta_storage, &var_entry, &next_proto, bailout);
+ TryLookupProperty(holder, holder_map, instance_type, name, &found_fast,
+ &found_dict, &found_global, &var_meta_storage,
+ &var_entry, &next_proto, bailout);
BIND(&found_fast);
{
TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
@@ -920,8 +920,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
GotoIf(IsFunctionTemplateInfoMap(setter_map), slow);
GotoIfNot(IsCallableMap(setter_map), &not_callable);
- Callable callable = CodeFactory::Call(isolate());
- CallJS(callable, p->context(), setter, receiver, p->value());
+ Call(p->context(), setter, receiver, p->value());
exit_point->Return(p->value());
BIND(&not_callable);
@@ -1054,7 +1053,7 @@ void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
TNode<Object> receiver_maybe_smi = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label miss(this, Label::kDeferred), store_property(this);
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index dc3317588d..ec026bcfcf 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -78,15 +78,12 @@ class V8_EXPORT_PRIVATE StubCache {
Isolate* isolate() { return isolate_; }
- // Ideally we would set kCacheIndexShift to Name::kHashShift, such that
- // the bit field inside the hash field gets shifted out implicitly. However,
- // sizeof(Entry) needs to be a multiple of 1 << kCacheIndexShift, and it
- // isn't clear whether letting one bit of the bit field leak into the index
- // computation is bad enough to warrant an additional shift to get rid of it.
- static const int kCacheIndexShift = 2;
- // The purpose of the static assert is to make us reconsider this choice
- // if the bit field ever grows even more.
- STATIC_ASSERT(kCacheIndexShift == Name::kHashShift - 1);
+ // Setting kCacheIndexShift to Name::kHashShift is convenient because it
+ // causes the bit field inside the hash field to get shifted out implicitly.
+ // Note that kCacheIndexShift must not get too large, because
+ // sizeof(Entry) needs to be a multiple of 1 << kCacheIndexShift (see
+ // the STATIC_ASSERT below, in {entry(...)}).
+ static const int kCacheIndexShift = Name::kHashShift;
static const int kPrimaryTableBits = 11;
static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
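// Illustrative sketch (constants are assumptions, not V8's layout): with
// kCacheIndexShift equal to Name::kHashShift, the bit field stored in the
// low bits of the hash field drops out of the index computation for free:
#include <cstdint>

constexpr int kHashShift = 2;   // assumed width of the low bit field
constexpr int kTableBits = 11;  // mirrors kPrimaryTableBits above
constexpr uint32_t kTableMask = (1u << kTableBits) - 1;

constexpr uint32_t CacheIndex(uint32_t hash_field) {
  // Shifting by kHashShift discards the bit field; the surviving bits are
  // the name hash proper, masked down to the table size.
  return (hash_field >> kHashShift) & kTableMask;
}

static_assert(CacheIndex(0xABCDEF12u) <= kTableMask, "index stays in range");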
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 93d19a42a9..22d33eaf91 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -9,6 +9,7 @@
#include "src/base/ieee754.h"
#include "src/builtins/accessors.h"
#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/microtask-queue.h"
@@ -443,6 +444,7 @@ V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
Builtins::Name call,
int len, bool adapt) {
DCHECK(Builtins::HasJSLinkage(call));
+ name = String::Flatten(isolate, name, AllocationType::kOld);
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
name, call, LanguageMode::kStrict);
Handle<JSFunction> fun = isolate->factory()->NewFunction(args);
@@ -1551,9 +1553,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, isolate_->initial_object_prototype(), "toString",
Builtins::kObjectPrototypeToString, 0, true);
native_context()->set_object_to_string(*object_to_string);
- SimpleInstallFunction(isolate_, isolate_->initial_object_prototype(),
- "valueOf", Builtins::kObjectPrototypeValueOf, 0,
- true);
+ Handle<JSFunction> object_value_of = SimpleInstallFunction(
+ isolate_, isolate_->initial_object_prototype(), "valueOf",
+ Builtins::kObjectPrototypeValueOf, 0, true);
+ native_context()->set_object_value_of_function(*object_value_of);
SimpleInstallGetterSetter(
isolate_, isolate_->initial_object_prototype(), factory->proto_string(),
@@ -4305,44 +4308,46 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
{
- // Create %FinalizationGroupPrototype%
- Handle<String> finalization_group_name =
- factory->NewStringFromStaticChars("FinalizationGroup");
- Handle<JSObject> finalization_group_prototype = factory->NewJSObject(
+ // Create %FinalizationRegistryPrototype%
+ Handle<String> finalization_registry_name =
+ factory->NewStringFromStaticChars("FinalizationRegistry");
+ Handle<JSObject> finalization_registry_prototype = factory->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
- // Create %FinalizationGroup%
- Handle<JSFunction> finalization_group_fun = CreateFunction(
- isolate(), finalization_group_name, JS_FINALIZATION_GROUP_TYPE,
- JSFinalizationGroup::kHeaderSize, 0, finalization_group_prototype,
- Builtins::kFinalizationGroupConstructor);
+ // Create %FinalizationRegistry%
+ Handle<JSFunction> finalization_registry_fun = CreateFunction(
+ isolate(), finalization_registry_name, JS_FINALIZATION_REGISTRY_TYPE,
+ JSFinalizationRegistry::kHeaderSize, 0, finalization_registry_prototype,
+ Builtins::kFinalizationRegistryConstructor);
InstallWithIntrinsicDefaultProto(
- isolate(), finalization_group_fun,
- Context::JS_FINALIZATION_GROUP_FUNCTION_INDEX);
+ isolate(), finalization_registry_fun,
+ Context::JS_FINALIZATION_REGISTRY_FUNCTION_INDEX);
- finalization_group_fun->shared().DontAdaptArguments();
- finalization_group_fun->shared().set_length(1);
+ finalization_registry_fun->shared().DontAdaptArguments();
+ finalization_registry_fun->shared().set_length(1);
// Install the "constructor" property on the prototype.
- JSObject::AddProperty(isolate(), finalization_group_prototype,
- factory->constructor_string(), finalization_group_fun,
- DONT_ENUM);
+ JSObject::AddProperty(isolate(), finalization_registry_prototype,
+ factory->constructor_string(),
+ finalization_registry_fun, DONT_ENUM);
- InstallToStringTag(isolate(), finalization_group_prototype,
- finalization_group_name);
+ InstallToStringTag(isolate(), finalization_registry_prototype,
+ finalization_registry_name);
- JSObject::AddProperty(isolate(), global, finalization_group_name,
- finalization_group_fun, DONT_ENUM);
+ JSObject::AddProperty(isolate(), global, finalization_registry_name,
+ finalization_registry_fun, DONT_ENUM);
- SimpleInstallFunction(isolate(), finalization_group_prototype, "register",
- Builtins::kFinalizationGroupRegister, 2, false);
+ SimpleInstallFunction(isolate(), finalization_registry_prototype,
+ "register", Builtins::kFinalizationRegistryRegister,
+ 2, false);
- SimpleInstallFunction(isolate(), finalization_group_prototype, "unregister",
- Builtins::kFinalizationGroupUnregister, 1, false);
+ SimpleInstallFunction(isolate(), finalization_registry_prototype,
+ "unregister",
+ Builtins::kFinalizationRegistryUnregister, 1, false);
- SimpleInstallFunction(isolate(), finalization_group_prototype,
+ SimpleInstallFunction(isolate(), finalization_registry_prototype,
"cleanupSome",
- Builtins::kFinalizationGroupCleanupSome, 0, false);
+ Builtins::kFinalizationRegistryCleanupSome, 0, false);
}
{
// Create %WeakRefPrototype%
@@ -4381,7 +4386,7 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
}
{
- // Create cleanup iterator for JSFinalizationGroup.
+ // Create cleanup iterator for JSFinalizationRegistry.
Handle<JSObject> iterator_prototype(
native_context()->initial_iterator_prototype(), isolate());
@@ -4390,17 +4395,17 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
JSObject::ForceSetPrototype(cleanup_iterator_prototype, iterator_prototype);
InstallToStringTag(isolate(), cleanup_iterator_prototype,
- "FinalizationGroup Cleanup Iterator");
+ "FinalizationRegistry Cleanup Iterator");
SimpleInstallFunction(isolate(), cleanup_iterator_prototype, "next",
- Builtins::kFinalizationGroupCleanupIteratorNext, 0,
+ Builtins::kFinalizationRegistryCleanupIteratorNext, 0,
true);
Handle<Map> cleanup_iterator_map =
- factory->NewMap(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE,
- JSFinalizationGroupCleanupIterator::kHeaderSize);
+ factory->NewMap(JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE,
+ JSFinalizationRegistryCleanupIterator::kHeaderSize);
Map::SetPrototype(isolate(), cleanup_iterator_map,
cleanup_iterator_prototype);
- native_context()->set_js_finalization_group_cleanup_iterator_map(
+ native_context()->set_js_finalization_registry_cleanup_iterator_map(
*cleanup_iterator_map);
}
}
@@ -4992,7 +4997,7 @@ bool Genesis::InstallSpecialObjects(Isolate* isolate,
WasmJs::Install(isolate, true);
} else if (FLAG_validate_asm) {
// Install the internal data structures only; these are needed for asm.js
- // translated to WASM to work correctly.
+ // translated to Wasm to work correctly.
WasmJs::Install(isolate, false);
}
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 7131b80860..d56cc4b0cd 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -359,6 +359,7 @@
V(_, strict_function_transition_symbol) \
V(_, wasm_exception_tag_symbol) \
V(_, wasm_exception_values_symbol) \
+ V(_, wasm_uncatchable_symbol) \
V(_, uninitialized_symbol)
#define PUBLIC_SYMBOL_LIST_GENERATOR(V, _) \
diff --git a/deps/v8/src/inspector/custom-preview.cc b/deps/v8/src/inspector/custom-preview.cc
index aa5652a42d..393e0f15c5 100644
--- a/deps/v8/src/inspector/custom-preview.cc
+++ b/deps/v8/src/inspector/custom-preview.cc
@@ -116,13 +116,13 @@ bool substituteObjectTags(int sessionId, const String16& groupName,
protocol::Response response =
injectedScript->wrapObject(originValue, groupName, WrapMode::kNoPreview,
configValue, maxDepth - 1, &wrapper);
- if (!response.isSuccess() || !wrapper) {
+ if (!response.IsSuccess() || !wrapper) {
reportError(context, tryCatch, "cannot wrap value");
return false;
}
- std::vector<uint8_t> cbor = std::move(*wrapper).TakeSerialized();
std::vector<uint8_t> json;
- v8_crdtp::json::ConvertCBORToJSON(v8_crdtp::SpanFrom(cbor), &json);
+ v8_crdtp::json::ConvertCBORToJSON(v8_crdtp::SpanFrom(wrapper->Serialize()),
+ &json);
v8::Local<v8::Value> jsonWrapper;
v8_inspector::StringView serialized(json.data(), json.size());
if (!v8::JSON::Parse(context, toV8String(isolate, serialized))
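The hunk above replaces the consuming std::move(*wrapper).TakeSerialized() with wrapper->Serialize(), feeding the CBOR bytes straight into the JSON converter. A minimal sketch of that step in isolation, assuming `wrapper` is a serializable protocol object as in the hunk:

    // wrapper->Serialize() yields CBOR; ConvertCBORToJSON re-encodes it.
    std::vector<uint8_t> cbor = wrapper->Serialize();
    std::vector<uint8_t> json;
    v8_crdtp::json::ConvertCBORToJSON(v8_crdtp::SpanFrom(cbor), &json);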
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index fb3fa9b069..734c70141d 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -179,7 +179,7 @@ class InjectedScript::ProtocolPromiseHandler {
if (!session) return;
InjectedScript::ContextScope scope(session, m_executionContextId);
Response response = scope.initialize();
- if (!response.isSuccess()) return;
+ if (!response.IsSuccess()) return;
std::unique_ptr<EvaluateCallback> callback =
scope.injectedScript()->takeEvaluateCallback(m_callback);
@@ -210,7 +210,7 @@ class InjectedScript::ProtocolPromiseHandler {
std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue;
response = scope.injectedScript()->wrapObject(result, m_objectGroup,
m_wrapMode, &wrappedValue);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -224,14 +224,14 @@ class InjectedScript::ProtocolPromiseHandler {
if (!session) return;
InjectedScript::ContextScope scope(session, m_executionContextId);
Response response = scope.initialize();
- if (!response.isSuccess()) return;
+ if (!response.IsSuccess()) return;
std::unique_ptr<EvaluateCallback> callback =
scope.injectedScript()->takeEvaluateCallback(m_callback);
if (!callback) return;
std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue;
response = scope.injectedScript()->wrapObject(result, m_objectGroup,
m_wrapMode, &wrappedValue);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -251,7 +251,7 @@ class InjectedScript::ProtocolPromiseHandler {
exceptionDetails;
response = scope.injectedScript()->createExceptionDetails(
message, exception, m_objectGroup, &exceptionDetails);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -297,7 +297,7 @@ class InjectedScript::ProtocolPromiseHandler {
.build();
response = scope.injectedScript()->addExceptionToDetails(
result, exceptionDetails.get(), m_objectGroup);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -315,11 +315,11 @@ class InjectedScript::ProtocolPromiseHandler {
if (!session) return;
InjectedScript::ContextScope scope(session, m_executionContextId);
Response response = scope.initialize();
- if (!response.isSuccess()) return;
+ if (!response.IsSuccess()) return;
std::unique_ptr<EvaluateCallback> callback =
scope.injectedScript()->takeEvaluateCallback(m_callback);
if (!callback) return;
- callback->sendFailure(Response::Error("Promise was collected"));
+ callback->sendFailure(Response::ServerError("Promise was collected"));
}
V8InspectorImpl* m_inspector;
@@ -380,60 +380,59 @@ Response InjectedScript::getProperties(
.setEnumerable(mirror.enumerable)
.setIsOwn(mirror.isOwn)
.build();
- Response response;
std::unique_ptr<RemoteObject> remoteObject;
if (mirror.value) {
- response = wrapObjectMirror(*mirror.value, groupName, wrapMode,
- v8::MaybeLocal<v8::Value>(),
- kMaxCustomPreviewDepth, &remoteObject);
- if (!response.isSuccess()) return response;
+ Response response = wrapObjectMirror(
+ *mirror.value, groupName, wrapMode, v8::MaybeLocal<v8::Value>(),
+ kMaxCustomPreviewDepth, &remoteObject);
+ if (!response.IsSuccess()) return response;
descriptor->setValue(std::move(remoteObject));
descriptor->setWritable(mirror.writable);
}
if (mirror.getter) {
- response =
+ Response response =
mirror.getter->buildRemoteObject(context, wrapMode, &remoteObject);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response =
bindRemoteObjectIfNeeded(sessionId, context, mirror.getter->v8Value(),
groupName, remoteObject.get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
descriptor->setGet(std::move(remoteObject));
}
if (mirror.setter) {
- response =
+ Response response =
mirror.setter->buildRemoteObject(context, wrapMode, &remoteObject);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response =
bindRemoteObjectIfNeeded(sessionId, context, mirror.setter->v8Value(),
groupName, remoteObject.get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
descriptor->setSet(std::move(remoteObject));
}
if (mirror.symbol) {
- response =
+ Response response =
mirror.symbol->buildRemoteObject(context, wrapMode, &remoteObject);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response =
bindRemoteObjectIfNeeded(sessionId, context, mirror.symbol->v8Value(),
groupName, remoteObject.get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
descriptor->setSymbol(std::move(remoteObject));
}
if (mirror.exception) {
- response =
+ Response response =
mirror.exception->buildRemoteObject(context, wrapMode, &remoteObject);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response = bindRemoteObjectIfNeeded(sessionId, context,
mirror.exception->v8Value(),
groupName, remoteObject.get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
descriptor->setValue(std::move(remoteObject));
descriptor->setWasThrown(true);
}
(*properties)->emplace_back(std::move(descriptor));
}
- return Response::OK();
+ return Response::Success();
}
Response InjectedScript::getInternalAndPrivateProperties(
@@ -445,7 +444,7 @@ Response InjectedScript::getInternalAndPrivateProperties(
*internalProperties = std::make_unique<Array<InternalPropertyDescriptor>>();
*privateProperties = std::make_unique<Array<PrivatePropertyDescriptor>>();
- if (!value->IsObject()) return Response::OK();
+ if (!value->IsObject()) return Response::Success();
v8::Local<v8::Object> value_obj = value.As<v8::Object>();
@@ -458,11 +457,11 @@ Response InjectedScript::getInternalAndPrivateProperties(
std::unique_ptr<RemoteObject> remoteObject;
Response response = internalProperty.value->buildRemoteObject(
m_context->context(), WrapMode::kNoPreview, &remoteObject);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response = bindRemoteObjectIfNeeded(sessionId, context,
internalProperty.value->v8Value(),
groupName, remoteObject.get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
(*internalProperties)
->emplace_back(InternalPropertyDescriptor::create()
.setName(internalProperty.name)
@@ -479,50 +478,53 @@ Response InjectedScript::getInternalAndPrivateProperties(
.build();
std::unique_ptr<RemoteObject> remoteObject;
- Response response;
DCHECK((privateProperty.getter || privateProperty.setter) ^
(!!privateProperty.value));
if (privateProperty.value) {
- response = privateProperty.value->buildRemoteObject(
+ Response response = privateProperty.value->buildRemoteObject(
context, WrapMode::kNoPreview, &remoteObject);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response = bindRemoteObjectIfNeeded(sessionId, context,
privateProperty.value->v8Value(),
groupName, remoteObject.get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
descriptor->setValue(std::move(remoteObject));
}
if (privateProperty.getter) {
- response = privateProperty.getter->buildRemoteObject(
+ Response response = privateProperty.getter->buildRemoteObject(
context, WrapMode::kNoPreview, &remoteObject);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response = bindRemoteObjectIfNeeded(sessionId, context,
privateProperty.getter->v8Value(),
groupName, remoteObject.get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
descriptor->setGet(std::move(remoteObject));
}
if (privateProperty.setter) {
- response = privateProperty.setter->buildRemoteObject(
+ Response response = privateProperty.setter->buildRemoteObject(
context, WrapMode::kNoPreview, &remoteObject);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response = bindRemoteObjectIfNeeded(sessionId, context,
privateProperty.setter->v8Value(),
groupName, remoteObject.get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
descriptor->setSet(std::move(remoteObject));
}
(*privateProperties)->emplace_back(std::move(descriptor));
}
- return Response::OK();
+ return Response::Success();
}
void InjectedScript::releaseObject(const String16& objectId) {
+ std::vector<uint8_t> cbor;
+ v8_crdtp::json::ConvertJSONToCBOR(
+ v8_crdtp::span<uint16_t>(objectId.characters16(), objectId.length()),
+ &cbor);
std::unique_ptr<protocol::Value> parsedObjectId =
- protocol::StringUtil::parseJSON(objectId);
+ protocol::Value::parseBinary(cbor.data(), cbor.size());
if (!parsedObjectId) return;
protocol::DictionaryValue* object =
protocol::DictionaryValue::cast(parsedObjectId.get());
@@ -560,11 +562,11 @@ Response InjectedScript::wrapObjectMirror(
v8::Local<v8::Context> context = m_context->context();
v8::Context::Scope contextScope(context);
Response response = mirror.buildRemoteObject(context, wrapMode, result);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
v8::Local<v8::Value> value = mirror.v8Value();
response = bindRemoteObjectIfNeeded(sessionId, context, value, groupName,
result->get());
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (customPreviewEnabled && value->IsObject()) {
std::unique_ptr<protocol::Runtime::CustomPreview> customPreview;
generateCustomPreview(sessionId, groupName, value.As<v8::Object>(),
@@ -572,7 +574,7 @@ Response InjectedScript::wrapObjectMirror(
&customPreview);
if (customPreview) (*result)->setCustomPreview(std::move(customPreview));
}
- return Response::OK();
+ return Response::Success();
}
std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
@@ -589,7 +591,7 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
std::unique_ptr<RemoteObject> remoteObject;
Response response =
wrapObject(table, "console", WrapMode::kNoPreview, &remoteObject);
- if (!remoteObject || !response.isSuccess()) return nullptr;
+ if (!remoteObject || !response.IsSuccess()) return nullptr;
auto mirror = ValueMirror::create(context, table);
std::unique_ptr<ObjectPreview> preview;
@@ -658,7 +660,8 @@ void InjectedScript::addPromiseCallback(
void InjectedScript::discardEvaluateCallbacks() {
for (auto& callback : m_evaluateCallbacks) {
- callback->sendFailure(Response::Error("Execution context was destroyed."));
+ callback->sendFailure(
+ Response::ServerError("Execution context was destroyed."));
delete callback;
}
m_evaluateCallbacks.clear();
@@ -677,9 +680,9 @@ Response InjectedScript::findObject(const RemoteObjectId& objectId,
v8::Local<v8::Value>* outObject) const {
auto it = m_idToWrappedObject.find(objectId.id());
if (it == m_idToWrappedObject.end())
- return Response::Error("Could not find object with given id");
+ return Response::ServerError("Could not find object with given id");
*outObject = it->second.Get(m_context->isolate());
- return Response::OK();
+ return Response::Success();
}
String16 InjectedScript::objectGroupName(const RemoteObjectId& objectId) const {
@@ -719,9 +722,9 @@ Response InjectedScript::resolveCallArgument(
std::unique_ptr<RemoteObjectId> remoteObjectId;
Response response =
RemoteObjectId::parse(callArgument->getObjectId(""), &remoteObjectId);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (remoteObjectId->contextId() != m_context->contextId())
- return Response::Error(
+ return Response::ServerError(
"Argument should belong to the same JavaScript world as target "
"object");
return findObject(*remoteObjectId, result);
@@ -729,10 +732,10 @@ Response InjectedScript::resolveCallArgument(
if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
String16 value;
if (callArgument->hasValue()) {
- std::vector<uint8_t> cbor =
- std::move(*callArgument->getValue(nullptr)).TakeSerialized();
std::vector<uint8_t> json;
- v8_crdtp::json::ConvertCBORToJSON(v8_crdtp::SpanFrom(cbor), &json);
+ v8_crdtp::json::ConvertCBORToJSON(
+ v8_crdtp::SpanFrom(callArgument->getValue(nullptr)->Serialize()),
+ &json);
value =
"(" +
String16(reinterpret_cast<const char*>(json.data()), json.size()) +
@@ -749,28 +752,29 @@ Response InjectedScript::resolveCallArgument(
->compileAndRunInternalScript(
m_context->context(), toV8String(m_context->isolate(), value))
.ToLocal(result)) {
- return Response::Error("Couldn't parse value object in call argument");
+ return Response::ServerError(
+ "Couldn't parse value object in call argument");
}
- return Response::OK();
+ return Response::Success();
}
*result = v8::Undefined(m_context->isolate());
- return Response::OK();
+ return Response::Success();
}
Response InjectedScript::addExceptionToDetails(
v8::Local<v8::Value> exception,
protocol::Runtime::ExceptionDetails* exceptionDetails,
const String16& objectGroup) {
- if (exception.IsEmpty()) return Response::OK();
+ if (exception.IsEmpty()) return Response::Success();
std::unique_ptr<protocol::Runtime::RemoteObject> wrapped;
Response response =
wrapObject(exception, objectGroup,
exception->IsNativeError() ? WrapMode::kNoPreview
: WrapMode::kWithPreview,
&wrapped);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
exceptionDetails->setException(std::move(wrapped));
- return Response::OK();
+ return Response::Success();
}
Response InjectedScript::createExceptionDetails(
@@ -817,9 +821,9 @@ Response InjectedScript::createExceptionDetails(
}
Response response =
addExceptionToDetails(exception, exceptionDetails.get(), objectGroup);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
*result = std::move(exceptionDetails);
- return Response::OK();
+ return Response::Success();
}
Response InjectedScript::wrapEvaluateResult(
@@ -832,14 +836,14 @@ Response InjectedScript::wrapEvaluateResult(
if (!maybeResultValue.ToLocal(&resultValue))
return Response::InternalError();
Response response = wrapObject(resultValue, objectGroup, wrapMode, result);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (objectGroup == "console") {
m_lastEvaluationResult.Reset(m_context->isolate(), resultValue);
m_lastEvaluationResult.AnnotateStrongRetainer(kGlobalHandleLabel);
}
} else {
if (tryCatch.HasTerminated() || !tryCatch.CanContinue()) {
- return Response::Error("Execution was terminated");
+ return Response::ServerError("Execution was terminated");
}
v8::Local<v8::Value> exception = tryCatch.Exception();
Response response =
@@ -847,13 +851,13 @@ Response InjectedScript::wrapEvaluateResult(
exception->IsNativeError() ? WrapMode::kNoPreview
: WrapMode::kWithPreview,
result);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
// We send exception in result for compatibility reasons, even though it's
// accessible through exceptionDetails.exception.
response = createExceptionDetails(tryCatch, objectGroup, exceptionDetails);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
}
- return Response::OK();
+ return Response::Success();
}
v8::Local<v8::Object> InjectedScript::commandLineAPI() {
@@ -885,11 +889,11 @@ Response InjectedScript::Scope::initialize() {
m_inspector->sessionById(m_contextGroupId, m_sessionId);
if (!session) return Response::InternalError();
Response response = findInjectedScript(session);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
m_context = m_injectedScript->context()->context();
m_context->Enter();
if (m_allowEval) m_context->AllowCodeGenerationFromStrings(true);
- return Response::OK();
+ return Response::Success();
}
void InjectedScript::Scope::installCommandLineAPI() {
@@ -972,15 +976,15 @@ Response InjectedScript::ObjectScope::findInjectedScript(
V8InspectorSessionImpl* session) {
std::unique_ptr<RemoteObjectId> remoteId;
Response response = RemoteObjectId::parse(m_remoteObjectId, &remoteId);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
InjectedScript* injectedScript = nullptr;
response = session->findInjectedScript(remoteId.get(), injectedScript);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
m_objectGroupName = injectedScript->objectGroupName(*remoteId);
response = injectedScript->findObject(*remoteId, &m_object);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
m_injectedScript = injectedScript;
- return Response::OK();
+ return Response::Success();
}
InjectedScript::CallFrameScope::CallFrameScope(V8InspectorSessionImpl* session,
@@ -993,7 +997,7 @@ Response InjectedScript::CallFrameScope::findInjectedScript(
V8InspectorSessionImpl* session) {
std::unique_ptr<RemoteCallFrameId> remoteId;
Response response = RemoteCallFrameId::parse(m_remoteCallFrameId, &remoteId);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
m_frameOrdinal = static_cast<size_t>(remoteId->frameOrdinal());
return session->findInjectedScript(remoteId.get(), m_injectedScript);
}
@@ -1018,9 +1022,9 @@ String16 InjectedScript::bindObject(v8::Local<v8::Value> value,
Response InjectedScript::bindRemoteObjectIfNeeded(
int sessionId, v8::Local<v8::Context> context, v8::Local<v8::Value> value,
const String16& groupName, protocol::Runtime::RemoteObject* remoteObject) {
- if (!remoteObject) return Response::OK();
- if (remoteObject->hasValue()) return Response::OK();
- if (remoteObject->hasUnserializableValue()) return Response::OK();
+ if (!remoteObject) return Response::Success();
+ if (remoteObject->hasValue()) return Response::Success();
+ if (remoteObject->hasUnserializableValue()) return Response::Success();
if (remoteObject->getType() != RemoteObject::TypeEnum::Undefined) {
v8::Isolate* isolate = context->GetIsolate();
V8InspectorImpl* inspector =
@@ -1031,11 +1035,11 @@ Response InjectedScript::bindRemoteObjectIfNeeded(
inspectedContext ? inspectedContext->getInjectedScript(sessionId)
: nullptr;
if (!injectedScript) {
- return Response::Error("Cannot find context with specified id");
+ return Response::ServerError("Cannot find context with specified id");
}
remoteObject->setObjectId(injectedScript->bindObject(value, groupName));
}
- return Response::OK();
+ return Response::Success();
}
void InjectedScript::unbindObject(int id) {
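Every inspector file below repeats the same mechanical Response migration from third_party/inspector_protocol: isSuccess() becomes IsSuccess(), Response::OK() becomes Response::Success(), and Response::Error(...) becomes Response::ServerError(...), which now takes a UTF-8 std::string (hence the scriptId.utf8() conversions in later hunks). A condensed sketch of the new shape, with hypothetical DoWork/Subroutine names:

    Response DoWork() {
      Response response = Subroutine();
      if (!response.IsSuccess()) return response;          // was: isSuccess()
      if (/* failure condition */ false)
        return Response::ServerError("what went wrong");   // was: Response::Error(...)
      return Response::Success();                          // was: Response::OK()
    }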
diff --git a/deps/v8/src/inspector/remote-object-id.cc b/deps/v8/src/inspector/remote-object-id.cc
index 2f5f051816..e3c67bb6c9 100644
--- a/deps/v8/src/inspector/remote-object-id.cc
+++ b/deps/v8/src/inspector/remote-object-id.cc
@@ -4,6 +4,7 @@
#include "src/inspector/remote-object-id.h"
+#include "../../third_party/inspector_protocol/crdtp/json.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/string-util.h"
@@ -13,8 +14,12 @@ RemoteObjectIdBase::RemoteObjectIdBase() : m_injectedScriptId(0) {}
std::unique_ptr<protocol::DictionaryValue>
RemoteObjectIdBase::parseInjectedScriptId(const String16& objectId) {
+ std::vector<uint8_t> cbor;
+ v8_crdtp::json::ConvertJSONToCBOR(
+ v8_crdtp::span<uint16_t>(objectId.characters16(), objectId.length()),
+ &cbor);
std::unique_ptr<protocol::Value> parsedValue =
- protocol::StringUtil::parseJSON(objectId);
+ protocol::Value::parseBinary(cbor.data(), cbor.size());
if (!parsedValue || parsedValue->type() != protocol::Value::TypeObject)
return nullptr;
@@ -33,12 +38,12 @@ Response RemoteObjectId::parse(const String16& objectId,
std::unique_ptr<RemoteObjectId> remoteObjectId(new RemoteObjectId());
std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
remoteObjectId->parseInjectedScriptId(objectId);
- if (!parsedObjectId) return Response::Error("Invalid remote object id");
+ if (!parsedObjectId) return Response::ServerError("Invalid remote object id");
bool success = parsedObjectId->getInteger("id", &remoteObjectId->m_id);
- if (!success) return Response::Error("Invalid remote object id");
+ if (!success) return Response::ServerError("Invalid remote object id");
*result = std::move(remoteObjectId);
- return Response::OK();
+ return Response::Success();
}
RemoteCallFrameId::RemoteCallFrameId()
@@ -49,13 +54,13 @@ Response RemoteCallFrameId::parse(const String16& objectId,
std::unique_ptr<RemoteCallFrameId> remoteCallFrameId(new RemoteCallFrameId());
std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
remoteCallFrameId->parseInjectedScriptId(objectId);
- if (!parsedObjectId) return Response::Error("Invalid call frame id");
+ if (!parsedObjectId) return Response::ServerError("Invalid call frame id");
bool success =
parsedObjectId->getInteger("ordinal", &remoteCallFrameId->m_frameOrdinal);
- if (!success) return Response::Error("Invalid call frame id");
+ if (!success) return Response::ServerError("Invalid call frame id");
*result = std::move(remoteCallFrameId);
- return Response::OK();
+ return Response::Success();
}
String16 RemoteCallFrameId::serialize(int injectedScriptId, int frameOrdinal) {
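Both id parsers above now take the route already used by InjectedScript::releaseObject: the UTF-16 object id is converted from JSON to CBOR, then parsed with protocol::Value::parseBinary, replacing the removed StringUtil::parseJSON. The shared pattern, extracted as a sketch for any String16 `objectId`:

    std::vector<uint8_t> cbor;
    v8_crdtp::json::ConvertJSONToCBOR(
        v8_crdtp::span<uint16_t>(objectId.characters16(), objectId.length()),
        &cbor);
    std::unique_ptr<protocol::Value> parsed =
        protocol::Value::parseBinary(cbor.data(), cbor.size());
    // `parsed` is null on malformed input, matching the old parseJSON contract.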
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 15344ee2b8..43b581a1b5 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -208,21 +208,8 @@ String16 String16::fromUTF8(const char* stringStart, size_t length) {
return String16(UTF8ToUTF16(stringStart, length));
}
-String16 String16::fromUTF16LE(const UChar* stringStart, size_t length) {
-#ifdef V8_TARGET_BIG_ENDIAN
- // Need to flip the byte order on big endian machines.
- String16Builder builder;
- builder.reserveCapacity(length);
- for (size_t i = 0; i < length; i++) {
- const UChar utf16be_char =
- stringStart[i] << 8 | (stringStart[i] >> 8 & 0x00FF);
- builder.append(utf16be_char);
- }
- return builder.toString();
-#else
- // No need to do anything on little endian machines.
+String16 String16::fromUTF16(const UChar* stringStart, size_t length) {
return String16(stringStart, length);
-#endif // V8_TARGET_BIG_ENDIAN
}
std::string String16::utf8() const {
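fromUTF16LE is collapsed into fromUTF16, which assumes UTF-16 code units already in the host's native byte order; the big-endian byte-swap loop is dropped rather than relocated in this hunk. An illustrative caller-side adaptation, assuming little-endian input on a big-endian target (not code from this patch):

    // Copy so the swap does not mutate the caller's buffer.
    std::vector<UChar> units(data, data + length);
    #ifdef V8_TARGET_BIG_ENDIAN
    for (UChar& u : units) u = static_cast<UChar>((u << 8) | (u >> 8));
    #endif
    String16 s = String16::fromUTF16(units.data(), units.size());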
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 16d2b15527..b4cbd016a6 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -70,11 +70,7 @@ class String16 {
// Convenience methods.
V8_EXPORT std::string utf8() const;
V8_EXPORT static String16 fromUTF8(const char* stringStart, size_t length);
-
- // Instantiates a String16 in native endianness from UTF16 LE.
- // On Big endian architectures, byte order needs to be flipped.
- V8_EXPORT static String16 fromUTF16LE(const UChar* stringStart,
- size_t length);
+ V8_EXPORT static String16 fromUTF16(const UChar* stringStart, size_t length);
std::size_t hash() const {
if (!hash_code) {
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index e37b59b1c8..3f779e86e1 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -95,75 +95,43 @@ bool stringViewStartsWith(const StringView& string, const char* prefix) {
return true;
}
-namespace protocol {
-
-// static
-double StringUtil::toDouble(const char* s, size_t len, bool* isOk) {
- int flags = v8::internal::ALLOW_HEX | v8::internal::ALLOW_OCTAL |
- v8::internal::ALLOW_BINARY;
- double result = v8::internal::StringToDouble(s, flags);
- *isOk = !std::isnan(result);
- return result;
-}
-
-std::unique_ptr<protocol::Value> StringUtil::parseJSON(
- const StringView& string) {
- if (!string.length()) return nullptr;
- if (string.is8Bit()) {
- return parseJSONCharacters(string.characters8(),
- static_cast<int>(string.length()));
- }
- return parseJSONCharacters(string.characters16(),
- static_cast<int>(string.length()));
-}
-
-std::unique_ptr<protocol::Value> StringUtil::parseJSON(const String16& string) {
- if (!string.length()) return nullptr;
- return parseJSONCharacters(string.characters16(),
- static_cast<int>(string.length()));
-}
-} // namespace protocol
-
namespace {
// An empty string buffer doesn't own any string data; its ::string() returns a
// default-constructed StringView instance.
class EmptyStringBuffer : public StringBuffer {
public:
- const StringView& string() override { return string_; }
-
- private:
- StringView string_;
+ StringView string() const override { return StringView(); }
};
// Contains LATIN1 text data or CBOR encoded binary data in a vector.
class StringBuffer8 : public StringBuffer {
public:
- explicit StringBuffer8(std::vector<uint8_t> data)
- : data_(std::move(data)), string_(data_.data(), data_.size()) {}
+ explicit StringBuffer8(std::vector<uint8_t> data) : data_(std::move(data)) {}
- const StringView& string() override { return string_; }
+ StringView string() const override {
+ return StringView(data_.data(), data_.size());
+ }
private:
std::vector<uint8_t> data_;
- StringView string_;
};
// Contains a 16 bit string (String16).
class StringBuffer16 : public StringBuffer {
public:
- explicit StringBuffer16(String16 data)
- : data_(std::move(data)), string_(data_.characters16(), data_.length()) {}
+ explicit StringBuffer16(String16 data) : data_(std::move(data)) {}
- const StringView& string() override { return string_; }
+ StringView string() const override {
+ return StringView(data_.characters16(), data_.length());
+ }
private:
String16 data_;
- StringView string_;
};
} // namespace
// static
-std::unique_ptr<StringBuffer> StringBuffer::create(const StringView& string) {
+std::unique_ptr<StringBuffer> StringBuffer::create(StringView string) {
if (string.length() == 0) return std::make_unique<EmptyStringBuffer>();
if (string.is8Bit()) {
return std::make_unique<StringBuffer8>(std::vector<uint8_t>(
@@ -190,3 +158,10 @@ String16 stackTraceIdToString(uintptr_t id) {
}
} // namespace v8_inspector
+
+namespace v8_crdtp {
+void SerializerTraits<v8_inspector::protocol::Binary>::Serialize(
+ const v8_inspector::protocol::Binary& binary, std::vector<uint8_t>* out) {
+ cbor::EncodeBinary(span<uint8_t>(binary.data(), binary.size()), out);
+}
+} // namespace v8_crdtp
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 15925b663c..50d3614e54 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -21,59 +21,15 @@ namespace protocol {
class Value;
using String = v8_inspector::String16;
-using StringBuilder = v8_inspector::String16Builder;
class StringUtil {
public:
- static String substring(const String& s, size_t pos, size_t len) {
- return s.substring(pos, len);
- }
- static String fromInteger(int number) { return String::fromInteger(number); }
- static String fromInteger(size_t number) {
- return String::fromInteger(number);
- }
- static String fromDouble(double number) { return String::fromDouble(number); }
- static double toDouble(const char* s, size_t len, bool* isOk);
- static size_t find(const String& s, const char* needle) {
- return s.find(needle);
- }
- static size_t find(const String& s, const String& needle) {
- return s.find(needle);
- }
- static const size_t kNotFound = String::kNotFound;
- static void builderAppend(
- StringBuilder& builder, // NOLINT(runtime/references)
- const String& s) {
- builder.append(s);
- }
- static void builderAppend(
- StringBuilder& builder, // NOLINT(runtime/references)
- UChar c) {
- builder.append(c);
- }
- static void builderAppend(
- StringBuilder& builder, // NOLINT(runtime/references)
- const char* s, size_t len) {
- builder.append(s, len);
- }
- static void builderReserve(
- StringBuilder& builder, // NOLINT(runtime/references)
- size_t capacity) {
- builder.reserveCapacity(capacity);
- }
- static String builderToString(
- StringBuilder& builder) { // NOLINT(runtime/references)
- return builder.toString();
- }
- static std::unique_ptr<protocol::Value> parseJSON(const String16& json);
- static std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
-
static String fromUTF8(const uint8_t* data, size_t length) {
return String16::fromUTF8(reinterpret_cast<const char*>(data), length);
}
static String fromUTF16LE(const uint16_t* data, size_t length) {
- return String16::fromUTF16LE(data, length);
+ return String16::fromUTF16(data, length);
}
static const uint8_t* CharactersLatin1(const String& s) { return nullptr; }
@@ -133,4 +89,13 @@ String16 stackTraceIdToString(uintptr_t id);
} // namespace v8_inspector
+// See third_party/inspector_protocol/crdtp/serializer_traits.h.
+namespace v8_crdtp {
+template <>
+struct SerializerTraits<v8_inspector::protocol::Binary> {
+ static void Serialize(const v8_inspector::protocol::Binary& binary,
+ std::vector<uint8_t>* out);
+};
+} // namespace v8_crdtp
+
#endif // V8_INSPECTOR_STRING_UTIL_H_
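The header now closes with a v8_crdtp::SerializerTraits specialization (defined in string-util.cc above) so protocol::Binary values serialize as CBOR byte strings. A sketch of invoking it directly, with an illustrative payload built via the fromSpan factory seen in later hunks:

    const uint8_t payload[] = {0xde, 0xad, 0xbe, 0xef};
    v8_inspector::protocol::Binary binary =
        v8_inspector::protocol::Binary::fromSpan(payload, sizeof payload);
    std::vector<uint8_t> out;
    v8_crdtp::SerializerTraits<v8_inspector::protocol::Binary>::Serialize(
        binary, &out);
    // `out` now holds the CBOR byte-string encoding of the payload.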
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.cc b/deps/v8/src/inspector/v8-console-agent-impl.cc
index 66c96110d7..eb00c5cdb5 100644
--- a/deps/v8/src/inspector/v8-console-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-console-agent-impl.cc
@@ -27,23 +27,23 @@ V8ConsoleAgentImpl::V8ConsoleAgentImpl(
V8ConsoleAgentImpl::~V8ConsoleAgentImpl() = default;
Response V8ConsoleAgentImpl::enable() {
- if (m_enabled) return Response::OK();
+ if (m_enabled) return Response::Success();
m_state->setBoolean(ConsoleAgentState::consoleEnabled, true);
m_enabled = true;
m_session->inspector()->enableStackCapturingIfNeeded();
reportAllMessages();
- return Response::OK();
+ return Response::Success();
}
Response V8ConsoleAgentImpl::disable() {
- if (!m_enabled) return Response::OK();
+ if (!m_enabled) return Response::Success();
m_session->inspector()->disableStackCapturingIfNeeded();
m_state->setBoolean(ConsoleAgentState::consoleEnabled, false);
m_enabled = false;
- return Response::OK();
+ return Response::Success();
}
-Response V8ConsoleAgentImpl::clearMessages() { return Response::OK(); }
+Response V8ConsoleAgentImpl::clearMessages() { return Response::Success(); }
void V8ConsoleAgentImpl::restore() {
if (!m_state->booleanProperty(ConsoleAgentState::consoleEnabled, false))
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index f4d0ffa055..ec7709f8c6 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -595,7 +595,7 @@ static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject;
protocol::Response response = injectedScript->wrapObject(
value, "", WrapMode::kNoPreview, &wrappedObject);
- if (!response.isSuccess()) return;
+ if (!response.IsSuccess()) return;
std::unique_ptr<protocol::DictionaryValue> hints =
protocol::DictionaryValue::create();
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 0176bef645..2e5c8cd417 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -6,10 +6,12 @@
#include <algorithm>
+#include "../../third_party/inspector_protocol/crdtp/json.h"
#include "src/base/safe_conversions.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Debugger.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/remote-object-id.h"
#include "src/inspector/search-util.h"
@@ -241,6 +243,8 @@ String16 scopeType(v8::debug::ScopeIterator::ScopeType type) {
return Scope::TypeEnum::Eval;
case v8::debug::ScopeIterator::ScopeTypeModule:
return Scope::TypeEnum::Module;
+ case v8::debug::ScopeIterator::ScopeTypeWasmExpressionStack:
+ return Scope::TypeEnum::WasmExpressionStack;
}
UNREACHABLE();
return String16();
@@ -250,8 +254,8 @@ Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
InjectedScript* injectedScript,
std::unique_ptr<Array<Scope>>* scopes) {
*scopes = std::make_unique<Array<Scope>>();
- if (!injectedScript) return Response::OK();
- if (iterator->Done()) return Response::OK();
+ if (!injectedScript) return Response::Success();
+ if (iterator->Done()) return Response::Success();
String16 scriptId = String16::fromInteger(iterator->GetScriptId());
@@ -260,7 +264,7 @@ Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
Response result =
injectedScript->wrapObject(iterator->GetObject(), kBacktraceObjectGroup,
WrapMode::kNoPreview, &object);
- if (!result.isSuccess()) return result;
+ if (!result.IsSuccess()) return result;
auto scope = Scope::create()
.setType(scopeType(iterator->GetType()))
@@ -288,7 +292,7 @@ Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
}
(*scopes)->emplace_back(std::move(scope));
}
- return Response::OK();
+ return Response::Success();
}
protocol::DictionaryValue* getOrCreateObject(protocol::DictionaryValue* object,
@@ -342,17 +346,17 @@ Response V8DebuggerAgentImpl::enable(Maybe<double> maxScriptsCacheSize,
maxScriptsCacheSize.fromMaybe(std::numeric_limits<double>::max()));
*outDebuggerId =
m_debugger->debuggerIdFor(m_session->contextGroupId()).toString();
- if (enabled()) return Response::OK();
+ if (enabled()) return Response::Success();
if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
- return Response::Error("Script execution is prohibited");
+ return Response::ServerError("Script execution is prohibited");
enableImpl();
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::disable() {
- if (!enabled()) return Response::OK();
+ if (!enabled()) return Response::Success();
m_state->remove(DebuggerAgentState::breakpointsByRegex);
m_state->remove(DebuggerAgentState::breakpointsByUrl);
@@ -387,7 +391,7 @@ Response V8DebuggerAgentImpl::disable() {
m_enabled = false;
m_state->setBoolean(DebuggerAgentState::debuggerEnabled, false);
m_debugger->disable();
- return Response::OK();
+ return Response::Success();
}
void V8DebuggerAgentImpl::restore() {
@@ -419,21 +423,21 @@ void V8DebuggerAgentImpl::restore() {
}
Response V8DebuggerAgentImpl::setBreakpointsActive(bool active) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- if (m_breakpointsActive == active) return Response::OK();
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+ if (m_breakpointsActive == active) return Response::Success();
m_breakpointsActive = active;
m_debugger->setBreakpointsActive(active);
if (!active && !m_breakReason.empty()) {
clearBreakDetails();
m_debugger->setPauseOnNextCall(false, m_session->contextGroupId());
}
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setSkipAllPauses(bool skip) {
m_state->setBoolean(DebuggerAgentState::skipAllPauses, skip);
m_skipAllPauses = skip;
- return Response::OK();
+ return Response::Success();
}
static bool matches(V8InspectorImpl* inspector, const V8DebuggerScript& script,
@@ -467,13 +471,14 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
(optionalURLRegex.isJust() ? 1 : 0) +
(optionalScriptHash.isJust() ? 1 : 0);
if (specified != 1) {
- return Response::Error(
+ return Response::ServerError(
"Either url or urlRegex or scriptHash must be specified.");
}
int columnNumber = 0;
if (optionalColumnNumber.isJust()) {
columnNumber = optionalColumnNumber.fromJust();
- if (columnNumber < 0) return Response::Error("Incorrect column number");
+ if (columnNumber < 0)
+ return Response::ServerError("Incorrect column number");
}
BreakpointType type = BreakpointType::kByUrl;
@@ -513,7 +518,8 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
UNREACHABLE();
}
if (breakpoints->get(breakpointId)) {
- return Response::Error("Breakpoint at specified location already exists.");
+ return Response::ServerError(
+ "Breakpoint at specified location already exists.");
}
String16 hint;
@@ -537,7 +543,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
breakpointHints->setString(breakpointId, hint);
}
*outBreakpointId = breakpointId;
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setBreakpoint(
@@ -549,15 +555,17 @@ Response V8DebuggerAgentImpl::setBreakpoint(
location->getLineNumber(), location->getColumnNumber(0));
if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
m_breakpointIdToDebuggerBreakpointIds.end()) {
- return Response::Error("Breakpoint at specified location already exists.");
+ return Response::ServerError(
+ "Breakpoint at specified location already exists.");
}
*actualLocation = setBreakpointImpl(breakpointId, location->getScriptId(),
optionalCondition.fromMaybe(String16()),
location->getLineNumber(),
location->getColumnNumber(0));
- if (!*actualLocation) return Response::Error("Could not resolve breakpoint");
+ if (!*actualLocation)
+ return Response::ServerError("Could not resolve breakpoint");
*outBreakpointId = breakpointId;
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setBreakpointOnFunctionCall(
@@ -565,9 +573,9 @@ Response V8DebuggerAgentImpl::setBreakpointOnFunctionCall(
String16* outBreakpointId) {
InjectedScript::ObjectScope scope(m_session, functionObjectId);
Response response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (!scope.object()->IsFunction()) {
- return Response::Error("Could not find function with given id");
+ return Response::ServerError("Could not find function with given id");
}
v8::Local<v8::Function> function =
v8::Local<v8::Function>::Cast(scope.object());
@@ -575,35 +583,37 @@ Response V8DebuggerAgentImpl::setBreakpointOnFunctionCall(
generateBreakpointId(BreakpointType::kBreakpointAtEntry, function);
if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
m_breakpointIdToDebuggerBreakpointIds.end()) {
- return Response::Error("Breakpoint at specified location already exists.");
+ return Response::ServerError(
+ "Breakpoint at specified location already exists.");
}
v8::Local<v8::String> condition =
toV8String(m_isolate, optionalCondition.fromMaybe(String16()));
setBreakpointImpl(breakpointId, function, condition);
*outBreakpointId = breakpointId;
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setInstrumentationBreakpoint(
const String16& instrumentation, String16* outBreakpointId) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
String16 breakpointId = generateInstrumentationBreakpointId(instrumentation);
protocol::DictionaryValue* breakpoints = getOrCreateObject(
m_state, DebuggerAgentState::instrumentationBreakpoints);
if (breakpoints->get(breakpointId)) {
- return Response::Error("Instrumentation breakpoint is already enabled.");
+ return Response::ServerError(
+ "Instrumentation breakpoint is already enabled.");
}
breakpoints->setBoolean(breakpointId, true);
*outBreakpointId = breakpointId;
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
BreakpointType type;
String16 selector;
if (!parseBreakpointId(breakpointId, &type, &selector)) {
- return Response::OK();
+ return Response::Success();
}
protocol::DictionaryValue* breakpoints = nullptr;
switch (type) {
@@ -647,7 +657,7 @@ Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
}
removeBreakpointImpl(breakpointId, scripts);
- return Response::OK();
+ return Response::Success();
}
void V8DebuggerAgentImpl::removeBreakpointImpl(
@@ -679,7 +689,7 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
String16 scriptId = start->getScriptId();
if (start->getLineNumber() < 0 || start->getColumnNumber(0) < 0)
- return Response::Error(
+ return Response::ServerError(
"start.lineNumber and start.columnNumber should be >= 0");
v8::debug::Location v8Start(start->getLineNumber(),
@@ -687,23 +697,24 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
v8::debug::Location v8End;
if (end.isJust()) {
if (end.fromJust()->getScriptId() != scriptId)
- return Response::Error("Locations should contain the same scriptId");
+ return Response::ServerError(
+ "Locations should contain the same scriptId");
int line = end.fromJust()->getLineNumber();
int column = end.fromJust()->getColumnNumber(0);
if (line < 0 || column < 0)
- return Response::Error(
+ return Response::ServerError(
"end.lineNumber and end.columnNumber should be >= 0");
v8End = v8::debug::Location(line, column);
}
auto it = m_scripts.find(scriptId);
- if (it == m_scripts.end()) return Response::Error("Script not found");
+ if (it == m_scripts.end()) return Response::ServerError("Script not found");
std::vector<v8::debug::BreakLocation> v8Locations;
{
v8::HandleScope handleScope(m_isolate);
int contextId = it->second->executionContextId();
InspectedContext* inspected = m_inspector->getContext(contextId);
if (!inspected) {
- return Response::Error("Cannot retrive script context");
+ return Response::ServerError("Cannot retrieve script context");
}
v8::Context::Scope contextScope(inspected->context());
v8::MicrotasksScope microtasks(m_isolate,
@@ -727,23 +738,23 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
}
(*locations)->emplace_back(std::move(breakLocation));
}
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::continueToLocation(
std::unique_ptr<protocol::Debugger::Location> location,
Maybe<String16> targetCallFrames) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
ScriptsMap::iterator it = m_scripts.find(location->getScriptId());
if (it == m_scripts.end()) {
- return Response::Error("Cannot continue to specified location");
+ return Response::ServerError("Cannot continue to specified location");
}
V8DebuggerScript* script = it->second.get();
int contextId = script->executionContextId();
InspectedContext* inspected = m_inspector->getContext(contextId);
if (!inspected)
- return Response::Error("Cannot continue to specified location");
+ return Response::ServerError("Cannot continue to specified location");
v8::HandleScope handleScope(m_isolate);
v8::Context::Scope contextScope(inspected->context());
return m_debugger->continueToLocation(
@@ -757,7 +768,7 @@ Response V8DebuggerAgentImpl::getStackTrace(
std::unique_ptr<protocol::Runtime::StackTrace>* outStackTrace) {
bool isOk = false;
int64_t id = inStackTraceId->getId().toInteger64(&isOk);
- if (!isOk) return Response::Error("Invalid stack trace id");
+ if (!isOk) return Response::ServerError("Invalid stack trace id");
V8DebuggerId debuggerId;
if (inStackTraceId->hasDebuggerId()) {
@@ -765,19 +776,20 @@ Response V8DebuggerAgentImpl::getStackTrace(
} else {
debuggerId = m_debugger->debuggerIdFor(m_session->contextGroupId());
}
- if (!debuggerId.isValid()) return Response::Error("Invalid stack trace id");
+ if (!debuggerId.isValid())
+ return Response::ServerError("Invalid stack trace id");
V8StackTraceId v8StackTraceId(id, debuggerId.pair());
if (v8StackTraceId.IsInvalid())
- return Response::Error("Invalid stack trace id");
+ return Response::ServerError("Invalid stack trace id");
auto stack =
m_debugger->stackTraceFor(m_session->contextGroupId(), v8StackTraceId);
if (!stack) {
- return Response::Error("Stack trace with given id is not found");
+ return Response::ServerError("Stack trace with given id is not found");
}
*outStackTrace = stack->buildInspectorObject(
m_debugger, m_debugger->maxAsyncCallChainDepth());
- return Response::OK();
+ return Response::Success();
}
bool V8DebuggerAgentImpl::isFunctionBlackboxed(const String16& scriptId,
@@ -877,13 +889,13 @@ Response V8DebuggerAgentImpl::searchInContent(
v8::HandleScope handles(m_isolate);
ScriptsMap::iterator it = m_scripts.find(scriptId);
if (it == m_scripts.end())
- return Response::Error("No script for id: " + scriptId);
+ return Response::ServerError("No script for id: " + scriptId.utf8());
*results = std::make_unique<protocol::Array<protocol::Debugger::SearchMatch>>(
searchInTextByLinesImpl(m_session, it->second->source(0), query,
optionalCaseSensitive.fromMaybe(false),
optionalIsRegex.fromMaybe(false)));
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setScriptSource(
@@ -893,11 +905,11 @@ Response V8DebuggerAgentImpl::setScriptSource(
Maybe<protocol::Runtime::StackTrace>* asyncStackTrace,
Maybe<protocol::Runtime::StackTraceId>* asyncStackTraceId,
Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
ScriptsMap::iterator it = m_scripts.find(scriptId);
if (it == m_scripts.end()) {
- return Response::Error("No script with given id found");
+ return Response::ServerError("No script with given id found");
}
int contextId = it->second->executionContextId();
InspectedContext* inspected = m_inspector->getContext(contextId);
@@ -920,17 +932,17 @@ Response V8DebuggerAgentImpl::setScriptSource(
.setColumnNumber(result.column_number != -1 ? result.column_number
: 0)
.build();
- return Response::OK();
+ return Response::Success();
} else {
*stackChanged = result.stack_changed;
}
std::unique_ptr<Array<CallFrame>> callFrames;
Response response = currentCallFrames(&callFrames);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
*newCallFrames = std::move(callFrames);
*asyncStackTrace = currentAsyncStackTrace();
*asyncStackTraceId = currentExternalStackTrace();
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::restartFrame(
@@ -938,52 +950,52 @@ Response V8DebuggerAgentImpl::restartFrame(
std::unique_ptr<Array<CallFrame>>* newCallFrames,
Maybe<protocol::Runtime::StackTrace>* asyncStackTrace,
Maybe<protocol::Runtime::StackTraceId>* asyncStackTraceId) {
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
InjectedScript::CallFrameScope scope(m_session, callFrameId);
Response response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
int frameOrdinal = static_cast<int>(scope.frameOrdinal());
auto it = v8::debug::StackTraceIterator::Create(m_isolate, frameOrdinal);
if (it->Done()) {
- return Response::Error("Could not find call frame with given id");
+ return Response::ServerError("Could not find call frame with given id");
}
if (!it->Restart()) {
return Response::InternalError();
}
response = currentCallFrames(newCallFrames);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
*asyncStackTrace = currentAsyncStackTrace();
*asyncStackTraceId = currentExternalStackTrace();
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::getScriptSource(
const String16& scriptId, String16* scriptSource,
Maybe<protocol::Binary>* bytecode) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
ScriptsMap::iterator it = m_scripts.find(scriptId);
if (it == m_scripts.end())
- return Response::Error("No script for id: " + scriptId);
+ return Response::ServerError("No script for id: " + scriptId.utf8());
*scriptSource = it->second->source(0);
v8::MemorySpan<const uint8_t> span;
if (it->second->wasmBytecode().To(&span)) {
*bytecode = protocol::Binary::fromSpan(span.data(), span.size());
}
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::getWasmBytecode(const String16& scriptId,
protocol::Binary* bytecode) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
ScriptsMap::iterator it = m_scripts.find(scriptId);
if (it == m_scripts.end())
- return Response::Error("No script for id: " + scriptId);
+ return Response::ServerError("No script for id: " + scriptId.utf8());
v8::MemorySpan<const uint8_t> span;
if (!it->second->wasmBytecode().To(&span))
- return Response::Error("Script with id " + scriptId +
- " is not WebAssembly");
+ return Response::ServerError("Script with id " + scriptId.utf8() +
+ " is not WebAssembly");
*bytecode = protocol::Binary::fromSpan(span.data(), span.size());
- return Response::OK();
+ return Response::Success();
}
void V8DebuggerAgentImpl::pushBreakDetails(
@@ -1021,8 +1033,8 @@ void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
}
Response V8DebuggerAgentImpl::pause() {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- if (isPaused()) return Response::OK();
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+ if (isPaused()) return Response::Success();
if (m_debugger->canBreakProgram()) {
m_debugger->interruptAndBreak(m_session->contextGroupId());
} else {
@@ -1031,47 +1043,48 @@ Response V8DebuggerAgentImpl::pause() {
}
pushBreakDetails(protocol::Debugger::Paused::ReasonEnum::Other, nullptr);
}
- return Response::OK();
+ return Response::Success();
}
-Response V8DebuggerAgentImpl::resume() {
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+Response V8DebuggerAgentImpl::resume(Maybe<bool> terminateOnResume) {
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
m_session->releaseObjectGroup(kBacktraceObjectGroup);
- m_debugger->continueProgram(m_session->contextGroupId());
- return Response::OK();
+ m_debugger->continueProgram(m_session->contextGroupId(),
+ terminateOnResume.fromMaybe(false));
+ return Response::Success();
}
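Debugger.resume also gains an optional terminateOnResume parameter, threaded through to continueProgram; when the client omits it, fromMaybe supplies false and the previous resume behavior is unchanged. The default-handling idiom, as a sketch:

    // Maybe<bool> is the protocol's optional wrapper; an absent value
    // falls back to the supplied default.
    bool terminate_on_resume = terminateOnResume.fromMaybe(false);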
Response V8DebuggerAgentImpl::stepOver() {
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepOverStatement(m_session->contextGroupId());
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::stepInto(Maybe<bool> inBreakOnAsyncCall) {
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepIntoStatement(m_session->contextGroupId(),
inBreakOnAsyncCall.fromMaybe(false));
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::stepOut() {
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepOutOfFunction(m_session->contextGroupId());
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::pauseOnAsyncCall(
std::unique_ptr<protocol::Runtime::StackTraceId> inParentStackTraceId) {
// Deprecated, just return OK.
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setPauseOnExceptions(
const String16& stringPauseState) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
v8::debug::ExceptionBreakState pauseState;
if (stringPauseState == "none") {
pauseState = v8::debug::NoBreakOnException;
@@ -1080,11 +1093,11 @@ Response V8DebuggerAgentImpl::setPauseOnExceptions(
} else if (stringPauseState == "uncaught") {
pauseState = v8::debug::BreakOnUncaughtException;
} else {
- return Response::Error("Unknown pause on exceptions mode: " +
- stringPauseState);
+ return Response::ServerError("Unknown pause on exceptions mode: " +
+ stringPauseState.utf8());
}
setPauseOnExceptionsImpl(pauseState);
- return Response::OK();
+ return Response::Success();
}
void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(int pauseState) {
@@ -1102,17 +1115,17 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
Maybe<bool> throwOnSideEffect, Maybe<double> timeout,
std::unique_ptr<RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
InjectedScript::CallFrameScope scope(m_session, callFrameId);
Response response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (includeCommandLineAPI.fromMaybe(false)) scope.installCommandLineAPI();
if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
int frameOrdinal = static_cast<int>(scope.frameOrdinal());
auto it = v8::debug::StackTraceIterator::Create(m_isolate, frameOrdinal);
if (it->Done()) {
- return Response::Error("Could not find call frame with given id");
+ return Response::ServerError("Could not find call frame with given id");
}
v8::MaybeLocal<v8::Value> maybeResultValue;
@@ -1120,7 +1133,7 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
V8InspectorImpl::EvaluateScope evaluateScope(scope);
if (timeout.isJust()) {
response = evaluateScope.setTimeout(timeout.fromJust() / 1000.0);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
}
maybeResultValue = it->Evaluate(toV8String(m_isolate, expression),
throwOnSideEffect.fromMaybe(false));
@@ -1128,7 +1141,7 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
// Re-initialize after running client's code, as it could have destroyed
// context or session.
response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
: WrapMode::kNoPreview;
if (returnByValue.fromMaybe(false)) mode = WrapMode::kForceValue;
@@ -1141,20 +1154,20 @@ Response V8DebuggerAgentImpl::setVariableValue(
int scopeNumber, const String16& variableName,
std::unique_ptr<protocol::Runtime::CallArgument> newValueArgument,
const String16& callFrameId) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
InjectedScript::CallFrameScope scope(m_session, callFrameId);
Response response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
v8::Local<v8::Value> newValue;
response = scope.injectedScript()->resolveCallArgument(newValueArgument.get(),
&newValue);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
int frameOrdinal = static_cast<int>(scope.frameOrdinal());
auto it = v8::debug::StackTraceIterator::Create(m_isolate, frameOrdinal);
if (it->Done()) {
- return Response::Error("Could not find call frame with given id");
+ return Response::ServerError("Could not find call frame with given id");
}
auto scopeIterator = it->GetScopeIterator();
while (!scopeIterator->Done() && scopeNumber > 0) {
@@ -1162,7 +1175,7 @@ Response V8DebuggerAgentImpl::setVariableValue(
scopeIterator->Advance();
}
if (scopeNumber != 0) {
- return Response::Error("Could not find scope with given number");
+ return Response::ServerError("Could not find scope with given number");
}
if (!scopeIterator->SetVariableValue(toV8String(m_isolate, variableName),
@@ -1170,40 +1183,40 @@ Response V8DebuggerAgentImpl::setVariableValue(
scope.tryCatch().HasCaught()) {
return Response::InternalError();
}
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setReturnValue(
std::unique_ptr<protocol::Runtime::CallArgument> protocolNewValue) {
- if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
v8::HandleScope handleScope(m_isolate);
auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
if (iterator->Done()) {
- return Response::Error("Could not find top call frame");
+ return Response::ServerError("Could not find top call frame");
}
if (iterator->GetReturnValue().IsEmpty()) {
- return Response::Error(
+ return Response::ServerError(
"Could not update return value at non-return position");
}
InjectedScript::ContextScope scope(m_session, iterator->GetContextId());
Response response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
v8::Local<v8::Value> newValue;
response = scope.injectedScript()->resolveCallArgument(protocolNewValue.get(),
&newValue);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
v8::debug::SetReturnValue(m_isolate, newValue);
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setAsyncCallStackDepth(int depth) {
if (!enabled() && !m_session->runtimeAgent()->enabled()) {
- return Response::Error(kDebuggerNotEnabled);
+ return Response::ServerError(kDebuggerNotEnabled);
}
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, depth);
m_debugger->setAsyncCallStackDepth(this, depth);
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setBlackboxPatterns(
@@ -1212,7 +1225,7 @@ Response V8DebuggerAgentImpl::setBlackboxPatterns(
m_blackboxPattern = nullptr;
resetBlackboxedStateCache();
m_state->remove(DebuggerAgentState::blackboxPattern);
- return Response::OK();
+ return Response::Success();
}
String16Builder patternBuilder;
@@ -1225,19 +1238,20 @@ Response V8DebuggerAgentImpl::setBlackboxPatterns(
patternBuilder.append(')');
String16 pattern = patternBuilder.toString();
Response response = setBlackboxPattern(pattern);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
resetBlackboxedStateCache();
m_state->setString(DebuggerAgentState::blackboxPattern, pattern);
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::setBlackboxPattern(const String16& pattern) {
std::unique_ptr<V8Regex> regex(new V8Regex(
m_inspector, pattern, true /** caseSensitive */, false /** multiline */));
if (!regex->isValid())
- return Response::Error("Pattern parser error: " + regex->errorMessage());
+ return Response::ServerError("Pattern parser error: " +
+ regex->errorMessage().utf8());
m_blackboxPattern = std::move(regex);
- return Response::OK();
+ return Response::Success();
}
void V8DebuggerAgentImpl::resetBlackboxedStateCache() {
@@ -1252,12 +1266,12 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
inPositions) {
auto it = m_scripts.find(scriptId);
if (it == m_scripts.end())
- return Response::Error("No script with passed id.");
+ return Response::ServerError("No script with passed id.");
if (inPositions->empty()) {
m_blackboxedPositions.erase(scriptId);
it->second->resetBlackboxedStateCache();
- return Response::OK();
+ return Response::Success();
}
std::vector<std::pair<int, int>> positions;
@@ -1265,9 +1279,10 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
for (const std::unique_ptr<protocol::Debugger::ScriptPosition>& position :
*inPositions) {
if (position->getLineNumber() < 0)
- return Response::Error("Position missing 'line' or 'line' < 0.");
+ return Response::ServerError("Position missing 'line' or 'line' < 0.");
if (position->getColumnNumber() < 0)
- return Response::Error("Position missing 'column' or 'column' < 0.");
+ return Response::ServerError(
+ "Position missing 'column' or 'column' < 0.");
positions.push_back(
std::make_pair(position->getLineNumber(), position->getColumnNumber()));
}
@@ -1277,20 +1292,20 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
if (positions[i - 1].first == positions[i].first &&
positions[i - 1].second < positions[i].second)
continue;
- return Response::Error(
+ return Response::ServerError(
"Input positions array is not sorted or contains duplicate values.");
}
m_blackboxedPositions[scriptId] = positions;
it->second->resetBlackboxedStateCache();
- return Response::OK();
+ return Response::Success();
}
Response V8DebuggerAgentImpl::currentCallFrames(
std::unique_ptr<Array<CallFrame>>* result) {
if (!isPaused()) {
*result = std::make_unique<Array<CallFrame>>();
- return Response::OK();
+ return Response::Success();
}
v8::HandleScope handles(m_isolate);
*result = std::make_unique<Array<CallFrame>>();
@@ -1309,7 +1324,7 @@ Response V8DebuggerAgentImpl::currentCallFrames(
auto scopeIterator = iterator->GetScopeIterator();
Response res =
buildScopes(m_isolate, scopeIterator.get(), injectedScript, &scopes);
- if (!res.isSuccess()) return res;
+ if (!res.IsSuccess()) return res;
std::unique_ptr<RemoteObject> protocolReceiver;
if (injectedScript) {
@@ -1318,7 +1333,7 @@ Response V8DebuggerAgentImpl::currentCallFrames(
res =
injectedScript->wrapObject(receiver, kBacktraceObjectGroup,
WrapMode::kNoPreview, &protocolReceiver);
- if (!res.isSuccess()) return res;
+ if (!res.IsSuccess()) return res;
}
}
if (!protocolReceiver) {
@@ -1368,12 +1383,12 @@ Response V8DebuggerAgentImpl::currentCallFrames(
std::unique_ptr<RemoteObject> value;
res = injectedScript->wrapObject(returnValue, kBacktraceObjectGroup,
WrapMode::kNoPreview, &value);
- if (!res.isSuccess()) return res;
+ if (!res.IsSuccess()) return res;
frame->setReturnValue(std::move(value));
}
(*result)->emplace_back(std::move(frame));
}
- return Response::OK();
+ return Response::Success();
}
std::unique_ptr<protocol::Runtime::StackTrace>
@@ -1399,6 +1414,15 @@ bool V8DebuggerAgentImpl::isPaused() const {
return m_debugger->isPausedInContextGroup(m_session->contextGroupId());
}
+static String16 getScriptLanguage(const V8DebuggerScript& script) {
+ switch (script.getLanguage()) {
+ case V8DebuggerScript::Language::WebAssembly:
+ return protocol::Debugger::ScriptLanguageEnum::WebAssembly;
+ case V8DebuggerScript::Language::JavaScript:
+ return protocol::Debugger::ScriptLanguageEnum::JavaScript;
+ }
+}
+
void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8DebuggerScript> script, bool success) {
v8::HandleScope handles(m_isolate);
@@ -1417,14 +1441,23 @@ void V8DebuggerAgentImpl::didParseSource(
if (inspected) {
// Script reused between different groups/sessions can have a stale
// execution context id.
+ const String16& aux = inspected->auxData();
+ std::vector<uint8_t> cbor;
+ v8_crdtp::json::ConvertJSONToCBOR(
+ v8_crdtp::span<uint16_t>(aux.characters16(), aux.length()), &cbor);
executionContextAuxData = protocol::DictionaryValue::cast(
- protocol::StringUtil::parseJSON(inspected->auxData()));
+ protocol::Value::parseBinary(cbor.data(), cbor.size()));
}
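
auxData arrives as UTF-16 JSON, but the rolled protocol layer only parses
binary CBOR, so the hunk above converts before parsing. A hedged sketch of the
same round trip in isolation, using the v8_crdtp converters seen above;
parseAuxData is a hypothetical helper name:

  // Hedged sketch: UTF-16 JSON -> CBOR -> protocol value, as above.
  std::unique_ptr<protocol::DictionaryValue> parseAuxData(const String16& json) {
    std::vector<uint8_t> cbor;
    v8_crdtp::json::ConvertJSONToCBOR(
        v8_crdtp::span<uint16_t>(json.characters16(), json.length()), &cbor);
    return protocol::DictionaryValue::cast(
        protocol::Value::parseBinary(cbor.data(), cbor.size()));
  }
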
bool isLiveEdit = script->isLiveEdit();
bool hasSourceURLComment = script->hasSourceURLComment();
bool isModule = script->isModule();
String16 scriptId = script->scriptId();
String16 scriptURL = script->sourceURL();
+ String16 scriptLanguage = getScriptLanguage(*script);
+ Maybe<int> codeOffset =
+ script->getLanguage() == V8DebuggerScript::Language::JavaScript
+ ? Maybe<int>()
+ : script->codeOffset();
m_scripts[scriptId] = std::move(script);
// Release the strong reference to get notified when debugger is the only
@@ -1460,25 +1493,28 @@ void V8DebuggerAgentImpl::didParseSource(
scriptRef->endLine(), scriptRef->endColumn(), contextId,
scriptRef->hash(), std::move(executionContextAuxDataParam),
std::move(sourceMapURLParam), hasSourceURLParam, isModuleParam,
- scriptRef->length(), std::move(stackTrace));
+ scriptRef->length(), std::move(stackTrace), std::move(codeOffset),
+ std::move(scriptLanguage));
return;
}
- // TODO(herhut, dgozman): Report correct length for WASM if needed for
+ // TODO(herhut, dgozman): Report correct length for Wasm if needed for
// coverage. Or do not send the length at all and change coverage instead.
if (scriptRef->isSourceLoadedLazily()) {
m_frontend.scriptParsed(
scriptId, scriptURL, 0, 0, 0, 0, contextId, scriptRef->hash(),
std::move(executionContextAuxDataParam), isLiveEditParam,
std::move(sourceMapURLParam), hasSourceURLParam, isModuleParam, 0,
- std::move(stackTrace));
+ std::move(stackTrace), std::move(codeOffset),
+ std::move(scriptLanguage));
} else {
m_frontend.scriptParsed(
scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
scriptRef->endLine(), scriptRef->endColumn(), contextId,
scriptRef->hash(), std::move(executionContextAuxDataParam),
isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
- isModuleParam, scriptRef->length(), std::move(stackTrace));
+ isModuleParam, scriptRef->length(), std::move(stackTrace),
+ std::move(codeOffset), std::move(scriptLanguage));
}
std::vector<protocol::DictionaryValue*> potentialBreakpoints;
@@ -1654,7 +1690,7 @@ void V8DebuggerAgentImpl::didPause(
std::unique_ptr<Array<CallFrame>> protocolCallFrames;
Response response = currentCallFrames(&protocolCallFrames);
- if (!response.isSuccess())
+ if (!response.IsSuccess())
protocolCallFrames = std::make_unique<Array<CallFrame>>();
m_frontend.paused(std::move(protocolCallFrames), breakReason,
@@ -1665,6 +1701,7 @@ void V8DebuggerAgentImpl::didPause(
void V8DebuggerAgentImpl::didContinue() {
clearBreakDetails();
m_frontend.resumed();
+ m_frontend.flush();
}
void V8DebuggerAgentImpl::breakProgram(
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 540d655bc2..df719a4fa3 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -98,7 +98,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Response getWasmBytecode(const String16& scriptId,
protocol::Binary* bytecode) override;
Response pause() override;
- Response resume() override;
+ Response resume(Maybe<bool> terminateOnResume) override;
Response stepOver() override;
Response stepInto(Maybe<bool> inBreakOnAsyncCall) override;
Response stepOut() override;
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index f4b3f487c1..551beb242b 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -6,6 +6,7 @@
#include "src/base/memory.h"
#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Debugger.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger-agent-impl.h"
#include "src/inspector/v8-inspector-impl.h"
@@ -120,10 +121,16 @@ class ActualScript : public V8DebuggerScript {
if (!script->IsWasm()) return v8::Nothing<v8::MemorySpan<const uint8_t>>();
return v8::Just(v8::debug::WasmScript::Cast(*script)->Bytecode());
}
+ Language getLanguage() const override { return m_language; }
int startLine() const override { return m_startLine; }
int startColumn() const override { return m_startColumn; }
int endLine() const override { return m_endLine; }
int endColumn() const override { return m_endColumn; }
+ int codeOffset() const override {
+ auto script = this->script();
+ if (!script->IsWasm()) return 0;
+ return v8::debug::WasmScript::Cast(*script)->CodeOffset();
+ }
bool isSourceLoadedLazily() const override { return false; }
int length() const override {
v8::HandleScope scope(m_isolate);
@@ -274,6 +281,11 @@ class ActualScript : public V8DebuggerScript {
}
USE(script->ContextId().To(&m_executionContextId));
+ if (script->IsWasm()) {
+ m_language = V8DebuggerScript::Language::WebAssembly;
+ } else {
+ m_language = V8DebuggerScript::Language::JavaScript;
+ }
m_isModule = script->IsModule();
@@ -297,6 +309,7 @@ class ActualScript : public V8DebuggerScript {
V8DebuggerAgentImpl* m_agent;
String16 m_sourceMappingURL;
+ Language m_language;
bool m_isLiveEdit = false;
bool m_isModule = false;
mutable String16 m_hash;
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index 164e017610..d4ad784394 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -46,6 +46,7 @@ class V8InspectorClient;
class V8DebuggerScript {
public:
+ enum class Language { JavaScript, WebAssembly };
static std::unique_ptr<V8DebuggerScript> Create(
v8::Isolate* isolate, v8::Local<v8::debug::Script> script,
bool isLiveEdit, V8DebuggerAgentImpl* agent, V8InspectorClient* client);
@@ -59,11 +60,13 @@ class V8DebuggerScript {
virtual const String16& sourceMappingURL() const = 0;
virtual String16 source(size_t pos, size_t len = UINT_MAX) const = 0;
virtual v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const = 0;
+ virtual Language getLanguage() const = 0;
virtual const String16& hash() const = 0;
virtual int startLine() const = 0;
virtual int startColumn() const = 0;
virtual int endLine() const = 0;
virtual int endColumn() const = 0;
+ virtual int codeOffset() const = 0;
int executionContextId() const { return m_executionContextId; }
virtual bool isLiveEdit() const = 0;
virtual bool isModule() const = 0;
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 3bdbba80b1..4a2119ff1c 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -124,6 +124,7 @@ void V8Debugger::enable() {
m_isolate->AddNearHeapLimitCallback(&V8Debugger::nearHeapLimitCallback, this);
v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
+ v8::debug::TierDownAllModulesPerIsolate(m_isolate);
}
void V8Debugger::disable() {
@@ -146,6 +147,7 @@ void V8Debugger::disable() {
m_taskWithScheduledBreakPauseRequested = false;
m_pauseOnNextCallRequested = false;
m_pauseOnAsyncCall = false;
+ v8::debug::TierUpAllModulesPerIsolate(m_isolate);
v8::debug::SetDebugDelegate(m_isolate, nullptr);
m_isolate->RemoveNearHeapLimitCallback(&V8Debugger::nearHeapLimitCallback,
m_originalHeapLimit);
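
Enabling the debugger now tiers down every wasm module in the isolate (to the
non-optimizing tier, so breakpoints and stepping work), and disable() tiers
them back up. A hedged sketch of the same pairing as an RAII guard;
WasmDebugScope is hypothetical, while the two v8::debug calls are the ones
added above:

  // Hedged sketch: scoping the wasm tier-down performed by enable()/disable().
  class WasmDebugScope {
   public:
    explicit WasmDebugScope(v8::Isolate* isolate) : isolate_(isolate) {
      v8::debug::TierDownAllModulesPerIsolate(isolate_);
    }
    ~WasmDebugScope() { v8::debug::TierUpAllModulesPerIsolate(isolate_); }

   private:
    v8::Isolate* const isolate_;
  };
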
@@ -245,9 +247,15 @@ void V8Debugger::interruptAndBreak(int targetContextGroupId) {
nullptr);
}
-void V8Debugger::continueProgram(int targetContextGroupId) {
+void V8Debugger::continueProgram(int targetContextGroupId,
+ bool terminateOnResume) {
if (m_pausedContextGroupId != targetContextGroupId) return;
- if (isPaused()) m_inspector->client()->quitMessageLoopOnPause();
+ if (isPaused()) {
+ if (terminateOnResume) {
+ v8::debug::SetTerminateOnResume(m_isolate);
+ }
+ m_inspector->client()->quitMessageLoopOnPause();
+ }
}
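
With terminateOnResume, the debugger arms V8's terminate-on-resume flag before
asking the embedder to quit the pause message loop, so the paused script is
torn down instead of continuing. A hedged sketch of driving this through
V8InspectorSession::resume(bool), which this patch adds further down; the
session pointer and pause state are assumed:

  // Hedged sketch: resume-and-terminate from the embedder side.
  void abortPausedScript(v8_inspector::V8InspectorSession* session) {
    // Debugger.resume with terminateOnResume=true: execution terminates as
    // soon as the pause loop unwinds.
    session->resume(/*terminateOnResume=*/true);
  }
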
void V8Debugger::breakProgramOnAssert(int targetContextGroupId) {
@@ -295,7 +303,10 @@ bool V8Debugger::asyncStepOutOfFunction(int targetContextGroupId,
bool onlyAtReturn) {
v8::HandleScope handleScope(m_isolate);
auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
- CHECK(!iterator->Done());
+ // When stepping through extensions code, it is possible that the
+ // iterator doesn't have any frames, since we exclude all frames
+ // that correspond to extension scripts.
+ if (iterator->Done()) return false;
bool atReturn = !iterator->GetReturnValue().IsEmpty();
iterator->Advance();
// Synchronous stack has more than one frame.
@@ -328,8 +339,8 @@ void V8Debugger::terminateExecution(
std::unique_ptr<TerminateExecutionCallback> callback) {
if (m_terminateExecutionCallback) {
if (callback) {
- callback->sendFailure(
- Response::Error("There is currently a termination request in progress"));
+ callback->sendFailure(Response::ServerError(
+ "There is currently a termination request in progress"));
}
return;
}
@@ -383,9 +394,9 @@ Response V8Debugger::continueToLocation(
}
continueProgram(targetContextGroupId);
// TODO(kozyatinskiy): Return actual line and column number.
- return Response::OK();
+ return Response::Success();
} else {
- return Response::Error("Cannot continue to specified location");
+ return Response::ServerError("Cannot continue to specified location");
}
}
@@ -674,6 +685,9 @@ v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
case v8::debug::ScopeIterator::ScopeTypeModule:
description = "Module" + nameSuffix;
break;
+ case v8::debug::ScopeIterator::ScopeTypeWasmExpressionStack:
+ description = "Wasm Expression Stack" + nameSuffix;
+ break;
}
v8::Local<v8::Object> object = iterator->GetObject();
createDataProperty(context, scope,
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index f5605818ab..60837f47ca 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -77,7 +77,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
bool canBreakProgram();
void breakProgram(int targetContextGroupId);
void interruptAndBreak(int targetContextGroupId);
- void continueProgram(int targetContextGroupId);
+ void continueProgram(int targetContextGroupId,
+ bool terminateOnResume = false);
void breakProgramOnAssert(int targetContextGroupId);
void setPauseOnNextCall(bool, int targetContextGroupId);
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index 8bd68cdbc6..3d6035c762 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -173,7 +173,7 @@ void V8HeapProfilerAgentImpl::restore() {
Response V8HeapProfilerAgentImpl::collectGarbage() {
m_isolate->LowMemoryNotification();
- return Response::OK();
+ return Response::Success();
}
Response V8HeapProfilerAgentImpl::startTrackingHeapObjects(
@@ -183,7 +183,7 @@ Response V8HeapProfilerAgentImpl::startTrackingHeapObjects(
m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled,
allocationTrackingEnabled);
startTrackingHeapObjectsInternal(allocationTrackingEnabled);
- return Response::OK();
+ return Response::Success();
}
Response V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
@@ -192,12 +192,12 @@ Response V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
takeHeapSnapshot(std::move(reportProgress),
std::move(treatGlobalObjectsAsRoots));
stopTrackingHeapObjectsInternal();
- return Response::OK();
+ return Response::Success();
}
Response V8HeapProfilerAgentImpl::enable() {
m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, true);
- return Response::OK();
+ return Response::Success();
}
Response V8HeapProfilerAgentImpl::disable() {
@@ -209,13 +209,13 @@ Response V8HeapProfilerAgentImpl::disable() {
}
m_isolate->GetHeapProfiler()->ClearObjectIds();
m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, false);
- return Response::OK();
+ return Response::Success();
}
Response V8HeapProfilerAgentImpl::takeHeapSnapshot(
Maybe<bool> reportProgress, Maybe<bool> treatGlobalObjectsAsRoots) {
v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
- if (!profiler) return Response::Error("Cannot access v8 heap profiler");
+ if (!profiler) return Response::ServerError("Cannot access v8 heap profiler");
std::unique_ptr<HeapSnapshotProgress> progress;
if (reportProgress.fromMaybe(false))
progress.reset(new HeapSnapshotProgress(&m_frontend));
@@ -223,11 +223,11 @@ Response V8HeapProfilerAgentImpl::takeHeapSnapshot(
GlobalObjectNameResolver resolver(m_session);
const v8::HeapSnapshot* snapshot = profiler->TakeHeapSnapshot(
progress.get(), &resolver, treatGlobalObjectsAsRoots.fromMaybe(true));
- if (!snapshot) return Response::Error("Failed to take heap snapshot");
+ if (!snapshot) return Response::ServerError("Failed to take heap snapshot");
HeapSnapshotOutputStream stream(&m_frontend);
snapshot->Serialize(&stream);
const_cast<v8::HeapSnapshot*>(snapshot)->Delete();
- return Response::OK();
+ return Response::Success();
}
Response V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
@@ -235,36 +235,38 @@ Response V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
bool ok;
int id = heapSnapshotObjectId.toInteger(&ok);
- if (!ok) return Response::Error("Invalid heap snapshot object id");
+ if (!ok) return Response::ServerError("Invalid heap snapshot object id");
v8::HandleScope handles(m_isolate);
v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
- if (heapObject.IsEmpty()) return Response::Error("Object is not available");
+ if (heapObject.IsEmpty())
+ return Response::ServerError("Object is not available");
if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
- return Response::Error("Object is not available");
+ return Response::ServerError("Object is not available");
*result = m_session->wrapObject(heapObject->CreationContext(), heapObject,
objectGroup.fromMaybe(""), false);
- if (!*result) return Response::Error("Object is not available");
- return Response::OK();
+ if (!*result) return Response::ServerError("Object is not available");
+ return Response::Success();
}
Response V8HeapProfilerAgentImpl::addInspectedHeapObject(
const String16& inspectedHeapObjectId) {
bool ok;
int id = inspectedHeapObjectId.toInteger(&ok);
- if (!ok) return Response::Error("Invalid heap snapshot object id");
+ if (!ok) return Response::ServerError("Invalid heap snapshot object id");
v8::HandleScope handles(m_isolate);
v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
- if (heapObject.IsEmpty()) return Response::Error("Object is not available");
+ if (heapObject.IsEmpty())
+ return Response::ServerError("Object is not available");
if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
- return Response::Error("Object is not available");
+ return Response::ServerError("Object is not available");
m_session->addInspectedObject(
std::unique_ptr<InspectableHeapObject>(new InspectableHeapObject(id)));
- return Response::OK();
+ return Response::Success();
}
Response V8HeapProfilerAgentImpl::getHeapObjectId(
@@ -274,12 +276,12 @@ Response V8HeapProfilerAgentImpl::getHeapObjectId(
v8::Local<v8::Context> context;
Response response =
m_session->unwrapObject(objectId, &value, &context, nullptr);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (value->IsUndefined()) return Response::InternalError();
v8::SnapshotObjectId id = m_isolate->GetHeapProfiler()->GetObjectId(value);
*heapSnapshotObjectId = String16::fromInteger(static_cast<size_t>(id));
- return Response::OK();
+ return Response::Success();
}
void V8HeapProfilerAgentImpl::requestHeapStatsUpdate() {
@@ -320,7 +322,7 @@ void V8HeapProfilerAgentImpl::stopTrackingHeapObjectsInternal() {
Response V8HeapProfilerAgentImpl::startSampling(
Maybe<double> samplingInterval) {
v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
- if (!profiler) return Response::Error("Cannot access v8 heap profiler");
+ if (!profiler) return Response::ServerError("Cannot access v8 heap profiler");
const unsigned defaultSamplingInterval = 1 << 15;
double samplingIntervalValue =
samplingInterval.fromMaybe(defaultSamplingInterval);
@@ -331,7 +333,7 @@ Response V8HeapProfilerAgentImpl::startSampling(
profiler->StartSamplingHeapProfiler(
static_cast<uint64_t>(samplingIntervalValue), 128,
v8::HeapProfiler::kSamplingForceGC);
- return Response::OK();
+ return Response::Success();
}
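
The default sampling interval is 1 << 15 = 32768 bytes, i.e. roughly one
sample per 32 KiB of allocation, with stacks captured up to 128 frames deep
and a GC forced when samples are taken. A hedged restatement of the call above
with the default spelled out:

  // Hedged sketch: startSampling's defaults made explicit.
  const uint64_t kDefaultInterval = 1 << 15;  // 32768 bytes = 32 KiB
  profiler->StartSamplingHeapProfiler(kDefaultInterval, /*stackDepth=*/128,
                                      v8::HeapProfiler::kSamplingForceGC);
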
namespace {
@@ -367,7 +369,7 @@ buildSampingHeapProfileNode(v8::Isolate* isolate,
Response V8HeapProfilerAgentImpl::stopSampling(
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>* profile) {
Response result = getSamplingProfile(profile);
- if (result.isSuccess()) {
+ if (result.IsSuccess()) {
m_isolate->GetHeapProfiler()->StopSamplingHeapProfiler();
m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
false);
@@ -383,7 +385,7 @@ Response V8HeapProfilerAgentImpl::getSamplingProfile(
std::unique_ptr<v8::AllocationProfile> v8Profile(
profiler->GetAllocationProfile());
if (!v8Profile)
- return Response::Error("V8 sampling heap profiler was not started.");
+ return Response::ServerError("V8 sampling heap profiler was not started.");
v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
auto samples = std::make_unique<
protocol::Array<protocol::HeapProfiler::SamplingHeapProfileSample>>();
@@ -399,7 +401,7 @@ Response V8HeapProfilerAgentImpl::getSamplingProfile(
.setHead(buildSampingHeapProfileNode(m_isolate, root))
.setSamples(std::move(samples))
.build();
- return Response::OK();
+ return Response::Success();
}
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index c767dbf09d..18c592ef11 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -155,8 +155,7 @@ std::unique_ptr<V8StackTrace> V8InspectorImpl::createStackTrace(
}
std::unique_ptr<V8InspectorSession> V8InspectorImpl::connect(
- int contextGroupId, V8Inspector::Channel* channel,
- const StringView& state) {
+ int contextGroupId, V8Inspector::Channel* channel, StringView state) {
int sessionId = ++m_lastSessionId;
std::unique_ptr<V8InspectorSessionImpl> session =
V8InspectorSessionImpl::create(this, contextGroupId, sessionId, channel,
@@ -256,9 +255,9 @@ void V8InspectorImpl::idleStarted() { m_isolate->SetIdle(true); }
void V8InspectorImpl::idleFinished() { m_isolate->SetIdle(false); }
unsigned V8InspectorImpl::exceptionThrown(
- v8::Local<v8::Context> context, const StringView& message,
- v8::Local<v8::Value> exception, const StringView& detailedMessage,
- const StringView& url, unsigned lineNumber, unsigned columnNumber,
+ v8::Local<v8::Context> context, StringView message,
+ v8::Local<v8::Value> exception, StringView detailedMessage, StringView url,
+ unsigned lineNumber, unsigned columnNumber,
std::unique_ptr<V8StackTrace> stackTrace, int scriptId) {
int groupId = contextGroupId(context);
if (!groupId || m_muteExceptionsMap[groupId]) return 0;
@@ -277,7 +276,7 @@ unsigned V8InspectorImpl::exceptionThrown(
void V8InspectorImpl::exceptionRevoked(v8::Local<v8::Context> context,
unsigned exceptionId,
- const StringView& message) {
+ StringView message) {
int groupId = contextGroupId(context);
if (!groupId) return;
@@ -292,8 +291,7 @@ std::unique_ptr<V8StackTrace> V8InspectorImpl::captureStackTrace(
return m_debugger->captureStackTrace(fullStack);
}
-V8StackTraceId V8InspectorImpl::storeCurrentStackTrace(
- const StringView& description) {
+V8StackTraceId V8InspectorImpl::storeCurrentStackTrace(StringView description) {
return m_debugger->storeCurrentStackTrace(description);
}
@@ -305,7 +303,7 @@ void V8InspectorImpl::externalAsyncTaskFinished(const V8StackTraceId& parent) {
m_debugger->externalAsyncTaskFinished(parent);
}
-void V8InspectorImpl::asyncTaskScheduled(const StringView& taskName, void* task,
+void V8InspectorImpl::asyncTaskScheduled(StringView taskName, void* task,
bool recurring) {
if (!task) return;
m_debugger->asyncTaskScheduled(taskName, task, recurring);
@@ -467,12 +465,12 @@ class V8InspectorImpl::EvaluateScope::TerminateTask : public v8::Task {
protocol::Response V8InspectorImpl::EvaluateScope::setTimeout(double timeout) {
if (m_isolate->IsExecutionTerminating()) {
- return protocol::Response::Error("Execution was terminated");
+ return protocol::Response::ServerError("Execution was terminated");
}
m_cancelToken.reset(new CancelToken());
v8::debug::GetCurrentPlatform()->CallDelayedOnWorkerThread(
std::make_unique<TerminateTask>(m_isolate, m_cancelToken), timeout);
- return protocol::Response::OK();
+ return protocol::Response::Success();
}
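
setTimeout implements a watchdog: a TerminateTask is posted to a worker thread
through the platform and terminates the isolate's execution when it fires,
unless the shared cancel token was released first. A hedged sketch of the
scheduling step, reusing the file-local TerminateTask and CancelToken types
from above:

  // Hedged sketch: arm a 2-second watchdog. Dropping |token| before the delay
  // elapses cancels the termination.
  auto token = std::make_shared<CancelToken>();
  v8::debug::GetCurrentPlatform()->CallDelayedOnWorkerThread(
      std::make_unique<TerminateTask>(isolate, token),
      /*delay_in_seconds=*/2.0);
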
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index dce3db52bd..43e3369953 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -77,7 +77,7 @@ class V8InspectorImpl : public V8Inspector {
// V8Inspector implementation.
std::unique_ptr<V8InspectorSession> connect(int contextGroupId,
V8Inspector::Channel*,
- const StringView& state) override;
+ StringView state) override;
void contextCreated(const V8ContextInfo&) override;
void contextDestroyed(v8::Local<v8::Context>) override;
v8::MaybeLocal<v8::Context> contextById(int contextId) override;
@@ -85,25 +85,25 @@ class V8InspectorImpl : public V8Inspector {
void resetContextGroup(int contextGroupId) override;
void idleStarted() override;
void idleFinished() override;
- unsigned exceptionThrown(v8::Local<v8::Context>, const StringView& message,
+ unsigned exceptionThrown(v8::Local<v8::Context>, StringView message,
v8::Local<v8::Value> exception,
- const StringView& detailedMessage,
- const StringView& url, unsigned lineNumber,
- unsigned columnNumber, std::unique_ptr<V8StackTrace>,
+ StringView detailedMessage, StringView url,
+ unsigned lineNumber, unsigned columnNumber,
+ std::unique_ptr<V8StackTrace>,
int scriptId) override;
void exceptionRevoked(v8::Local<v8::Context>, unsigned exceptionId,
- const StringView& message) override;
+ StringView message) override;
std::unique_ptr<V8StackTrace> createStackTrace(
v8::Local<v8::StackTrace>) override;
std::unique_ptr<V8StackTrace> captureStackTrace(bool fullStack) override;
- void asyncTaskScheduled(const StringView& taskName, void* task,
+ void asyncTaskScheduled(StringView taskName, void* task,
bool recurring) override;
void asyncTaskCanceled(void* task) override;
void asyncTaskStarted(void* task) override;
void asyncTaskFinished(void* task) override;
void allAsyncTasksCanceled() override;
- V8StackTraceId storeCurrentStackTrace(const StringView& description) override;
+ V8StackTraceId storeCurrentStackTrace(StringView description) override;
void externalAsyncTaskStarted(const V8StackTraceId& parent) override;
void externalAsyncTaskFinished(const V8StackTraceId& parent) override;
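
Throughout the public API, StringView parameters change from const StringView&
to by-value StringView: the view is a trivially copyable (pointer, length)
pair, so passing it by value costs no more than the reference while
simplifying call sites. A hedged sketch of a caller; the session pointer and
the payload strings are assumptions:

  // Hedged sketch: the by-value StringView API. StringView does not own its
  // bytes, so the backing arrays must outlive the call.
  const uint8_t reason[] = "ambiguous";
  const uint8_t details[] = "{}";  // breakDetails is parsed as JSON
  session->schedulePauseOnNextStatement(
      v8_inspector::StringView(reason, sizeof(reason) - 1),
      v8_inspector::StringView(details, sizeof(details) - 1));
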
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 45881a4596..3bdc3e80c2 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -5,6 +5,7 @@
#include "src/inspector/v8-inspector-session-impl.h"
#include "../../third_party/inspector_protocol/crdtp/cbor.h"
+#include "../../third_party/inspector_protocol/crdtp/dispatch.h"
#include "../../third_party/inspector_protocol/crdtp/json.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -32,12 +33,12 @@ using v8_crdtp::cbor::CheckCBORMessage;
using v8_crdtp::json::ConvertCBORToJSON;
using v8_crdtp::json::ConvertJSONToCBOR;
-bool IsCBORMessage(const StringView& msg) {
+bool IsCBORMessage(StringView msg) {
return msg.is8Bit() && msg.length() >= 2 && msg.characters8()[0] == 0xd8 &&
msg.characters8()[1] == 0x5a;
}
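
The check keys off the two-byte crdtp envelope prefix: 0xd8 (a CBOR tag
initial byte) followed by 0x5a (a byte string whose 32-bit length follows). A
hedged sketch of the same test over raw bytes; requiring six bytes accounts
for the prefix plus the 4-byte big-endian length:

  // Hedged sketch mirroring IsCBORMessage above.
  bool looksLikeCBOREnvelope(const std::vector<uint8_t>& msg) {
    return msg.size() >= 6 && msg[0] == 0xd8 && msg[1] == 0x5a;
  }
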
-Status ConvertToCBOR(const StringView& state, std::vector<uint8_t>* cbor) {
+Status ConvertToCBOR(StringView state, std::vector<uint8_t>* cbor) {
return state.is8Bit()
? ConvertJSONToCBOR(
span<uint8_t>(state.characters8(), state.length()), cbor)
@@ -45,7 +46,7 @@ Status ConvertToCBOR(const StringView& state, std::vector<uint8_t>* cbor) {
span<uint16_t>(state.characters16(), state.length()), cbor);
}
-std::unique_ptr<protocol::DictionaryValue> ParseState(const StringView& state) {
+std::unique_ptr<protocol::DictionaryValue> ParseState(StringView state) {
std::vector<uint8_t> converted;
span<uint8_t> cbor;
if (IsCBORMessage(state))
@@ -62,7 +63,7 @@ std::unique_ptr<protocol::DictionaryValue> ParseState(const StringView& state) {
} // namespace
// static
-bool V8InspectorSession::canDispatchMethod(const StringView& method) {
+bool V8InspectorSession::canDispatchMethod(StringView method) {
return stringViewStartsWith(method,
protocol::Runtime::Metainfo::commandPrefix) ||
stringViewStartsWith(method,
@@ -84,7 +85,7 @@ int V8ContextInfo::executionContextId(v8::Local<v8::Context> context) {
std::unique_ptr<V8InspectorSessionImpl> V8InspectorSessionImpl::create(
V8InspectorImpl* inspector, int contextGroupId, int sessionId,
- V8Inspector::Channel* channel, const StringView& state) {
+ V8Inspector::Channel* channel, StringView state) {
return std::unique_ptr<V8InspectorSessionImpl>(new V8InspectorSessionImpl(
inspector, contextGroupId, sessionId, channel, state));
}
@@ -93,7 +94,7 @@ V8InspectorSessionImpl::V8InspectorSessionImpl(V8InspectorImpl* inspector,
int contextGroupId,
int sessionId,
V8Inspector::Channel* channel,
- const StringView& savedState)
+ StringView savedState)
: m_contextGroupId(contextGroupId),
m_sessionId(sessionId),
m_inspector(inspector),
@@ -167,7 +168,7 @@ protocol::DictionaryValue* V8InspectorSessionImpl::agentState(
std::unique_ptr<StringBuffer> V8InspectorSessionImpl::serializeForFrontend(
std::unique_ptr<protocol::Serializable> message) {
- std::vector<uint8_t> cbor = std::move(*message).TakeSerialized();
+ std::vector<uint8_t> cbor = message->Serialize();
DCHECK(CheckCBORMessage(SpanFrom(cbor)).ok());
if (use_binary_protocol_) return StringBufferFrom(std::move(cbor));
std::vector<uint8_t> json;
@@ -184,23 +185,24 @@ std::unique_ptr<StringBuffer> V8InspectorSessionImpl::serializeForFrontend(
return StringBufferFrom(std::move(string16));
}
-void V8InspectorSessionImpl::sendProtocolResponse(
+void V8InspectorSessionImpl::SendProtocolResponse(
int callId, std::unique_ptr<protocol::Serializable> message) {
m_channel->sendResponse(callId, serializeForFrontend(std::move(message)));
}
-void V8InspectorSessionImpl::sendProtocolNotification(
+void V8InspectorSessionImpl::SendProtocolNotification(
std::unique_ptr<protocol::Serializable> message) {
m_channel->sendNotification(serializeForFrontend(std::move(message)));
}
-void V8InspectorSessionImpl::fallThrough(int callId, const String16& method,
+void V8InspectorSessionImpl::FallThrough(int callId,
+ const v8_crdtp::span<uint8_t> method,
v8_crdtp::span<uint8_t> message) {
// There's no other layer to handle the command.
UNREACHABLE();
}
-void V8InspectorSessionImpl::flushProtocolNotifications() {
+void V8InspectorSessionImpl::FlushProtocolNotifications() {
m_channel->flushProtocolNotifications();
}
@@ -224,14 +226,15 @@ Response V8InspectorSessionImpl::findInjectedScript(
injectedScript = nullptr;
InspectedContext* context =
m_inspector->getContext(m_contextGroupId, contextId);
- if (!context) return Response::Error("Cannot find context with specified id");
+ if (!context)
+ return Response::ServerError("Cannot find context with specified id");
injectedScript = context->getInjectedScript(m_sessionId);
if (!injectedScript) {
injectedScript = context->createInjectedScript(m_sessionId);
if (m_customObjectFormatterEnabled)
injectedScript->setCustomObjectFormatterEnabled(true);
}
- return Response::OK();
+ return Response::Success();
}
Response V8InspectorSessionImpl::findInjectedScript(
@@ -239,7 +242,7 @@ Response V8InspectorSessionImpl::findInjectedScript(
return findInjectedScript(objectId->contextId(), injectedScript);
}
-void V8InspectorSessionImpl::releaseObjectGroup(const StringView& objectGroup) {
+void V8InspectorSessionImpl::releaseObjectGroup(StringView objectGroup) {
releaseObjectGroup(toString16(objectGroup));
}
@@ -253,14 +256,17 @@ void V8InspectorSessionImpl::releaseObjectGroup(const String16& objectGroup) {
}
bool V8InspectorSessionImpl::unwrapObject(
- std::unique_ptr<StringBuffer>* error, const StringView& objectId,
+ std::unique_ptr<StringBuffer>* error, StringView objectId,
v8::Local<v8::Value>* object, v8::Local<v8::Context>* context,
std::unique_ptr<StringBuffer>* objectGroup) {
String16 objectGroupString;
Response response = unwrapObject(toString16(objectId), object, context,
objectGroup ? &objectGroupString : nullptr);
- if (!response.isSuccess()) {
- if (error) *error = StringBufferFrom(response.errorMessage());
+ if (response.IsError()) {
+ if (error) {
+ const std::string& msg = response.Message();
+ *error = StringBufferFrom(String16::fromUTF8(msg.data(), msg.size()));
+ }
return false;
}
if (objectGroup)
@@ -274,22 +280,21 @@ Response V8InspectorSessionImpl::unwrapObject(const String16& objectId,
String16* objectGroup) {
std::unique_ptr<RemoteObjectId> remoteId;
Response response = RemoteObjectId::parse(objectId, &remoteId);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
InjectedScript* injectedScript = nullptr;
response = findInjectedScript(remoteId.get(), injectedScript);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
response = injectedScript->findObject(*remoteId, object);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
*context = injectedScript->context()->context();
if (objectGroup) *objectGroup = injectedScript->objectGroupName(*remoteId);
- return Response::OK();
+ return Response::Success();
}
std::unique_ptr<protocol::Runtime::API::RemoteObject>
V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
- const StringView& groupName,
- bool generatePreview) {
+ StringView groupName, bool generatePreview) {
return wrapObject(context, value, toString16(groupName), generatePreview);
}
@@ -336,8 +341,7 @@ void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
});
}
-void V8InspectorSessionImpl::dispatchProtocolMessage(
- const StringView& message) {
+void V8InspectorSessionImpl::dispatchProtocolMessage(StringView message) {
using v8_crdtp::span;
using v8_crdtp::SpanFrom;
span<uint8_t> cbor;
@@ -347,32 +351,35 @@ void V8InspectorSessionImpl::dispatchProtocolMessage(
m_state->setBoolean("use_binary_protocol", true);
cbor = span<uint8_t>(message.characters8(), message.length());
} else {
- if (message.is8Bit()) {
- // We're ignoring the return value of these conversion functions
- // intentionally. It means the |parsed_message| below will be nullptr.
- ConvertJSONToCBOR(span<uint8_t>(message.characters8(), message.length()),
- &converted_cbor);
- } else {
- ConvertJSONToCBOR(
- span<uint16_t>(message.characters16(), message.length()),
- &converted_cbor);
+ // If the conversion fails, report a parse error to the client instead of
+ // attempting to dispatch the malformed message.
+ auto status = ConvertToCBOR(message, &converted_cbor);
+ if (!status.ok()) {
+ m_channel->sendNotification(
+ serializeForFrontend(v8_crdtp::CreateErrorNotification(
+ v8_crdtp::DispatchResponse::ParseError(status.ToASCIIString()))));
+ return;
}
cbor = SpanFrom(converted_cbor);
}
- int callId;
- std::unique_ptr<protocol::Value> parsed_message =
- protocol::Value::parseBinary(cbor.data(), cbor.size());
- String16 method;
- if (m_dispatcher.parseCommand(parsed_message.get(), &callId, &method)) {
- // Pass empty string instead of the actual message to save on a conversion.
- // We're allowed to do so because fall-through is not implemented.
- m_dispatcher.dispatch(callId, method, std::move(parsed_message),
- v8_crdtp::span<uint8_t>());
+ v8_crdtp::Dispatchable dispatchable(cbor);
+ if (!dispatchable.ok()) {
+ if (!dispatchable.HasCallId()) {
+ m_channel->sendNotification(serializeForFrontend(
+ v8_crdtp::CreateErrorNotification(dispatchable.DispatchError())));
+ } else {
+ m_channel->sendResponse(
+ dispatchable.CallId(),
+ serializeForFrontend(v8_crdtp::CreateErrorResponse(
+ dispatchable.CallId(), dispatchable.DispatchError())));
+ }
+ return;
}
+ m_dispatcher.Dispatch(dispatchable).Run();
}
std::vector<uint8_t> V8InspectorSessionImpl::state() {
- return std::move(*m_state).TakeSerialized();
+ return m_state->Serialize();
}
std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
@@ -425,36 +432,41 @@ V8InspectorSession::Inspectable* V8InspectorSessionImpl::inspectedObject(
}
void V8InspectorSessionImpl::schedulePauseOnNextStatement(
- const StringView& breakReason, const StringView& breakDetails) {
+ StringView breakReason, StringView breakDetails) {
+ std::vector<uint8_t> cbor;
+ ConvertToCBOR(breakDetails, &cbor);
m_debuggerAgent->schedulePauseOnNextStatement(
toString16(breakReason),
protocol::DictionaryValue::cast(
- protocol::StringUtil::parseJSON(breakDetails)));
+ protocol::Value::parseBinary(cbor.data(), cbor.size())));
}
void V8InspectorSessionImpl::cancelPauseOnNextStatement() {
m_debuggerAgent->cancelPauseOnNextStatement();
}
-void V8InspectorSessionImpl::breakProgram(const StringView& breakReason,
- const StringView& breakDetails) {
+void V8InspectorSessionImpl::breakProgram(StringView breakReason,
+ StringView breakDetails) {
+ std::vector<uint8_t> cbor;
+ ConvertToCBOR(breakDetails, &cbor);
m_debuggerAgent->breakProgram(
toString16(breakReason),
protocol::DictionaryValue::cast(
- protocol::StringUtil::parseJSON(breakDetails)));
+ protocol::Value::parseBinary(cbor.data(), cbor.size())));
}
void V8InspectorSessionImpl::setSkipAllPauses(bool skip) {
m_debuggerAgent->setSkipAllPauses(skip);
}
-void V8InspectorSessionImpl::resume() { m_debuggerAgent->resume(); }
+void V8InspectorSessionImpl::resume(bool terminateOnResume) {
+ m_debuggerAgent->resume(terminateOnResume);
+}
void V8InspectorSessionImpl::stepOver() { m_debuggerAgent->stepOver(); }
std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
-V8InspectorSessionImpl::searchInTextByLines(const StringView& text,
- const StringView& query,
+V8InspectorSessionImpl::searchInTextByLines(StringView text, StringView query,
bool caseSensitive, bool isRegex) {
// TODO(dgozman): search may operate on StringView and avoid copying |text|.
std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
@@ -467,7 +479,7 @@ V8InspectorSessionImpl::searchInTextByLines(const StringView& text,
}
void V8InspectorSessionImpl::triggerPreciseCoverageDeltaUpdate(
- const StringView& occassion) {
+ StringView occassion) {
m_profilerAgent->triggerPreciseCoverageDeltaUpdate(toString16(occassion));
}
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 31b822c47d..b3fce48df7 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -32,9 +32,11 @@ using protocol::Response;
class V8InspectorSessionImpl : public V8InspectorSession,
public protocol::FrontendChannel {
public:
- static std::unique_ptr<V8InspectorSessionImpl> create(
- V8InspectorImpl*, int contextGroupId, int sessionId,
- V8Inspector::Channel*, const StringView& state);
+ static std::unique_ptr<V8InspectorSessionImpl> create(V8InspectorImpl*,
+ int contextGroupId,
+ int sessionId,
+ V8Inspector::Channel*,
+ StringView state);
~V8InspectorSessionImpl() override;
V8InspectorImpl* inspector() const { return m_inspector; }
@@ -64,49 +66,48 @@ class V8InspectorSessionImpl : public V8InspectorSession,
void releaseObjectGroup(const String16& objectGroup);
// V8InspectorSession implementation.
- void dispatchProtocolMessage(const StringView& message) override;
+ void dispatchProtocolMessage(StringView message) override;
std::vector<uint8_t> state() override;
std::vector<std::unique_ptr<protocol::Schema::API::Domain>> supportedDomains()
override;
void addInspectedObject(
std::unique_ptr<V8InspectorSession::Inspectable>) override;
- void schedulePauseOnNextStatement(const StringView& breakReason,
- const StringView& breakDetails) override;
+ void schedulePauseOnNextStatement(StringView breakReason,
+ StringView breakDetails) override;
void cancelPauseOnNextStatement() override;
- void breakProgram(const StringView& breakReason,
- const StringView& breakDetails) override;
+ void breakProgram(StringView breakReason, StringView breakDetails) override;
void setSkipAllPauses(bool) override;
- void resume() override;
+ void resume(bool terminateOnResume = false) override;
void stepOver() override;
std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
- searchInTextByLines(const StringView& text, const StringView& query,
- bool caseSensitive, bool isRegex) override;
- void releaseObjectGroup(const StringView& objectGroup) override;
- bool unwrapObject(std::unique_ptr<StringBuffer>*, const StringView& objectId,
+ searchInTextByLines(StringView text, StringView query, bool caseSensitive,
+ bool isRegex) override;
+ void releaseObjectGroup(StringView objectGroup) override;
+ bool unwrapObject(std::unique_ptr<StringBuffer>*, StringView objectId,
v8::Local<v8::Value>*, v8::Local<v8::Context>*,
std::unique_ptr<StringBuffer>* objectGroup) override;
std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
- v8::Local<v8::Context>, v8::Local<v8::Value>, const StringView& groupName,
+ v8::Local<v8::Context>, v8::Local<v8::Value>, StringView groupName,
bool generatePreview) override;
V8InspectorSession::Inspectable* inspectedObject(unsigned num);
static const unsigned kInspectedObjectBufferSize = 5;
- void triggerPreciseCoverageDeltaUpdate(const StringView& occassion) override;
+ void triggerPreciseCoverageDeltaUpdate(StringView occassion) override;
private:
V8InspectorSessionImpl(V8InspectorImpl*, int contextGroupId, int sessionId,
- V8Inspector::Channel*, const StringView& state);
+ V8Inspector::Channel*, StringView state);
protocol::DictionaryValue* agentState(const String16& name);
// protocol::FrontendChannel implementation.
- void sendProtocolResponse(
+ void SendProtocolResponse(
int callId, std::unique_ptr<protocol::Serializable> message) override;
- void sendProtocolNotification(
+ void SendProtocolNotification(
std::unique_ptr<protocol::Serializable> message) override;
- void fallThrough(int callId, const String16& method,
+ void FallThrough(int callId, v8_crdtp::span<uint8_t> method,
v8_crdtp::span<uint8_t> message) override;
- void flushProtocolNotifications() override;
+ void FlushProtocolNotifications() override;
std::unique_ptr<StringBuffer> serializeForFrontend(
std::unique_ptr<protocol::Serializable> message);
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 58d83aa948..873add9a19 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -27,8 +27,10 @@ static const char profilerEnabled[] = "profilerEnabled";
static const char preciseCoverageStarted[] = "preciseCoverageStarted";
static const char preciseCoverageCallCount[] = "preciseCoverageCallCount";
static const char preciseCoverageDetailed[] = "preciseCoverageDetailed";
+static const char preciseCoverageAllowTriggeredUpdates[] =
+ "preciseCoverageAllowTriggeredUpdates";
static const char typeProfileStarted[] = "typeProfileStarted";
-}
+} // namespace ProfilerAgentState
namespace {
@@ -218,14 +220,14 @@ void V8ProfilerAgentImpl::consoleProfileEnd(const String16& title) {
}
Response V8ProfilerAgentImpl::enable() {
- if (m_enabled) return Response::OK();
+ if (m_enabled) return Response::Success();
m_enabled = true;
m_state->setBoolean(ProfilerAgentState::profilerEnabled, true);
- return Response::OK();
+ return Response::Success();
}
Response V8ProfilerAgentImpl::disable() {
- if (!m_enabled) return Response::OK();
+ if (!m_enabled) return Response::Success();
for (size_t i = m_startedProfiles.size(); i > 0; --i)
stopProfiling(m_startedProfiles[i - 1].m_id, false);
m_startedProfiles.clear();
@@ -234,15 +236,16 @@ Response V8ProfilerAgentImpl::disable() {
DCHECK(!m_profiler);
m_enabled = false;
m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
- return Response::OK();
+ return Response::Success();
}
Response V8ProfilerAgentImpl::setSamplingInterval(int interval) {
if (m_profiler) {
- return Response::Error("Cannot change sampling interval when profiling.");
+ return Response::ServerError(
+ "Cannot change sampling interval when profiling.");
}
m_state->setInteger(ProfilerAgentState::samplingInterval, interval);
- return Response::OK();
+ return Response::Success();
}
void V8ProfilerAgentImpl::restore() {
@@ -261,52 +264,57 @@ void V8ProfilerAgentImpl::restore() {
ProfilerAgentState::preciseCoverageCallCount, false);
bool detailed = m_state->booleanProperty(
ProfilerAgentState::preciseCoverageDetailed, false);
+ bool updatesAllowed = m_state->booleanProperty(
+ ProfilerAgentState::preciseCoverageAllowTriggeredUpdates, false);
double timestamp;
startPreciseCoverage(Maybe<bool>(callCount), Maybe<bool>(detailed),
- &timestamp);
+ Maybe<bool>(updatesAllowed), &timestamp);
}
}
Response V8ProfilerAgentImpl::start() {
- if (m_recordingCPUProfile) return Response::OK();
- if (!m_enabled) return Response::Error("Profiler is not enabled");
+ if (m_recordingCPUProfile) return Response::Success();
+ if (!m_enabled) return Response::ServerError("Profiler is not enabled");
m_recordingCPUProfile = true;
m_frontendInitiatedProfileId = nextProfileId();
startProfiling(m_frontendInitiatedProfileId);
m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, true);
- return Response::OK();
+ return Response::Success();
}
Response V8ProfilerAgentImpl::stop(
std::unique_ptr<protocol::Profiler::Profile>* profile) {
if (!m_recordingCPUProfile) {
- return Response::Error("No recording profiles found");
+ return Response::ServerError("No recording profiles found");
}
m_recordingCPUProfile = false;
std::unique_ptr<protocol::Profiler::Profile> cpuProfile =
stopProfiling(m_frontendInitiatedProfileId, !!profile);
if (profile) {
*profile = std::move(cpuProfile);
- if (!profile->get()) return Response::Error("Profile is not found");
+ if (!profile->get()) return Response::ServerError("Profile is not found");
}
m_frontendInitiatedProfileId = String16();
m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, false);
- return Response::OK();
+ return Response::Success();
}
-Response V8ProfilerAgentImpl::startPreciseCoverage(Maybe<bool> callCount,
- Maybe<bool> detailed,
- double* out_timestamp) {
- if (!m_enabled) return Response::Error("Profiler is not enabled");
+Response V8ProfilerAgentImpl::startPreciseCoverage(
+ Maybe<bool> callCount, Maybe<bool> detailed,
+ Maybe<bool> allowTriggeredUpdates, double* out_timestamp) {
+ if (!m_enabled) return Response::ServerError("Profiler is not enabled");
*out_timestamp =
v8::base::TimeTicks::HighResolutionNow().since_origin().InSecondsF();
bool callCountValue = callCount.fromMaybe(false);
bool detailedValue = detailed.fromMaybe(false);
+ bool allowTriggeredUpdatesValue = allowTriggeredUpdates.fromMaybe(false);
m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, true);
m_state->setBoolean(ProfilerAgentState::preciseCoverageCallCount,
callCountValue);
m_state->setBoolean(ProfilerAgentState::preciseCoverageDetailed,
detailedValue);
+ m_state->setBoolean(ProfilerAgentState::preciseCoverageAllowTriggeredUpdates,
+ allowTriggeredUpdatesValue);
// BlockCount is a superset of PreciseCount. It includes block-granularity
// coverage data if it exists (at the time of writing, that's the case for
// each function recompiled after the BlockCount mode has been set); and
@@ -317,17 +325,17 @@ Response V8ProfilerAgentImpl::startPreciseCoverage(Maybe<bool> callCount,
? (detailedValue ? Mode::kBlockCount : Mode::kPreciseCount)
: (detailedValue ? Mode::kBlockBinary : Mode::kPreciseBinary);
C::SelectMode(m_isolate, mode);
- return Response::OK();
+ return Response::Success();
}
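
The callCount/detailed flags select one of four coverage modes; BlockCount and
BlockBinary add block-granularity data on top of the function-level Precise
modes, as the comment above explains. A hedged sketch of the same 2x2 mapping
as a standalone helper; coverageModeFor is a hypothetical name:

  // Hedged sketch: the flag-to-mode mapping behind the ternary above.
  v8::debug::CoverageMode coverageModeFor(bool callCount, bool detailed) {
    using Mode = v8::debug::CoverageMode;
    if (callCount) return detailed ? Mode::kBlockCount : Mode::kPreciseCount;
    return detailed ? Mode::kBlockBinary : Mode::kPreciseBinary;
  }
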
Response V8ProfilerAgentImpl::stopPreciseCoverage() {
- if (!m_enabled) return Response::Error("Profiler is not enabled");
+ if (!m_enabled) return Response::ServerError("Profiler is not enabled");
m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, false);
m_state->setBoolean(ProfilerAgentState::preciseCoverageCallCount, false);
m_state->setBoolean(ProfilerAgentState::preciseCoverageDetailed, false);
v8::debug::Coverage::SelectMode(m_isolate,
v8::debug::CoverageMode::kBestEffort);
- return Response::OK();
+ return Response::Success();
}
namespace {
@@ -395,7 +403,7 @@ Response coverageToProtocol(
.build());
}
*out_result = std::move(result);
- return Response::OK();
+ return Response::Success();
}
} // anonymous namespace
@@ -405,7 +413,7 @@ Response V8ProfilerAgentImpl::takePreciseCoverage(
double* out_timestamp) {
if (!m_state->booleanProperty(ProfilerAgentState::preciseCoverageStarted,
false)) {
- return Response::Error("Precise coverage has not been started.");
+ return Response::ServerError("Precise coverage has not been started.");
}
v8::HandleScope handle_scope(m_isolate);
v8::debug::Coverage coverage = v8::debug::Coverage::CollectPrecise(m_isolate);
@@ -420,6 +428,10 @@ void V8ProfilerAgentImpl::triggerPreciseCoverageDeltaUpdate(
false)) {
return;
}
+ if (!m_state->booleanProperty(
+ ProfilerAgentState::preciseCoverageAllowTriggeredUpdates, false)) {
+ return;
+ }
v8::HandleScope handle_scope(m_isolate);
v8::debug::Coverage coverage = v8::debug::Coverage::CollectPrecise(m_isolate);
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>
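
With the added guard, triggered delta updates fire only when the client opted
in at start time; otherwise triggerPreciseCoverageDeltaUpdate returns without
collecting. A hedged sketch of the opt-in call, parameter order per the new
startPreciseCoverage signature; the agent pointer is assumed:

  // Hedged sketch: opting in to triggered coverage delta updates.
  double timestamp;
  agent->startPreciseCoverage(Maybe<bool>(true),   // callCount
                              Maybe<bool>(false),  // detailed
                              Maybe<bool>(true),   // allowTriggeredUpdates
                              &timestamp);
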
@@ -489,14 +501,14 @@ Response V8ProfilerAgentImpl::startTypeProfile() {
m_state->setBoolean(ProfilerAgentState::typeProfileStarted, true);
v8::debug::TypeProfile::SelectMode(m_isolate,
v8::debug::TypeProfileMode::kCollect);
- return Response::OK();
+ return Response::Success();
}
Response V8ProfilerAgentImpl::stopTypeProfile() {
m_state->setBoolean(ProfilerAgentState::typeProfileStarted, false);
v8::debug::TypeProfile::SelectMode(m_isolate,
v8::debug::TypeProfileMode::kNone);
- return Response::OK();
+ return Response::Success();
}
Response V8ProfilerAgentImpl::takeTypeProfile(
@@ -504,37 +516,38 @@ Response V8ProfilerAgentImpl::takeTypeProfile(
out_result) {
if (!m_state->booleanProperty(ProfilerAgentState::typeProfileStarted,
false)) {
- return Response::Error("Type profile has not been started.");
+ return Response::ServerError("Type profile has not been started.");
}
v8::HandleScope handle_scope(m_isolate);
v8::debug::TypeProfile type_profile =
v8::debug::TypeProfile::Collect(m_isolate);
*out_result = typeProfileToProtocol(m_session->inspector(), type_profile);
- return Response::OK();
+ return Response::Success();
}
Response V8ProfilerAgentImpl::enableRuntimeCallStats() {
if (m_counters)
- return Response::Error("RuntimeCallStats collection already enabled.");
+ return Response::ServerError(
+ "RuntimeCallStats collection already enabled.");
if (V8Inspector* inspector = v8::debug::GetInspector(m_isolate))
m_counters = inspector->enableCounters();
else
- return Response::Error("No inspector found.");
+ return Response::ServerError("No inspector found.");
- return Response::OK();
+ return Response::Success();
}
Response V8ProfilerAgentImpl::disableRuntimeCallStats() {
if (m_counters) m_counters.reset();
- return Response::OK();
+ return Response::Success();
}
Response V8ProfilerAgentImpl::getRuntimeCallStats(
std::unique_ptr<protocol::Array<protocol::Profiler::CounterInfo>>*
out_result) {
if (!m_counters)
- return Response::Error("RuntimeCallStats collection is not enabled.");
+ return Response::ServerError("RuntimeCallStats collection is not enabled.");
*out_result =
std::make_unique<protocol::Array<protocol::Profiler::CounterInfo>>();
@@ -548,7 +561,7 @@ Response V8ProfilerAgentImpl::getRuntimeCallStats(
.build());
}
- return Response::OK();
+ return Response::Success();
}
String16 V8ProfilerAgentImpl::nextProfileId() {
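The hunks above are a mechanical rename of the inspector protocol's Response factories: OK() becomes Success(), Error(...) becomes ServerError(...). A minimal sketch of the calling pattern after the rename; the Response class below is an invented stand-in for illustration, not the real crdtp-backed type:

#include <string>

class Response {  // illustrative stand-in only
 public:
  static Response Success() { return Response(); }
  static Response ServerError(const std::string& message) {
    Response r;
    r.message_ = message;
    return r;
  }
  bool IsSuccess() const { return message_.empty(); }
  const std::string& Message() const { return message_; }

 private:
  std::string message_;  // empty string doubles as the success state
};

Response takeSomething(bool started) {
  if (!started) return Response::ServerError("Collection has not been started.");
  return Response::Success();
}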
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index 74d7069efc..08d76ce587 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -40,6 +40,7 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
Response stop(std::unique_ptr<protocol::Profiler::Profile>*) override;
Response startPreciseCoverage(Maybe<bool> binary, Maybe<bool> detailed,
+ Maybe<bool> allow_triggered_updates,
double* out_timestamp) override;
Response stopPreciseCoverage() override;
Response takePreciseCoverage(
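The header hunk above threads the new allow_triggered_updates flag from Profiler.startPreciseCoverage through to the agent, and the earlier .cc hunk makes triggerPreciseCoverageDeltaUpdate bail out unless that flag was stored. A minimal sketch of the gate, assuming a simple string-keyed boolean state store (the key names mirror the ProfilerAgentState constants used above):

#include <map>
#include <string>

struct AgentState {
  std::map<std::string, bool> flags;
  bool booleanProperty(const std::string& key, bool default_value) const {
    auto it = flags.find(key);
    return it == flags.end() ? default_value : it->second;
  }
};

void TriggerPreciseCoverageDeltaUpdate(const AgentState& state) {
  if (!state.booleanProperty("preciseCoverageStarted", false)) return;
  // New: triggered delta updates are opt-in per session.
  if (!state.booleanProperty("preciseCoverageAllowTriggeredUpdates", false))
    return;
  // ... collect precise coverage and emit the delta to the frontend ...
}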
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index ed6cebea7c..5a2a49154c 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -32,6 +32,7 @@
#include <inttypes.h>
+#include "../../third_party/inspector_protocol/crdtp/json.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
@@ -98,7 +99,7 @@ bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
Response response = injectedScript->wrapEvaluateResult(
maybeResultValue, tryCatch, objectGroup, wrapMode, &result,
&exceptionDetails);
- if (response.isSuccess()) {
+ if (response.IsSuccess()) {
callback->sendSuccess(std::move(result), std::move(exceptionDetails));
return true;
}
@@ -127,7 +128,7 @@ void innerCallFunctionOn(
v8::Local<v8::Value> argumentValue;
Response response = scope.injectedScript()->resolveCallArgument(
(*arguments)[i].get(), &argumentValue);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -153,7 +154,7 @@ void innerCallFunctionOn(
// Re-initialize after running client's code, as it could have destroyed
// context or session.
Response response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -168,8 +169,8 @@ void innerCallFunctionOn(
v8::Local<v8::Value> functionValue;
if (!maybeFunctionValue.ToLocal(&functionValue) ||
!functionValue->IsFunction()) {
- callback->sendFailure(
- Response::Error("Given expression does not evaluate to a function"));
+ callback->sendFailure(Response::ServerError(
+ "Given expression does not evaluate to a function"));
return;
}
@@ -183,7 +184,7 @@ void innerCallFunctionOn(
// Re-initialize after running client's code, as it could have destroyed
// context or session.
response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -210,10 +211,10 @@ Response ensureContext(V8InspectorImpl* inspector, int contextGroupId,
v8::Local<v8::Context> defaultContext =
inspector->client()->ensureDefaultContextInGroup(contextGroupId);
if (defaultContext.IsEmpty())
- return Response::Error("Cannot find default execution context");
+ return Response::ServerError("Cannot find default execution context");
*contextId = InspectedContext::contextId(defaultContext);
}
- return Response::OK();
+ return Response::Success();
}
} // namespace
@@ -242,14 +243,14 @@ void V8RuntimeAgentImpl::evaluate(
int contextId = 0;
Response response = ensureContext(m_inspector, m_session->contextGroupId(),
std::move(executionContextId), &contextId);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
InjectedScript::ContextScope scope(m_session, contextId);
response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -268,7 +269,7 @@ void V8RuntimeAgentImpl::evaluate(
V8InspectorImpl::EvaluateScope evaluateScope(scope);
if (timeout.isJust()) {
response = evaluateScope.setTimeout(timeout.fromJust() / 1000.0);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -291,7 +292,7 @@ void V8RuntimeAgentImpl::evaluate(
// Re-initialize after running client's code, as it could have destroyed
// context or session.
response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -319,13 +320,13 @@ void V8RuntimeAgentImpl::awaitPromise(
std::unique_ptr<AwaitPromiseCallback> callback) {
InjectedScript::ObjectScope scope(m_session, promiseObjectId);
Response response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
if (!scope.object()->IsPromise()) {
callback->sendFailure(
- Response::Error("Could not find promise with given id"));
+ Response::ServerError("Could not find promise with given id"));
return;
}
WrapMode mode = generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
@@ -345,12 +346,12 @@ void V8RuntimeAgentImpl::callFunctionOn(
Maybe<int> executionContextId, Maybe<String16> objectGroup,
std::unique_ptr<CallFunctionOnCallback> callback) {
if (objectId.isJust() && executionContextId.isJust()) {
- callback->sendFailure(Response::Error(
+ callback->sendFailure(Response::ServerError(
"ObjectId must not be specified together with executionContextId"));
return;
}
if (!objectId.isJust() && !executionContextId.isJust()) {
- callback->sendFailure(Response::Error(
+ callback->sendFailure(Response::ServerError(
"Either ObjectId or executionContextId must be specified"));
return;
}
@@ -360,7 +361,7 @@ void V8RuntimeAgentImpl::callFunctionOn(
if (objectId.isJust()) {
InjectedScript::ObjectScope scope(m_session, objectId.fromJust());
Response response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -376,13 +377,13 @@ void V8RuntimeAgentImpl::callFunctionOn(
Response response =
ensureContext(m_inspector, m_session->contextGroupId(),
std::move(executionContextId.fromJust()), &contextId);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
InjectedScript::ContextScope scope(m_session, contextId);
response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -409,13 +410,13 @@ Response V8RuntimeAgentImpl::getProperties(
InjectedScript::ObjectScope scope(m_session, objectId);
Response response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
scope.ignoreExceptionsAndMuteConsole();
v8::MicrotasksScope microtasks_scope(m_inspector->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
if (!scope.object()->IsObject())
- return Response::Error("Value with given id is not an object");
+ return Response::ServerError("Value with given id is not an object");
v8::Local<v8::Object> object = scope.object().As<v8::Object>();
response = scope.injectedScript()->getProperties(
@@ -424,9 +425,9 @@ Response V8RuntimeAgentImpl::getProperties(
generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
: WrapMode::kNoPreview,
result, exceptionDetails);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (exceptionDetails->isJust() || accessorPropertiesOnly.fromMaybe(false))
- return Response::OK();
+ return Response::Success();
std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>
internalPropertiesProtocolArray;
std::unique_ptr<protocol::Array<PrivatePropertyDescriptor>>
@@ -434,68 +435,69 @@ Response V8RuntimeAgentImpl::getProperties(
response = scope.injectedScript()->getInternalAndPrivateProperties(
object, scope.objectGroupName(), &internalPropertiesProtocolArray,
&privatePropertiesProtocolArray);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (!internalPropertiesProtocolArray->empty())
*internalProperties = std::move(internalPropertiesProtocolArray);
if (!privatePropertiesProtocolArray->empty())
*privateProperties = std::move(privatePropertiesProtocolArray);
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::releaseObject(const String16& objectId) {
InjectedScript::ObjectScope scope(m_session, objectId);
Response response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
scope.injectedScript()->releaseObject(objectId);
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::releaseObjectGroup(const String16& objectGroup) {
m_session->releaseObjectGroup(objectGroup);
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::runIfWaitingForDebugger() {
m_inspector->client()->runIfWaitingForDebugger(m_session->contextGroupId());
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::setCustomObjectFormatterEnabled(bool enabled) {
m_state->setBoolean(V8RuntimeAgentImplState::customObjectFormatterEnabled,
enabled);
- if (!m_enabled) return Response::Error("Runtime agent is not enabled");
+ if (!m_enabled) return Response::ServerError("Runtime agent is not enabled");
m_session->setCustomObjectFormatterEnabled(enabled);
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::setMaxCallStackSizeToCapture(int size) {
if (size < 0) {
- return Response::Error("maxCallStackSizeToCapture should be non-negative");
+ return Response::ServerError(
+ "maxCallStackSizeToCapture should be non-negative");
}
V8StackTraceImpl::maxCallStackSizeToCapture = size;
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::discardConsoleEntries() {
V8ConsoleMessageStorage* storage =
m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
storage->clear();
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::compileScript(
const String16& expression, const String16& sourceURL, bool persistScript,
Maybe<int> executionContextId, Maybe<String16>* scriptId,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
- if (!m_enabled) return Response::Error("Runtime agent is not enabled");
+ if (!m_enabled) return Response::ServerError("Runtime agent is not enabled");
int contextId = 0;
Response response = ensureContext(m_inspector, m_session->contextGroupId(),
std::move(executionContextId), &contextId);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
InjectedScript::ContextScope scope(m_session, contextId);
response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (!persistScript) m_inspector->debugger()->muteScriptParsedEvents();
v8::Local<v8::Script> script;
@@ -506,14 +508,14 @@ Response V8RuntimeAgentImpl::compileScript(
if (scope.tryCatch().HasCaught()) {
response = scope.injectedScript()->createExceptionDetails(
scope.tryCatch(), String16(), exceptionDetails);
- if (!response.isSuccess()) return response;
- return Response::OK();
+ if (!response.IsSuccess()) return response;
+ return Response::Success();
} else {
- return Response::Error("Script compilation failed");
+ return Response::ServerError("Script compilation failed");
}
}
- if (!persistScript) return Response::OK();
+ if (!persistScript) return Response::Success();
String16 scriptValueId =
String16::fromInteger(script->GetUnboundScript()->GetId());
@@ -521,7 +523,7 @@ Response V8RuntimeAgentImpl::compileScript(
new v8::Global<v8::Script>(m_inspector->isolate(), script));
m_compiledScripts[scriptValueId] = std::move(global);
*scriptId = scriptValueId;
- return Response::OK();
+ return Response::Success();
}
void V8RuntimeAgentImpl::runScript(
@@ -531,27 +533,28 @@ void V8RuntimeAgentImpl::runScript(
Maybe<bool> generatePreview, Maybe<bool> awaitPromise,
std::unique_ptr<RunScriptCallback> callback) {
if (!m_enabled) {
- callback->sendFailure(Response::Error("Runtime agent is not enabled"));
+ callback->sendFailure(
+ Response::ServerError("Runtime agent is not enabled"));
return;
}
auto it = m_compiledScripts.find(scriptId);
if (it == m_compiledScripts.end()) {
- callback->sendFailure(Response::Error("No script with given id"));
+ callback->sendFailure(Response::ServerError("No script with given id"));
return;
}
int contextId = 0;
Response response = ensureContext(m_inspector, m_session->contextGroupId(),
std::move(executionContextId), &contextId);
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
InjectedScript::ContextScope scope(m_session, contextId);
response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -562,7 +565,7 @@ void V8RuntimeAgentImpl::runScript(
m_compiledScripts.erase(it);
v8::Local<v8::Script> script = scriptWrapper->Get(m_inspector->isolate());
if (script.IsEmpty()) {
- callback->sendFailure(Response::Error("Script execution failed"));
+ callback->sendFailure(Response::ServerError("Script execution failed"));
return;
}
@@ -578,7 +581,7 @@ void V8RuntimeAgentImpl::runScript(
// Re-initialize after running client's code, as it could have destroyed
// context or session.
response = scope.initialize();
- if (!response.isSuccess()) {
+ if (!response.IsSuccess()) {
callback->sendFailure(response);
return;
}
@@ -603,9 +606,9 @@ Response V8RuntimeAgentImpl::queryObjects(
std::unique_ptr<protocol::Runtime::RemoteObject>* objects) {
InjectedScript::ObjectScope scope(m_session, prototypeObjectId);
Response response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
if (!scope.object()->IsObject()) {
- return Response::Error("Prototype should be instance of Object");
+ return Response::ServerError("Prototype should be instance of Object");
}
v8::Local<v8::Array> resultArray = m_inspector->debugger()->queryObjects(
scope.context(), v8::Local<v8::Object>::Cast(scope.object()));
@@ -620,11 +623,11 @@ Response V8RuntimeAgentImpl::globalLexicalScopeNames(
int contextId = 0;
Response response = ensureContext(m_inspector, m_session->contextGroupId(),
std::move(executionContextId), &contextId);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
InjectedScript::ContextScope scope(m_session, contextId);
response = scope.initialize();
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
v8::PersistentValueVector<v8::String> names(m_inspector->isolate());
v8::debug::GlobalLexicalScopeNames(scope.context(), &names);
@@ -633,14 +636,14 @@ Response V8RuntimeAgentImpl::globalLexicalScopeNames(
(*outNames)->emplace_back(
toProtocolString(m_inspector->isolate(), names.Get(i)));
}
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::getIsolateId(String16* outIsolateId) {
char buf[40];
std::snprintf(buf, sizeof(buf), "%" PRIx64, m_inspector->isolateId());
*outIsolateId = buf;
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::getHeapUsage(double* out_usedSize,
@@ -649,7 +652,7 @@ Response V8RuntimeAgentImpl::getHeapUsage(double* out_usedSize,
m_inspector->isolate()->GetHeapStatistics(&stats);
*out_usedSize = stats.used_heap_size();
*out_totalSize = stats.total_heap_size();
- return Response::OK();
+ return Response::Success();
}
void V8RuntimeAgentImpl::terminateExecution(
@@ -665,25 +668,25 @@ Response V8RuntimeAgentImpl::addBinding(const String16& name,
}
protocol::DictionaryValue* bindings =
m_state->getObject(V8RuntimeAgentImplState::bindings);
- if (bindings->booleanProperty(name, false)) return Response::OK();
+ if (bindings->booleanProperty(name, false)) return Response::Success();
if (executionContextId.isJust()) {
int contextId = executionContextId.fromJust();
InspectedContext* context =
m_inspector->getContext(m_session->contextGroupId(), contextId);
if (!context) {
- return Response::Error(
+ return Response::ServerError(
"Cannot find execution context with given executionContextId");
}
addBinding(context, name);
// false means that we should not add this binding later.
bindings->setBoolean(name, false);
- return Response::OK();
+ return Response::Success();
}
bindings->setBoolean(name, true);
m_inspector->forEachContext(
m_session->contextGroupId(),
[&name, this](InspectedContext* context) { addBinding(context, name); });
- return Response::OK();
+ return Response::Success();
}
void V8RuntimeAgentImpl::bindingCallback(
@@ -730,9 +733,9 @@ void V8RuntimeAgentImpl::addBinding(InspectedContext* context,
Response V8RuntimeAgentImpl::removeBinding(const String16& name) {
protocol::DictionaryValue* bindings =
m_state->getObject(V8RuntimeAgentImplState::bindings);
- if (!bindings) return Response::OK();
+ if (!bindings) return Response::Success();
bindings->remove(name);
- return Response::OK();
+ return Response::Success();
}
void V8RuntimeAgentImpl::bindingCalled(const String16& name,
@@ -770,7 +773,7 @@ void V8RuntimeAgentImpl::restore() {
}
Response V8RuntimeAgentImpl::enable() {
- if (m_enabled) return Response::OK();
+ if (m_enabled) return Response::Success();
m_inspector->client()->beginEnsureAllContextsInGroup(
m_session->contextGroupId());
m_enabled = true;
@@ -782,11 +785,11 @@ Response V8RuntimeAgentImpl::enable() {
for (const auto& message : storage->messages()) {
if (!reportMessage(message.get(), false)) break;
}
- return Response::OK();
+ return Response::Success();
}
Response V8RuntimeAgentImpl::disable() {
- if (!m_enabled) return Response::OK();
+ if (!m_enabled) return Response::Success();
m_enabled = false;
m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, false);
m_state->remove(V8RuntimeAgentImplState::bindings);
@@ -798,7 +801,7 @@ Response V8RuntimeAgentImpl::disable() {
if (m_session->debuggerAgent() && !m_session->debuggerAgent()->enabled()) {
m_session->debuggerAgent()->setAsyncCallStackDepth(0);
}
- return Response::OK();
+ return Response::Success();
}
void V8RuntimeAgentImpl::reset() {
@@ -823,9 +826,14 @@ void V8RuntimeAgentImpl::reportExecutionContextCreated(
.setName(context->humanReadableName())
.setOrigin(context->origin())
.build();
- if (!context->auxData().isEmpty())
+ const String16& aux = context->auxData();
+ if (!aux.isEmpty()) {
+ std::vector<uint8_t> cbor;
+ v8_crdtp::json::ConvertJSONToCBOR(
+ v8_crdtp::span<uint16_t>(aux.characters16(), aux.length()), &cbor);
description->setAuxData(protocol::DictionaryValue::cast(
- protocol::StringUtil::parseJSON(context->auxData())));
+ protocol::Value::parseBinary(cbor.data(), cbor.size())));
+ }
m_frontend.executionContextCreated(std::move(description));
}
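The reportExecutionContextCreated hunk above replaces StringUtil::parseJSON with a two-step path: the UTF-16 auxData JSON is first converted to CBOR via crdtp, then parsed with the binary parser. A sketch of that conversion in isolation; the include path is the relative one the .cc above uses, and ConvertJSONToCBOR's status return is ignored here as it is at the call site:

#include <cstdint>
#include <vector>
#include "../../third_party/inspector_protocol/crdtp/json.h"

std::vector<uint8_t> JsonUtf16ToCbor(const uint16_t* chars, size_t length) {
  std::vector<uint8_t> cbor;
  v8_crdtp::json::ConvertJSONToCBOR(
      v8_crdtp::span<uint16_t>(chars, length), &cbor);
  // The caller then parses the bytes with
  // protocol::Value::parseBinary(cbor.data(), cbor.size()).
  return cbor;
}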
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.cc b/deps/v8/src/inspector/v8-schema-agent-impl.cc
index ae19416d1f..9f2533bd6a 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.cc
@@ -21,7 +21,7 @@ Response V8SchemaAgentImpl::getDomains(
*result =
std::make_unique<std::vector<std::unique_ptr<protocol::Schema::Domain>>>(
m_session->supportedDomainsImpl());
- return Response::OK();
+ return Response::Success();
}
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 639ba46592..dc991937c0 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -10,6 +10,10 @@
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
+using v8_crdtp::SpanFrom;
+using v8_crdtp::json::ConvertCBORToJSON;
+using v8_crdtp::json::ConvertJSONToCBOR;
+
namespace v8_inspector {
int V8StackTraceImpl::maxCallStackSizeToCapture = 200;
@@ -124,10 +128,19 @@ V8StackTraceId::V8StackTraceId(uintptr_t id,
bool should_pause)
: id(id), debugger_id(debugger_id), should_pause(should_pause) {}
-V8StackTraceId::V8StackTraceId(const StringView& json)
+V8StackTraceId::V8StackTraceId(StringView json)
: id(0), debugger_id(V8DebuggerId().pair()) {
- auto dict =
- protocol::DictionaryValue::cast(protocol::StringUtil::parseJSON(json));
+ if (json.length() == 0) return;
+ std::vector<uint8_t> cbor;
+ if (json.is8Bit()) {
+ ConvertJSONToCBOR(
+ v8_crdtp::span<uint8_t>(json.characters8(), json.length()), &cbor);
+ } else {
+ ConvertJSONToCBOR(
+ v8_crdtp::span<uint16_t>(json.characters16(), json.length()), &cbor);
+ }
+ auto dict = protocol::DictionaryValue::cast(
+ protocol::Value::parseBinary(cbor.data(), cbor.size()));
if (!dict) return;
String16 s;
if (!dict->getString(kId, &s)) return;
@@ -151,8 +164,8 @@ std::unique_ptr<StringBuffer> V8StackTraceId::ToString() {
dict->setString(kDebuggerId, V8DebuggerId(debugger_id).toString());
dict->setBoolean(kShouldPause, should_pause);
std::vector<uint8_t> json;
- std::vector<uint8_t> cbor = std::move(*dict).TakeSerialized();
- v8_crdtp::json::ConvertCBORToJSON(v8_crdtp::SpanFrom(cbor), &json);
+ v8_crdtp::json::ConvertCBORToJSON(v8_crdtp::SpanFrom(dict->Serialize()),
+ &json);
return StringBufferFrom(std::move(json));
}
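V8StackTraceId's constructor above now takes StringView by value and must handle both string widths before handing the JSON to crdtp. A sketch of that dispatch, assuming v8_inspector::StringView with its is8Bit()/characters8()/characters16()/length() accessors:

#include <cstdint>
#include <vector>
#include "v8-inspector.h"
#include "../../third_party/inspector_protocol/crdtp/json.h"

std::vector<uint8_t> StringViewJsonToCbor(v8_inspector::StringView json) {
  std::vector<uint8_t> cbor;
  if (json.length() == 0) return cbor;  // empty input: nothing to parse
  if (json.is8Bit()) {
    v8_crdtp::json::ConvertJSONToCBOR(
        v8_crdtp::span<uint8_t>(json.characters8(), json.length()), &cbor);
  } else {
    v8_crdtp::json::ConvertJSONToCBOR(
        v8_crdtp::span<uint16_t>(json.characters16(), json.length()), &cbor);
  }
  return cbor;
}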
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index f8d1f74e7d..f9457fd1ec 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -42,17 +42,18 @@ V8InternalValueType v8InternalValueTypeFrom(v8::Local<v8::Context> context,
Response toProtocolValue(v8::Local<v8::Context> context,
v8::Local<v8::Value> value, int maxDepth,
std::unique_ptr<protocol::Value>* result) {
- if (!maxDepth) return Response::Error("Object reference chain is too long");
+ if (!maxDepth)
+ return Response::ServerError("Object reference chain is too long");
maxDepth--;
if (value->IsNull() || value->IsUndefined()) {
*result = protocol::Value::null();
- return Response::OK();
+ return Response::Success();
}
if (value->IsBoolean()) {
*result =
protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
- return Response::OK();
+ return Response::Success();
}
if (value->IsNumber()) {
double doubleValue = value.As<v8::Number>()->Value();
@@ -62,16 +63,16 @@ Response toProtocolValue(v8::Local<v8::Context> context,
int intValue = static_cast<int>(doubleValue);
if (intValue == doubleValue) {
*result = protocol::FundamentalValue::create(intValue);
- return Response::OK();
+ return Response::Success();
}
}
*result = protocol::FundamentalValue::create(doubleValue);
- return Response::OK();
+ return Response::Success();
}
if (value->IsString()) {
*result = protocol::StringValue::create(
toProtocolString(context->GetIsolate(), value.As<v8::String>()));
- return Response::OK();
+ return Response::Success();
}
if (value->IsArray()) {
v8::Local<v8::Array> array = value.As<v8::Array>();
@@ -84,11 +85,11 @@ Response toProtocolValue(v8::Local<v8::Context> context,
return Response::InternalError();
std::unique_ptr<protocol::Value> element;
Response response = toProtocolValue(context, value, maxDepth, &element);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
inspectorArray->pushValue(std::move(element));
}
*result = std::move(inspectorArray);
- return Response::OK();
+ return Response::Success();
}
if (value->IsObject()) {
std::unique_ptr<protocol::DictionaryValue> jsonObject =
@@ -119,21 +120,21 @@ Response toProtocolValue(v8::Local<v8::Context> context,
std::unique_ptr<protocol::Value> propertyValue;
Response response =
toProtocolValue(context, property, maxDepth, &propertyValue);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
jsonObject->setValue(
toProtocolString(context->GetIsolate(), propertyName),
std::move(propertyValue));
}
*result = std::move(jsonObject);
- return Response::OK();
+ return Response::Success();
}
- return Response::Error("Object couldn't be returned by value");
+ return Response::ServerError("Object couldn't be returned by value");
}
Response toProtocolValue(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
std::unique_ptr<protocol::Value>* result) {
- if (value->IsUndefined()) return Response::OK();
+ if (value->IsUndefined()) return Response::Success();
return toProtocolValue(context, value, 1000, result);
}
@@ -361,7 +362,7 @@ class PrimitiveValueMirror final : public ValueMirror {
.build();
if (m_value->IsNull())
(*result)->setSubtype(RemoteObject::SubtypeEnum::Null);
- return Response::OK();
+ return Response::Success();
}
void buildEntryPreview(
@@ -416,7 +417,7 @@ class NumberMirror final : public ValueMirror {
} else {
(*result)->setValue(protocol::FundamentalValue::create(m_value->Value()));
}
- return Response::OK();
+ return Response::Success();
}
void buildPropertyPreview(
v8::Local<v8::Context> context, const String16& name,
@@ -470,7 +471,7 @@ class BigIntMirror final : public ValueMirror {
.setUnserializableValue(description)
.setDescription(description)
.build();
- return Response::OK();
+ return Response::Success();
}
void buildPropertyPreview(v8::Local<v8::Context> context,
@@ -513,13 +514,13 @@ class SymbolMirror final : public ValueMirror {
v8::Local<v8::Context> context, WrapMode mode,
std::unique_ptr<RemoteObject>* result) const override {
if (mode == WrapMode::kForceValue) {
- return Response::Error("Object couldn't be returned by value");
+ return Response::ServerError("Object couldn't be returned by value");
}
*result = RemoteObject::create()
.setType(RemoteObject::TypeEnum::Symbol)
.setDescription(descriptionForSymbol(context, m_symbol))
.build();
- return Response::OK();
+ return Response::Success();
}
void buildPropertyPreview(v8::Local<v8::Context> context,
@@ -576,7 +577,7 @@ class LocationMirror final : public ValueMirror {
.setDescription("Object")
.setValue(std::move(location))
.build();
- return Response::OK();
+ return Response::Success();
}
v8::Local<v8::Value> v8Value() const override { return m_value; }
@@ -620,7 +621,7 @@ class FunctionMirror final : public ValueMirror {
if (mode == WrapMode::kForceValue) {
std::unique_ptr<protocol::Value> protocolValue;
Response response = toProtocolValue(context, m_value, &protocolValue);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
*result = RemoteObject::create()
.setType(RemoteObject::TypeEnum::Function)
.setValue(std::move(protocolValue))
@@ -633,7 +634,7 @@ class FunctionMirror final : public ValueMirror {
.setDescription(descriptionForFunction(context, m_value))
.build();
}
- return Response::OK();
+ return Response::Success();
}
void buildPropertyPreview(
@@ -881,7 +882,7 @@ class ObjectMirror final : public ValueMirror {
if (mode == WrapMode::kForceValue) {
std::unique_ptr<protocol::Value> protocolValue;
Response response = toProtocolValue(context, m_value, &protocolValue);
- if (!response.isSuccess()) return response;
+ if (!response.IsSuccess()) return response;
*result = RemoteObject::create()
.setType(RemoteObject::TypeEnum::Object)
.setValue(std::move(protocolValue))
@@ -904,7 +905,7 @@ class ObjectMirror final : public ValueMirror {
(*result)->setPreview(std::move(previewValue));
}
}
- return Response::OK();
+ return Response::Success();
}
void buildObjectPreview(
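toProtocolValue above guards its recursion with a decrementing maxDepth budget rather than tracking visited objects, which keeps very deep or cyclic object graphs from overflowing the stack. The same pattern in a standalone sketch:

#include <memory>
#include <string>

struct Node {
  std::string value;
  std::unique_ptr<Node> child;
};

// Returns false once the depth budget is spent, mirroring the
// "Object reference chain is too long" error above.
bool Serialize(const Node& node, int max_depth, std::string* out) {
  if (!max_depth) return false;
  --max_depth;
  out->append(node.value);
  return node.child ? Serialize(*node.child, max_depth, out) : true;
}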
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 1c61776cdf..cc65545138 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-jump-table.h"
@@ -81,7 +82,9 @@ Register BytecodeArrayBuilder::Local(int index) const {
return Register(index);
}
-Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
+template <typename LocalIsolate>
+Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(
+ LocalIsolate* isolate) {
DCHECK(RemainderOfBlockIsDead());
DCHECK(!bytecode_generated_);
bytecode_generated_ = true;
@@ -99,19 +102,35 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
isolate, register_count, parameter_count(), handler_table);
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(
+ Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(
+ OffThreadIsolate* isolate);
+
#ifdef DEBUG
-int BytecodeArrayBuilder::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+int BytecodeArrayBuilder::CheckBytecodeMatches(BytecodeArray bytecode) {
+ DisallowHeapAllocation no_gc;
return bytecode_array_writer_.CheckBytecodeMatches(bytecode);
}
#endif
+template <typename LocalIsolate>
Handle<ByteArray> BytecodeArrayBuilder::ToSourcePositionTable(
- Isolate* isolate) {
+ LocalIsolate* isolate) {
DCHECK(RemainderOfBlockIsDead());
return bytecode_array_writer_.ToSourcePositionTable(isolate);
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> BytecodeArrayBuilder::ToSourcePositionTable(
+ Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> BytecodeArrayBuilder::ToSourcePositionTable(
+ OffThreadIsolate* isolate);
+
BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
Bytecode bytecode) {
BytecodeSourceInfo source_position;
@@ -1237,32 +1256,26 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(
- BytecodeLoopHeader* loop_header, int loop_depth) {
- OutputJumpLoop(loop_header, loop_depth);
- return *this;
-}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnSmiNoFeedback(
- BytecodeJumpTable* jump_table) {
- OutputSwitchOnSmiNoFeedback(jump_table);
- return *this;
-}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
+ BytecodeLoopHeader* loop_header, int loop_depth, int position) {
if (position != kNoSourcePosition) {
- // We need to attach a non-breakable source position to a stack
- // check, so we simply add it as expression position. There can be
- // a prior statement position from constructs like:
+ // We need to attach a non-breakable source position to JumpLoop for its
+ // implicit stack check, so we simply add it as expression position. There
+ // can be a prior statement position from constructs like:
//
// do var x; while (false);
//
// A Nop could be inserted for empty statements, but since no code
- // is associated with these positions, instead we force the stack
- // check's expression position which eliminates the empty
- // statement's position.
+ // is associated with these positions, instead we force the jump loop's
+ // expression position which eliminates the empty statement's position.
latest_source_info_.ForceExpressionPosition(position);
}
- OutputStackCheck();
+ OutputJumpLoop(loop_header, loop_depth);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnSmiNoFeedback(
+ BytecodeJumpTable* jump_table) {
+ OutputSwitchOnSmiNoFeedback(jump_table);
return *this;
}
@@ -1619,6 +1632,14 @@ uint32_t BytecodeArrayBuilder::GetOutputRegisterListOperand(
return static_cast<uint32_t>(reg_list.first_register().ToOperand());
}
+void BytecodeArrayBuilder::EmitFunctionStartSourcePosition(int position) {
+ bytecode_array_writer_.SetFunctionEntrySourcePosition(position);
+ // Force an expression position to make sure we have one. If the next bytecode
+ // overwrites it, it's fine since it would mean we have a source position
+ // anyway.
+ latest_source_info_.ForceExpressionPosition(position);
+}
+
std::ostream& operator<<(std::ostream& os,
const BytecodeArrayBuilder::ToBooleanMode& mode) {
switch (mode) {
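The bytecode-array-builder hunks above turn ToBytecodeArray and ToSourcePositionTable into templates over the isolate type and pin down exactly two instantiations (Isolate and OffThreadIsolate) with EXPORT_TEMPLATE_DEFINE, so the definitions can stay in the .cc file. A generic sketch of that explicit-instantiation pattern; the types and the export machinery are stand-ins:

struct MainIsolate { /* stand-in for v8::internal::Isolate */ };
struct OffThreadIsolate { /* stand-in for the off-thread variant */ };

// Header: declaration only; the body stays out of sight of users.
template <typename LocalIsolate>
int Finalize(LocalIsolate* isolate);

// .cc file: one definition serves both isolate kinds.
template <typename LocalIsolate>
int Finalize(LocalIsolate* isolate) {
  // real code would allocate through isolate->factory()
  return 0;
}

// Explicit instantiations make both variants link without exposing the body.
template int Finalize(MainIsolate* isolate);
template int Finalize(OffThreadIsolate* isolate);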
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 39cd4fa6f6..cad4f473a2 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -7,6 +7,7 @@
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
+#include "src/base/export-template.h"
#include "src/common/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-flags.h"
@@ -42,11 +43,15 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
SourcePositionTableBuilder::RecordingMode source_position_mode =
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
- Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate);
- Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> ToBytecodeArray(LocalIsolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
#ifdef DEBUG
- int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+ int CheckBytecodeMatches(BytecodeArray bytecode);
#endif
// Get the number of parameters expected by function.
@@ -422,7 +427,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& Jump(BytecodeLabel* label);
BytecodeArrayBuilder& JumpLoop(BytecodeLoopHeader* loop_header,
- int loop_depth);
+ int loop_depth, int position);
BytecodeArrayBuilder& JumpIfTrue(ToBooleanMode mode, BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(ToBooleanMode mode, BytecodeLabel* label);
@@ -439,8 +444,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& SwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table);
- BytecodeArrayBuilder& StackCheck(int position);
-
// Sets the pending message to the value in the accumulator, and returns the
// previous pending message in the accumulator.
BytecodeArrayBuilder& SetPendingMessage();
@@ -549,6 +552,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
void OutputStarRaw(Register reg);
void OutputMovRaw(Register src, Register dest);
+ void EmitFunctionStartSourcePosition(int position);
+
// Accessors
BytecodeRegisterAllocator* register_allocator() {
return &register_allocator_;
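With the standalone StackCheck bytecode gone, JumpLoop above gains a position parameter: the back edge now carries the implicit stack check, so the builder forces a non-breakable expression position onto it. A sketch of just that position handling; kNoSourcePosition and the mini-builder are stand-ins:

constexpr int kNoSourcePosition = -1;  // stand-in for V8's constant

struct SourceInfo {
  int position = kNoSourcePosition;
  void ForceExpressionPosition(int p) { position = p; }
};

void EmitJumpLoop(SourceInfo& latest_source_info, int position) {
  if (position != kNoSourcePosition) {
    // Expression (not statement) position: maps the implicit stack check
    // to source without creating a breakable location, and overrides a
    // stale statement position from e.g. an empty statement.
    latest_source_info.ForceExpressionPosition(position);
  }
  // ... emit the JumpLoop bytecode with loop header and depth ...
}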
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 3a459b4833..a1b9d9d5f6 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/bytecode-array-writer.h"
#include "src/api/api-inl.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-node.h"
@@ -36,8 +37,9 @@ BytecodeArrayWriter::BytecodeArrayWriter(
bytecodes_.reserve(512); // Derived via experimentation.
}
+template <typename LocalIsolate>
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
+ LocalIsolate* isolate, int register_count, int parameter_count,
Handle<ByteArray> handler_table) {
DCHECK_EQ(0, unbound_jumps_);
@@ -52,27 +54,45 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
return bytecode_array;
}
-Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(Isolate* isolate) {
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
+ Isolate* isolate, int register_count, int parameter_count,
+ Handle<ByteArray> handler_table);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
+ OffThreadIsolate* isolate, int register_count, int parameter_count,
+ Handle<ByteArray> handler_table);
+
+template <typename LocalIsolate>
+Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
+ LocalIsolate* isolate) {
DCHECK(!source_position_table_builder_.Lazy());
Handle<ByteArray> source_position_table =
source_position_table_builder_.Omit()
- ? ReadOnlyRoots(isolate).empty_byte_array_handle()
+ ? isolate->factory()->empty_byte_array()
: source_position_table_builder_.ToSourcePositionTable(isolate);
return source_position_table;
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
+ Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
+ OffThreadIsolate* isolate);
+
#ifdef DEBUG
-int BytecodeArrayWriter::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+int BytecodeArrayWriter::CheckBytecodeMatches(BytecodeArray bytecode) {
int mismatches = false;
int bytecode_size = static_cast<int>(bytecodes()->size());
const byte* bytecode_ptr = &bytecodes()->front();
- if (bytecode_size != bytecode->length()) mismatches = true;
+ if (bytecode_size != bytecode.length()) mismatches = true;
// If there's a mismatch only in the length of the bytecode (very unlikely)
// then the first mismatch will be the first extra bytecode.
- int first_mismatch = std::min(bytecode_size, bytecode->length());
+ int first_mismatch = std::min(bytecode_size, bytecode.length());
for (int i = 0; i < first_mismatch; ++i) {
- if (bytecode_ptr[i] != bytecode->get(i)) {
+ if (bytecode_ptr[i] != bytecode.get(i)) {
mismatches = true;
first_mismatch = i;
break;
@@ -187,6 +207,12 @@ void BytecodeArrayWriter::BindTryRegionEnd(
handler_table_builder->SetTryRegionEnd(handler_id, current_offset);
}
+void BytecodeArrayWriter::SetFunctionEntrySourcePosition(int position) {
+ bool is_statement = false;
+ source_position_table_builder_.AddPosition(
+ kFunctionEntryBytecodeOffset, SourcePosition(position), is_statement);
+}
+
void BytecodeArrayWriter::StartBasicBlock() {
InvalidateLastBytecode();
exit_seen_in_block_ = false;
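CheckBytecodeMatches above now takes the BytecodeArray by value under a DisallowHeapAllocation scope and compares raw bytes: -1 on a match, otherwise the offset of the first mismatching byte, where a mismatch only in length reports the first extra byte (per the comment in the hunk). The comparison in a standalone sketch:

#include <algorithm>
#include <cstdint>
#include <vector>

int FirstMismatch(const std::vector<uint8_t>& a,
                  const std::vector<uint8_t>& b) {
  int n = static_cast<int>(std::min(a.size(), b.size()));
  for (int i = 0; i < n; ++i) {
    if (a[i] != b[i]) return i;  // first differing byte
  }
  // A pure length mismatch reports the first extra byte.
  return a.size() == b.size() ? -1 : n;
}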
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 22f0296aff..c1f4266e49 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -51,15 +51,21 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
void BindTryRegionEnd(HandlerTableBuilder* handler_table_builder,
int handler_id);
- Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count,
- int parameter_count,
+ void SetFunctionEntrySourcePosition(int position);
+
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<BytecodeArray> ToBytecodeArray(LocalIsolate* isolate,
+ int register_count, int parameter_count,
Handle<ByteArray> handler_table);
- Handle<ByteArray> ToSourcePositionTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
#ifdef DEBUG
// Returns -1 if they match or the offset of the first mismatching byte.
- int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+ int CheckBytecodeMatches(BytecodeArray bytecode);
#endif
bool RemainderOfBlockIsDead() const { return exit_seen_in_block_; }
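SetFunctionEntrySourcePosition, declared above and defined in the .cc hunk, records a non-statement position against kFunctionEntryBytecodeOffset so the implicit stack check at function entry still maps back to source. A sketch with a stand-in position table; the sentinel value is assumed here:

#include <map>
#include <utility>

constexpr int kFunctionEntryBytecodeOffset = -1;  // assumed sentinel value

struct PositionTable {
  // bytecode offset -> (source position, is_statement)
  std::map<int, std::pair<int, bool>> entries;
  void AddPosition(int code_offset, int source_position, bool is_statement) {
    entries[code_offset] = {source_position, is_statement};
  }
};

void SetFunctionEntrySourcePosition(PositionTable& table, int position) {
  bool is_statement = false;  // non-breakable, as in the hunk above
  table.AddPosition(kFunctionEntryBytecodeOffset, position, is_statement);
}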
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 196f9d8819..18a2fc9913 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -17,6 +17,7 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/logging/log.h"
+#include "src/logging/off-thread-logger.h"
#include "src/objects/debug-objects.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
@@ -24,6 +25,7 @@
#include "src/objects/template-objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -409,10 +411,7 @@ class BytecodeGenerator::ControlScopeForIteration final
LoopBuilder* loop_builder)
: ControlScope(generator),
statement_(statement),
- loop_builder_(loop_builder) {
- generator->loop_depth_++;
- }
- ~ControlScopeForIteration() override { generator()->loop_depth_--; }
+ loop_builder_(loop_builder) {}
protected:
bool Execute(Command command, Statement* statement,
@@ -724,47 +723,72 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
DISALLOW_COPY_AND_ASSIGN(TestResultScope);
};
-// Used to build a list of global declaration initial value pairs.
-class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
+// Used to build a list of toplevel declaration data.
+class BytecodeGenerator::TopLevelDeclarationsBuilder final : public ZoneObject {
public:
+ template <typename LocalIsolate>
Handle<FixedArray> AllocateDeclarations(UnoptimizedCompilationInfo* info,
BytecodeGenerator* generator,
Handle<Script> script,
- Isolate* isolate) {
- int size = 0;
- for (Declaration* decl : *info->scope()->declarations()) {
- Variable* var = decl->var();
- if (!var->is_used()) continue;
- if (var->location() != VariableLocation::UNALLOCATED) continue;
- DCHECK_IMPLIES(decl->node_type() != AstNode::kVariableDeclaration,
- decl->node_type() == AstNode::kFunctionDeclaration);
- size += decl->node_type() == AstNode::kVariableDeclaration ? 1 : 2;
- }
-
+ LocalIsolate* isolate) {
DCHECK(has_constant_pool_entry_);
Handle<FixedArray> data =
- isolate->factory()->NewFixedArray(size, AllocationType::kOld);
+ isolate->factory()->NewFixedArray(entry_slots_, AllocationType::kOld);
int array_index = 0;
- for (Declaration* decl : *info->scope()->declarations()) {
- Variable* var = decl->var();
- if (!var->is_used()) continue;
- if (var->location() != VariableLocation::UNALLOCATED) continue;
- if (decl->node_type() == AstNode::kVariableDeclaration) {
- data->set(array_index++, *var->raw_name()->string().get<Factory>());
- } else {
- FunctionLiteral* f = static_cast<FunctionDeclaration*>(decl)->fun();
- Handle<Object> sfi(Compiler::GetSharedFunctionInfo(f, script, isolate));
- // Return a null handle if any initial values can't be created. Caller
- // will set stack overflow.
- if (sfi.is_null()) return Handle<FixedArray>();
- data->set(array_index++, *sfi);
- int literal_index = generator->GetCachedCreateClosureSlot(f);
- data->set(array_index++, Smi::FromInt(literal_index));
+ if (info->scope()->is_module_scope()) {
+ for (Declaration* decl : *info->scope()->declarations()) {
+ Variable* var = decl->var();
+ if (!var->is_used()) continue;
+ if (var->location() != VariableLocation::MODULE) continue;
+#ifdef DEBUG
+ int start = array_index;
+#endif
+ if (decl->IsFunctionDeclaration()) {
+ FunctionLiteral* f = static_cast<FunctionDeclaration*>(decl)->fun();
+ Handle<SharedFunctionInfo> sfi(
+ Compiler::GetSharedFunctionInfo(f, script, isolate));
+ // Return a null handle if any initial values can't be created. Caller
+ // will set stack overflow.
+ if (sfi.is_null()) return Handle<FixedArray>();
+ data->set(array_index++, *sfi);
+ int literal_index = generator->GetCachedCreateClosureSlot(f);
+ data->set(array_index++, Smi::FromInt(literal_index));
+ DCHECK(var->IsExport());
+ data->set(array_index++, Smi::FromInt(var->index()));
+ DCHECK_EQ(start + kModuleFunctionDeclarationSize, array_index);
+ } else if (var->IsExport() && var->binding_needs_init()) {
+ data->set(array_index++, Smi::FromInt(var->index()));
+ DCHECK_EQ(start + kModuleVariableDeclarationSize, array_index);
+ }
+ }
+ } else {
+ for (Declaration* decl : *info->scope()->declarations()) {
+ Variable* var = decl->var();
+ if (!var->is_used()) continue;
+ if (var->location() != VariableLocation::UNALLOCATED) continue;
+#ifdef DEBUG
+ int start = array_index;
+#endif
+ if (decl->IsVariableDeclaration()) {
+ data->set(array_index++, *var->raw_name()->string());
+ DCHECK_EQ(start + kGlobalVariableDeclarationSize, array_index);
+ } else {
+ FunctionLiteral* f = static_cast<FunctionDeclaration*>(decl)->fun();
+ Handle<SharedFunctionInfo> sfi(
+ Compiler::GetSharedFunctionInfo(f, script, isolate));
+ // Return a null handle if any initial values can't be created. Caller
+ // will set stack overflow.
+ if (sfi.is_null()) return Handle<FixedArray>();
+ data->set(array_index++, *sfi);
+ int literal_index = generator->GetCachedCreateClosureSlot(f);
+ data->set(array_index++, Smi::FromInt(literal_index));
+ DCHECK_EQ(start + kGlobalFunctionDeclarationSize, array_index);
+ }
}
}
-
+ DCHECK_EQ(array_index, data->length());
return data;
}
@@ -774,21 +798,37 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
}
void set_constant_pool_entry(size_t constant_pool_entry) {
- DCHECK(has_global_declaration());
+ DCHECK(has_top_level_declaration());
DCHECK(!has_constant_pool_entry_);
constant_pool_entry_ = constant_pool_entry;
has_constant_pool_entry_ = true;
}
- void record_global_declaration() { has_seen_global_declaration_ = true; }
- bool has_global_declaration() { return has_seen_global_declaration_; }
+ void record_global_variable_declaration() {
+ entry_slots_ += kGlobalVariableDeclarationSize;
+ }
+ void record_global_function_declaration() {
+ entry_slots_ += kGlobalFunctionDeclarationSize;
+ }
+ void record_module_variable_declaration() {
+ entry_slots_ += kModuleVariableDeclarationSize;
+ }
+ void record_module_function_declaration() {
+ entry_slots_ += kModuleFunctionDeclarationSize;
+ }
+ bool has_top_level_declaration() { return entry_slots_ > 0; }
bool processed() { return processed_; }
void mark_processed() { processed_ = true; }
private:
+ const int kGlobalVariableDeclarationSize = 1;
+ const int kGlobalFunctionDeclarationSize = 2;
+ const int kModuleVariableDeclarationSize = 1;
+ const int kModuleFunctionDeclarationSize = 3;
+
size_t constant_pool_entry_ = 0;
+ int entry_slots_ = 0;
bool has_constant_pool_entry_ = false;
- bool has_seen_global_declaration_ = false;
bool processed_ = false;
};
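TopLevelDeclarationsBuilder above replaces the old boolean has_seen_global_declaration_ with exact slot accounting: each record_* call adds the number of FixedArray slots that declaration will occupy, so AllocateDeclarations can size the array up front instead of re-walking the declaration list. A minimal sketch of the counting, with the slot sizes from the hunk:

class SlotCounter {
 public:
  void RecordGlobalVariable() { entry_slots_ += 1; }  // name
  void RecordGlobalFunction() { entry_slots_ += 2; }  // SFI + closure slot
  void RecordModuleVariable() { entry_slots_ += 1; }  // export cell index
  void RecordModuleFunction() { entry_slots_ += 3; }  // SFI + slot + index
  bool HasTopLevelDeclaration() const { return entry_slots_ > 0; }
  int entry_slots() const { return entry_slots_; }

 private:
  int entry_slots_ = 0;
};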
@@ -910,6 +950,36 @@ class BytecodeGenerator::OptionalChainNullLabelScope final {
BytecodeLabels* prev_;
};
+// LoopScope delimits the scope of {loop}, from its header to its final jump.
+// It should be constructed iff a (conceptual) back edge should be produced. In
+// the case of creating a LoopBuilder but never emitting the loop, it is valid
+// to skip the creation of LoopScope.
+class BytecodeGenerator::LoopScope final {
+ public:
+ explicit LoopScope(BytecodeGenerator* bytecode_generator, LoopBuilder* loop)
+ : bytecode_generator_(bytecode_generator),
+ parent_loop_scope_(bytecode_generator_->current_loop_scope()),
+ loop_builder_(loop) {
+ loop_builder_->LoopHeader();
+ bytecode_generator_->set_current_loop_scope(this);
+ bytecode_generator_->loop_depth_++;
+ }
+
+ ~LoopScope() {
+ bytecode_generator_->loop_depth_--;
+ bytecode_generator_->set_current_loop_scope(parent_loop_scope_);
+ DCHECK_GE(bytecode_generator_->loop_depth_, 0);
+ loop_builder_->JumpToHeader(
+ bytecode_generator_->loop_depth_,
+ parent_loop_scope_ ? parent_loop_scope_->loop_builder_ : nullptr);
+ }
+
+ private:
+ BytecodeGenerator* const bytecode_generator_;
+ LoopScope* const parent_loop_scope_;
+ LoopBuilder* const loop_builder_;
+};
+
namespace {
template <typename PropertyT>
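LoopScope above turns loop-depth bookkeeping and back-edge emission into RAII: the constructor emits the loop header and increments the generator's loop depth, and the destructor restores the depth and emits the jump back to the header of the (possibly enclosing) loop. A generic sketch of the pattern; the generator and emit hooks are stand-ins:

struct Generator {
  int loop_depth = 0;
  void EmitLoopHeader() { /* bind the loop header label */ }
  void EmitJumpToHeader(int depth) { /* emit the back edge */ }
};

// Construct only when a back edge will actually be emitted, mirroring the
// comment on the real LoopScope above.
class LoopScope final {
 public:
  explicit LoopScope(Generator* g) : generator_(g) {
    generator_->EmitLoopHeader();
    ++generator_->loop_depth;
  }
  ~LoopScope() {
    --generator_->loop_depth;
    generator_->EmitJumpToHeader(generator_->loop_depth);
  }

 private:
  Generator* const generator_;
};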
@@ -983,7 +1053,7 @@ BytecodeGenerator::BytecodeGenerator(
current_scope_(info->scope()),
eager_inner_literals_(eager_inner_literals),
feedback_slot_cache_(new (zone()) FeedbackSlotCache(zone())),
- globals_builder_(new (zone()) GlobalDeclarationsBuilder()),
+ top_level_builder_(new (zone()) TopLevelDeclarationsBuilder()),
block_coverage_builder_(nullptr),
function_literals_(0, zone()),
native_function_literals_(0, zone()),
@@ -1000,6 +1070,7 @@ BytecodeGenerator::BytecodeGenerator(
generator_jump_table_(nullptr),
suspend_count_(0),
loop_depth_(0),
+ current_loop_scope_(nullptr),
catch_prediction_(HandlerTable::UNCAUGHT) {
DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
if (info->has_source_range_map()) {
@@ -1008,22 +1079,49 @@ BytecodeGenerator::BytecodeGenerator(
}
}
+namespace {
+
+template <typename Isolate>
+struct NullContextScopeHelper;
+
+template <>
+struct NullContextScopeHelper<Isolate> {
+ using Type = NullContextScope;
+};
+
+template <>
+struct NullContextScopeHelper<OffThreadIsolate> {
+ class DummyNullContextScope {
+ public:
+ explicit DummyNullContextScope(OffThreadIsolate*) {}
+ };
+ using Type = DummyNullContextScope;
+};
+
+template <typename Isolate>
+using NullContextScopeFor = typename NullContextScopeHelper<Isolate>::Type;
+
+} // namespace
+
+template <typename LocalIsolate>
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
- Isolate* isolate, Handle<Script> script) {
+ LocalIsolate* isolate, Handle<Script> script) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
// Unoptimized compilation should be context-independent. Verify that we don't
// access the native context by nulling it out during finalization.
- NullContextScope null_context_scope(isolate);
+ NullContextScopeFor<LocalIsolate> null_context_scope(isolate);
#endif
AllocateDeferredConstants(isolate, script);
if (block_coverage_builder_) {
- info()->set_coverage_info(
- isolate->factory()->NewCoverageInfo(block_coverage_builder_->slots()));
+ Handle<CoverageInfo> coverage_info =
+ isolate->factory()->NewCoverageInfo(block_coverage_builder_->slots());
+ info()->set_coverage_info(coverage_info);
if (FLAG_trace_block_coverage) {
- info()->coverage_info()->Print(info()->literal()->GetDebugName());
+ StdoutStream os;
+ coverage_info->CoverageInfoPrint(os, info()->literal()->GetDebugName());
}
}
@@ -1038,13 +1136,19 @@ Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
return bytecode_array;
}
+template Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
+ Isolate* isolate, Handle<Script> script);
+template Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
+ OffThreadIsolate* isolate, Handle<Script> script);
+
+template <typename LocalIsolate>
Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
- Isolate* isolate) {
+ LocalIsolate* isolate) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
// Unoptimized compilation should be context-independent. Verify that we don't
// access the native context by nulling it out during finalization.
- NullContextScope null_context_scope(isolate);
+ NullContextScopeFor<LocalIsolate> null_context_scope(isolate);
#endif
Handle<ByteArray> source_position_table =
@@ -1058,21 +1162,27 @@ Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
return source_position_table;
}
+template Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
+ Isolate* isolate);
+template Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
+ OffThreadIsolate* isolate);
+
#ifdef DEBUG
-int BytecodeGenerator::CheckBytecodeMatches(Handle<BytecodeArray> bytecode) {
+int BytecodeGenerator::CheckBytecodeMatches(BytecodeArray bytecode) {
return builder()->CheckBytecodeMatches(bytecode);
}
#endif
-void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
+template <typename LocalIsolate>
+void BytecodeGenerator::AllocateDeferredConstants(LocalIsolate* isolate,
Handle<Script> script) {
- if (globals_builder()->has_global_declaration()) {
+ if (top_level_builder()->has_top_level_declaration()) {
// Build global declaration pair array.
- Handle<FixedArray> declarations =
- globals_builder()->AllocateDeclarations(info(), this, script, isolate);
+ Handle<FixedArray> declarations = top_level_builder()->AllocateDeclarations(
+ info(), this, script, isolate);
if (declarations.is_null()) return SetStackOverflow();
builder()->SetDeferredConstantPoolEntry(
- globals_builder()->constant_pool_entry(), declarations);
+ top_level_builder()->constant_pool_entry(), declarations);
}
// Find or build shared function infos.
@@ -1087,6 +1197,9 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
// Find or build shared function infos for the native function templates.
for (std::pair<NativeFunctionLiteral*, size_t> literal :
native_function_literals_) {
+ // This should only happen for main-thread compilations.
+ DCHECK((std::is_same<Isolate, v8::internal::Isolate>::value));
+
NativeFunctionLiteral* expr = literal.first;
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -1142,6 +1255,18 @@ void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate,
}
}
+template void BytecodeGenerator::AllocateDeferredConstants(
+ Isolate* isolate, Handle<Script> script);
+template void BytecodeGenerator::AllocateDeferredConstants(
+ OffThreadIsolate* isolate, Handle<Script> script);
+
+namespace {
+bool NeedsContextInitialization(DeclarationScope* scope) {
+ return scope->NeedsContext() && !scope->is_script_scope() &&
+ !scope->is_module_scope();
+}
+} // namespace
+
void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
@@ -1159,14 +1284,14 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
AllocateTopLevelRegisters();
- // Perform a stack-check before the body.
- builder()->StackCheck(info()->literal()->start_position());
+ builder()->EmitFunctionStartSourcePosition(
+ info()->literal()->start_position());
if (info()->literal()->CanSuspend()) {
BuildGeneratorPrologue();
}
- if (closure_scope()->NeedsContext() && !closure_scope()->is_script_scope()) {
+ if (NeedsContextInitialization(closure_scope())) {
// Push a new inner context scope for the function.
BuildNewLocalActivationContext();
ContextScope local_function_context(this, closure_scope());
@@ -1223,6 +1348,8 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Visit declarations within the function scope.
if (closure_scope()->is_script_scope()) {
VisitGlobalDeclarations(closure_scope()->declarations());
+ } else if (closure_scope()->is_module_scope()) {
+ VisitModuleDeclarations(closure_scope()->declarations());
} else {
VisitDeclarations(closure_scope()->declarations());
}
@@ -1232,7 +1359,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// The derived constructor case is handled in VisitCallSuper.
if (IsBaseConstructor(function_kind())) {
- if (literal->requires_brand_initialization()) {
+ if (literal->class_scope_has_private_brand()) {
BuildPrivateBrandInitialization(builder()->Receiver());
}
@@ -1320,6 +1447,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED:
+ case VariableLocation::MODULE:
UNREACHABLE();
case VariableLocation::LOCAL:
if (variable->binding_needs_init()) {
@@ -1355,13 +1483,6 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
.CallRuntime(Runtime::kDeclareEvalVar, name);
break;
}
- case VariableLocation::MODULE:
- if (variable->IsExport() && variable->binding_needs_init()) {
- builder()->LoadTheHole();
- BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
- }
- // Nothing to do for imports.
- break;
}
}
@@ -1375,6 +1496,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED:
+ case VariableLocation::MODULE:
UNREACHABLE();
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
@@ -1400,12 +1522,6 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Runtime::kDeclareEvalFunction, args);
break;
}
- case VariableLocation::MODULE:
- DCHECK_EQ(variable->mode(), VariableMode::kLet);
- DCHECK(variable->IsExport());
- VisitForAccumulatorValue(decl->fun());
- BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
- break;
}
DCHECK_IMPLIES(
eager_inner_literals_ != nullptr && decl->fun()->ShouldEagerCompile(),
@@ -1430,18 +1546,60 @@ void BytecodeGenerator::VisitModuleNamespaceImports() {
}
}
+void BytecodeGenerator::BuildDeclareCall(Runtime::FunctionId id) {
+ if (!top_level_builder()->has_top_level_declaration()) return;
+ DCHECK(!top_level_builder()->processed());
+
+ top_level_builder()->set_constant_pool_entry(
+ builder()->AllocateDeferredConstantPoolEntry());
+
+ // Emit code to declare globals.
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadConstantPoolEntry(top_level_builder()->constant_pool_entry())
+ .StoreAccumulatorInRegister(args[0])
+ .MoveRegister(Register::function_closure(), args[1])
+ .CallRuntime(id, args);
+
+ top_level_builder()->mark_processed();
+}
+
+void BytecodeGenerator::VisitModuleDeclarations(Declaration::List* decls) {
+ RegisterAllocationScope register_scope(this);
+ for (Declaration* decl : *decls) {
+ Variable* var = decl->var();
+ if (!var->is_used()) continue;
+ if (var->location() == VariableLocation::MODULE) {
+ if (decl->IsFunctionDeclaration()) {
+ DCHECK(var->IsExport());
+ FunctionDeclaration* f = static_cast<FunctionDeclaration*>(decl);
+ AddToEagerLiteralsIfEager(f->fun());
+ top_level_builder()->record_module_function_declaration();
+ } else if (var->IsExport() && var->binding_needs_init()) {
+ DCHECK(decl->IsVariableDeclaration());
+ top_level_builder()->record_module_variable_declaration();
+ }
+ } else {
+ RegisterAllocationScope register_scope(this);
+ Visit(decl);
+ }
+ }
+ BuildDeclareCall(Runtime::kDeclareModuleExports);
+}
+
void BytecodeGenerator::VisitGlobalDeclarations(Declaration::List* decls) {
RegisterAllocationScope register_scope(this);
- bool has_global_declaration = false;
for (Declaration* decl : *decls) {
Variable* var = decl->var();
DCHECK(var->is_used());
if (var->location() == VariableLocation::UNALLOCATED) {
// var or function.
- has_global_declaration = true;
if (decl->IsFunctionDeclaration()) {
+ top_level_builder()->record_global_function_declaration();
FunctionDeclaration* f = static_cast<FunctionDeclaration*>(decl);
AddToEagerLiteralsIfEager(f->fun());
+ } else {
+ top_level_builder()->record_global_variable_declaration();
}
} else {
// let or const. Handled in NewScriptContext.
@@ -1450,22 +1608,7 @@ void BytecodeGenerator::VisitGlobalDeclarations(Declaration::List* decls) {
}
}
- if (!has_global_declaration) return;
- globals_builder()->record_global_declaration();
- DCHECK(!globals_builder()->processed());
-
- globals_builder()->set_constant_pool_entry(
- builder()->AllocateDeferredConstantPoolEntry());
-
- // Emit code to declare globals.
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->LoadConstantPoolEntry(globals_builder()->constant_pool_entry())
- .StoreAccumulatorInRegister(args[0])
- .MoveRegister(Register::function_closure(), args[1])
- .CallRuntime(Runtime::kDeclareGlobals, args);
-
- globals_builder()->mark_processed();
+ BuildDeclareCall(Runtime::kDeclareGlobals);
}
void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
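
The hunk above factors the old inline DeclareGlobals emission into a shared BuildDeclareCall, so both VisitGlobalDeclarations (Runtime::kDeclareGlobals) and the new VisitModuleDeclarations (Runtime::kDeclareModuleExports) merely record declarations during the visit and then emit at most one declare call afterwards. A minimal, self-contained sketch of that pattern follows; Recorder and EmitDeclareCall are hypothetical stand-ins, not V8 API.

    #include <cstdio>
    #include <vector>

    enum class DeclKind { kFunction, kVariable };

    struct Recorder {
      int functions = 0;
      int variables = 0;
      bool processed = false;
      bool has_top_level_declaration() const { return functions + variables > 0; }
    };

    // Mirrors the shape of BuildDeclareCall: bail out early when nothing was
    // recorded, otherwise emit exactly one call and mark the recorder processed.
    void EmitDeclareCall(Recorder* r, const char* runtime_id) {
      if (!r->has_top_level_declaration()) return;
      std::printf("CallRuntime(%s) for %d function(s), %d variable(s)\n",
                  runtime_id, r->functions, r->variables);
      r->processed = true;
    }

    int main() {
      Recorder globals;
      std::vector<DeclKind> script = {DeclKind::kVariable, DeclKind::kFunction};
      for (DeclKind d : script) {
        (d == DeclKind::kFunction) ? ++globals.functions : ++globals.variables;
      }
      EmitDeclareCall(&globals, "kDeclareGlobals");

      Recorder module_exports;  // no module declarations: no call is emitted
      EmitDeclareCall(&module_exports, "kDeclareModuleExports");
      return 0;
    }
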
@@ -1717,7 +1860,6 @@ void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop_builder) {
loop_builder->LoopBody();
ControlScopeForIteration execution_control(this, stmt, loop_builder);
- builder()->StackCheck(stmt->position());
Visit(stmt->body());
loop_builder->BindContinueTarget();
}
@@ -1725,20 +1867,22 @@ void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
if (stmt->cond()->ToBooleanIsFalse()) {
+ // Since we know that the condition is false, we don't create a loop.
+ // Therefore, we don't create a LoopScope (and thus we don't create a header
+ // and a JumpToHeader). However, we still need to iterate once through the
+ // body.
VisitIterationBody(stmt, &loop_builder);
} else if (stmt->cond()->ToBooleanIsTrue()) {
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader(loop_depth_);
} else {
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_backbranch(zone());
VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(),
TestFallthrough::kThen);
loop_backbranch.Bind(builder());
- loop_builder.JumpToHeader(loop_depth_);
}
}
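
The three do-while cases above rely on LoopScope, which appears to act as an RAII helper: it binds the loop header on construction and emits the back-jump on destruction, replacing the explicit LoopHeader()/JumpToHeader() pairs that the hunk deletes. A hedged sketch of that shape, with hypothetical stand-ins (LoopEmitter, LoopScopeSketch) rather than the real classes:

    #include <cstdio>

    struct LoopEmitter {
      void BindHeader() { std::printf("loop header\n"); }
      void JumpToHeader() { std::printf("JumpLoop -> header\n"); }
    };

    class LoopScopeSketch {
     public:
      explicit LoopScopeSketch(LoopEmitter* e) : emitter_(e) {
        emitter_->BindHeader();
      }
      ~LoopScopeSketch() { emitter_->JumpToHeader(); }

     private:
      LoopEmitter* emitter_;
    };

    int main() {
      LoopEmitter emitter;
      {
        LoopScopeSketch scope(&emitter);  // models: do { body } while (true);
        std::printf("  loop body\n");
      }  // back-jump emitted here, after the body
      return 0;
    }

In the statically-false case the scope is skipped entirely, so no header or JumpLoop is emitted and the body runs exactly once.
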
@@ -1750,7 +1894,7 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
return;
}
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
if (!stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_body(zone());
@@ -1759,22 +1903,21 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
loop_body.Bind(builder());
}
VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader(loop_depth_);
}
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
- LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
-
if (stmt->init() != nullptr) {
Visit(stmt->init());
}
+
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
if (stmt->cond() && stmt->cond()->ToBooleanIsFalse()) {
// If the condition is known to be false there is no need to generate
// body, next or condition blocks. Init block should be generated.
return;
}
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_body(zone());
@@ -1787,7 +1930,6 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
builder()->SetStatementPosition(stmt->next());
Visit(stmt->next());
}
- loop_builder.JumpToHeader(loop_depth_);
}
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
@@ -1821,7 +1963,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The loop
{
LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->each());
builder()->ForInContinue(index, cache_length);
loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
@@ -1843,7 +1985,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
VisitIterationBody(stmt, &loop_builder);
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
- loop_builder.JumpToHeader(loop_depth_);
}
builder()->Bind(&subject_undefined_label);
}
@@ -1895,7 +2036,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Register next_result = register_allocator()->NewRegister();
LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
builder()->LoadTrue().StoreAccumulatorInRegister(done);
@@ -1927,8 +2068,6 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
BuildAssignment(lhs_data, Token::ASSIGN, LookupHoistingMode::kNormal);
VisitIterationBody(stmt, &loop_builder);
-
- loop_builder.JumpToHeader(loop_depth_);
},
// Finally block.
[&](Register iteration_continuation_token) {
@@ -2391,11 +2530,6 @@ void BytecodeGenerator::VisitNativeFunctionLiteral(
native_function_literals_.push_back(std::make_pair(expr, entry));
}
-void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
- VisitBlock(expr->block());
- VisitVariableProxy(expr->result());
-}
-
void BytecodeGenerator::VisitConditional(Conditional* expr) {
ConditionalControlFlowBuilder conditional_builder(
builder(), block_coverage_builder_, expr);
@@ -2747,7 +2881,7 @@ void BytecodeGenerator::BuildFillArrayWithIterator(
DCHECK(value.is_valid());
LoopBuilder loop_builder(builder(), nullptr, nullptr);
- loop_builder.LoopHeader();
+ LoopScope loop_scope(this, &loop_builder);
// Call the iterator's .next() method. Break from the loop if the `done`
// property is truthy, otherwise load the value from the iterator result and
@@ -2770,7 +2904,6 @@ void BytecodeGenerator::BuildFillArrayWithIterator(
.UnaryOperation(Token::INC, feedback_index(index_slot))
.StoreAccumulatorInRegister(index);
loop_builder.BindContinueTarget();
- loop_builder.JumpToHeader(loop_depth_);
}
void BytecodeGenerator::BuildCreateArrayLiteral(
@@ -2932,19 +3065,6 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
BuildCreateArrayLiteral(expr->values(), expr);
}
-void BytecodeGenerator::VisitStoreInArrayLiteral(StoreInArrayLiteral* expr) {
- builder()->SetExpressionAsStatementPosition(expr);
- RegisterAllocationScope register_scope(this);
- Register array = register_allocator()->NewRegister();
- Register index = register_allocator()->NewRegister();
- VisitForRegisterValue(expr->array(), array);
- VisitForRegisterValue(expr->index(), index);
- VisitForAccumulatorValue(expr->value());
- builder()->StoreInArrayLiteral(
- array, index,
- feedback_index(feedback_spec()->AddStoreInArrayLiteralICSlot()));
-}
-
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
builder()->SetExpressionPosition(proxy);
BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
@@ -4247,8 +4367,8 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// - One for awaiting the iterator result yielded by the delegated
// iterator
- LoopBuilder loop(builder(), nullptr, nullptr);
- loop.LoopHeader();
+ LoopBuilder loop_builder(builder(), nullptr, nullptr);
+ LoopScope loop_scope(this, &loop_builder);
{
BytecodeLabels after_switch(zone());
@@ -4329,7 +4449,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
output, ast_string_constants()->done_string(),
feedback_index(feedback_spec()->AddLoadICSlot()));
- loop.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
+ loop_builder.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
// Suspend the current generator.
if (iterator_type == IteratorType::kNormal) {
@@ -4360,8 +4480,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
generator_object())
.StoreAccumulatorInRegister(resume_mode);
- loop.BindContinueTarget();
- loop.JumpToHeader(loop_depth_);
+ loop_builder.BindContinueTarget();
}
}
@@ -4553,16 +4672,37 @@ void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
DCHECK(IsPrivateMethodOrAccessorVariableMode(private_name->mode()));
ClassScope* scope = private_name->scope()->AsClassScope();
if (private_name->is_static()) {
- DCHECK_NOT_NULL(scope->class_variable());
// For static private methods, the only valid receiver is the class.
// Load the class constructor.
- BuildVariableLoadForAccumulatorValue(scope->class_variable(),
- HoleCheckMode::kElided);
- BytecodeLabel return_check;
- builder()->CompareReference(object).JumpIfTrue(
- ToBooleanMode::kAlreadyBoolean, &return_check);
- BuildInvalidPropertyAccess(tmpl, property);
- builder()->Bind(&return_check);
+ if (scope->class_variable() == nullptr) {
+ // If the static private method has not been used in source code
+ // (either explicitly or through the presence of eval), but is
+ // accessed by the debugger at runtime, the reference to the class
+ // variable is not available since it was not context-allocated.
+ // Therefore we can't build a branch check, and instead throw a
+ // ReferenceError as if the method had been optimized away.

+ // TODO(joyee): get a reference to the class constructor through
+ // something other than scope->class_variable() in this scenario.
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(Smi::FromEnum(
+ MessageTemplate::
+ kInvalidUnusedPrivateStaticMethodAccessedByDebugger))
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(private_name->raw_name())
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewError, args)
+ .Throw();
+ } else {
+ BuildVariableLoadForAccumulatorValue(scope->class_variable(),
+ HoleCheckMode::kElided);
+ BytecodeLabel return_check;
+ builder()->CompareReference(object).JumpIfTrue(
+ ToBooleanMode::kAlreadyBoolean, &return_check);
+ BuildInvalidPropertyAccess(tmpl, property);
+ builder()->Bind(&return_check);
+ }
} else {
BuildVariableLoadForAccumulatorValue(scope->brand(),
HoleCheckMode::kElided);
@@ -4901,7 +5041,7 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
Register instance = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(instance);
- if (info()->literal()->requires_brand_initialization()) {
+ if (info()->literal()->class_scope_has_private_brand()) {
BuildPrivateBrandInitialization(instance);
}
@@ -5999,37 +6139,23 @@ void BytecodeGenerator::BuildNewLocalActivationContext() {
DCHECK_EQ(current_scope(), closure_scope());
// Create the appropriate context.
- if (scope->is_module_scope()) {
- // We don't need to do anything for the outer script scope.
- DCHECK(scope->outer_scope()->is_script_scope());
-
- // A JSFunction representing a module is called with the module object as
- // its sole argument.
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->MoveRegister(builder()->Parameter(0), args[0])
- .LoadLiteral(scope)
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kPushModuleContext, args);
- } else {
- DCHECK(scope->is_function_scope() || scope->is_eval_scope());
- int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (slot_count <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- switch (scope->scope_type()) {
- case EVAL_SCOPE:
- builder()->CreateEvalContext(scope, slot_count);
- break;
- case FUNCTION_SCOPE:
- builder()->CreateFunctionContext(scope, slot_count);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- Register arg = register_allocator()->NewRegister();
- builder()->LoadLiteral(scope).StoreAccumulatorInRegister(arg).CallRuntime(
- Runtime::kNewFunctionContext, arg);
+ DCHECK(scope->is_function_scope() || scope->is_eval_scope());
+ int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (slot_count <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
+ switch (scope->scope_type()) {
+ case EVAL_SCOPE:
+ builder()->CreateEvalContext(scope, slot_count);
+ break;
+ case FUNCTION_SCOPE:
+ builder()->CreateFunctionContext(scope, slot_count);
+ break;
+ default:
+ UNREACHABLE();
}
+ } else {
+ Register arg = register_allocator()->NewRegister();
+ builder()->LoadLiteral(scope).StoreAccumulatorInRegister(arg).CallRuntime(
+ Runtime::kNewFunctionContext, arg);
}
}
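
With the module branch removed above, BuildNewLocalActivationContext only decides between the CreateFunctionContext/CreateEvalContext fast path and the Runtime::kNewFunctionContext fallback, based on the context's slot count. A small sketch of that threshold decision; the two constants are assumed stand-in values, not V8's real limits:

    #include <cstdio>

    constexpr int kMinContextSlots = 2;   // assumption, stand-in value
    constexpr int kMaxFastSlots = 1023;   // assumption, stand-in value

    const char* PickContextAllocation(int num_heap_slots) {
      int slot_count = num_heap_slots - kMinContextSlots;
      return slot_count <= kMaxFastSlots ? "CreateFunctionContext (builtin)"
                                         : "Runtime::kNewFunctionContext";
    }

    int main() {
      std::printf("10 slots   -> %s\n", PickContextAllocation(10));
      std::printf("5000 slots -> %s\n", PickContextAllocation(5000));
      return 0;
    }
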
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 15b33bcac3..aa461d523c 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -24,7 +24,7 @@ enum class SourceRangeKind;
namespace interpreter {
-class GlobalDeclarationsBuilder;
+class TopLevelDeclarationsBuilder;
class LoopBuilder;
class BlockCoverageBuilder;
class BytecodeJumpTable;
@@ -37,12 +37,14 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
std::vector<FunctionLiteral*>* eager_inner_literals);
void GenerateBytecode(uintptr_t stack_limit);
- Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
+ template <typename LocalIsolate>
+ Handle<BytecodeArray> FinalizeBytecode(LocalIsolate* isolate,
Handle<Script> script);
- Handle<ByteArray> FinalizeSourcePositionTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ Handle<ByteArray> FinalizeSourcePositionTable(LocalIsolate* isolate);
#ifdef DEBUG
- int CheckBytecodeMatches(Handle<BytecodeArray> bytecode);
+ int CheckBytecodeMatches(BytecodeArray bytecode);
#endif
#define DECLARE_VISIT(type) void Visit##type(type* node);
@@ -50,11 +52,13 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
#undef DECLARE_VISIT
// Visiting function for declarations list and statements are overridden.
+ void VisitModuleDeclarations(Declaration::List* declarations);
void VisitGlobalDeclarations(Declaration::List* declarations);
void VisitDeclarations(Declaration::List* declarations);
void VisitStatements(const ZonePtrList<Statement>* statments);
private:
+ class AccumulatorPreservingScope;
class ContextScope;
class ControlScope;
class ControlScopeForBreakable;
@@ -63,17 +67,17 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class ControlScopeForTryCatch;
class ControlScopeForTryFinally;
class CurrentScope;
- class ExpressionResultScope;
class EffectResultScope;
+ class ExpressionResultScope;
class FeedbackSlotCache;
- class GlobalDeclarationsBuilder;
class IteratorRecord;
+ class LoopScope;
class NaryCodeCoverageSlots;
+ class OptionalChainNullLabelScope;
class RegisterAllocationScope;
- class AccumulatorPreservingScope;
class TestResultScope;
+ class TopLevelDeclarationsBuilder;
class ValueResultScope;
- class OptionalChainNullLabelScope;
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
@@ -160,7 +164,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
};
void GenerateBytecodeBody();
- void AllocateDeferredConstants(Isolate* isolate, Handle<Script> script);
+ template <typename LocalIsolate>
+ void AllocateDeferredConstants(LocalIsolate* isolate, Handle<Script> script);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -221,6 +226,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildThisVariableLoad();
+ void BuildDeclareCall(Runtime::FunctionId id);
+
Expression* GetDestructuringDefaultValue(Expression** target);
void BuildDestructuringArrayAssignment(
ArrayLiteral* pattern, Token::Value op,
@@ -462,9 +469,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
return builder()->register_allocator();
}
- GlobalDeclarationsBuilder* globals_builder() {
- DCHECK_NOT_NULL(globals_builder_);
- return globals_builder_;
+ TopLevelDeclarationsBuilder* top_level_builder() {
+ DCHECK_NOT_NULL(top_level_builder_);
+ return top_level_builder_;
}
inline LanguageMode language_mode() const;
inline FunctionKind function_kind() const;
@@ -482,6 +489,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
catch_prediction_ = value;
}
+ LoopScope* current_loop_scope() const { return current_loop_scope_; }
+ void set_current_loop_scope(LoopScope* loop_scope) {
+ current_loop_scope_ = loop_scope;
+ }
+
Zone* zone_;
BytecodeArrayBuilder builder_;
UnoptimizedCompilationInfo* info_;
@@ -494,7 +506,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
FeedbackSlotCache* feedback_slot_cache_;
- GlobalDeclarationsBuilder* globals_builder_;
+ TopLevelDeclarationsBuilder* top_level_builder_;
BlockCoverageBuilder* block_coverage_builder_;
ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
@@ -518,8 +530,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
BytecodeJumpTable* generator_jump_table_;
int suspend_count_;
+ // TODO(solanes): assess if we can move loop_depth_ into LoopScope.
int loop_depth_;
+ LoopScope* current_loop_scope_;
+
HandlerTable::CatchPrediction catch_prediction_;
};
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index a5d2e6e50a..4f953341d4 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -37,7 +37,7 @@ namespace interpreter {
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort) \
- V(NativeContextIndex, OperandTypeInfo::kScalableUnsignedByte)
+ V(NativeContextIndex, OperandTypeInfo::kFixedUnsignedByte)
// Carefully ordered for operand type range checks below.
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index d75e45967b..ea6814db81 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -233,8 +233,13 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
// a vector of register metadata.
// There is at least one parameter, which is the JS receiver.
DCHECK_NE(parameter_count, 0);
+#ifdef V8_REVERSE_JSARGS
+ int first_slot_index = parameter_count - 1;
+#else
+ int first_slot_index = 0;
+#endif
register_info_table_offset_ =
- -Register::FromParameterIndex(0, parameter_count).index();
+ -Register::FromParameterIndex(first_slot_index, parameter_count).index();
// Initialize register map for parameters, locals, and the
// accumulator.
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index 56f6297016..4e56c3b411 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -8,10 +8,17 @@ namespace v8 {
namespace internal {
namespace interpreter {
+#ifdef V8_REVERSE_JSARGS
+static const int kFirstParamRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kFirstParamFromFp) /
+ kSystemPointerSize;
+#else
static const int kLastParamRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kLastParamFromFp) /
kSystemPointerSize;
+#endif
static const int kFunctionClosureRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
StandardFrameConstants::kFunctionOffset) /
@@ -36,14 +43,22 @@ static const int kCallerPCOffsetRegisterIndex =
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
DCHECK_LT(index, parameter_count);
+#ifdef V8_REVERSE_JSARGS
+ int register_index = kFirstParamRegisterIndex - index;
+#else
int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
+#endif
DCHECK_LT(register_index, 0);
return Register(register_index);
}
int Register::ToParameterIndex(int parameter_count) const {
DCHECK(is_parameter());
+#ifdef V8_REVERSE_JSARGS
+ return kFirstParamRegisterIndex - index();
+#else
return index() - kLastParamRegisterIndex + parameter_count - 1;
+#endif
}
Register Register::function_closure() {
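
For a concrete feel of the two mappings in FromParameterIndex above: the classic layout computes a parameter's register from the last parameter's slot, while the V8_REVERSE_JSARGS layout counts down from the first parameter. A worked example with assumed base indices (the real values derive from InterpreterFrameConstants, as shown in the hunk):

    #include <cassert>
    #include <cstdio>

    constexpr int kLastParamRegisterIndex = -4;   // assumption, stand-in value
    constexpr int kFirstParamRegisterIndex = -4;  // assumption, stand-in value

    int ClassicIndex(int index, int parameter_count) {
      // Last parameter lives at kLastParamRegisterIndex; earlier parameters
      // occupy the slots before it.
      return kLastParamRegisterIndex - parameter_count + index + 1;
    }

    int ReversedIndex(int index, int /*parameter_count*/) {
      // With V8_REVERSE_JSARGS the receiver (index 0) is closest to the frame
      // pointer and later parameters count downwards from it.
      return kFirstParamRegisterIndex - index;
    }

    int main() {
      const int parameter_count = 3;  // receiver plus two arguments
      for (int i = 0; i < parameter_count; ++i) {
        int classic = ClassicIndex(i, parameter_count);
        int reversed = ReversedIndex(i, parameter_count);
        assert(classic < 0 && reversed < 0);  // parameters are negative registers
        std::printf("param %d: classic r%d, reversed r%d\n", i, classic, reversed);
      }
      return 0;
    }

With these stand-in values, parameter 0 maps to r-6 classically but r-4 reversed, making the reversal of the argument order visible in the indices.
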
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 80f9e4d311..0b638418d1 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -333,9 +333,6 @@ namespace interpreter {
OperandType::kRegPair, OperandType::kIdx) \
V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg) \
\
- /* Perform a stack guard check */ \
- V(StackCheck, AccumulatorUse::kNone) \
- \
/* Update the pending message */ \
V(SetPendingMessage, AccumulatorUse::kReadWrite) \
\
@@ -644,10 +641,11 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
}
// Return true if |bytecode| is a jump without effects,
- // e.g. any jump excluding those that include type coercion like
- // JumpIfTrueToBoolean.
+  // e.g. any jump excluding those that include type coercion like
+  // JumpIfTrueToBoolean, and excluding JumpLoop, which performs an
+  // implicit StackCheck.
static constexpr bool IsJumpWithoutEffects(Bytecode bytecode) {
- return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
+ return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode) &&
+ bytecode != Bytecode::kJumpLoop;
}
// Returns true if the bytecode is a switch.
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index f36556f1d4..feb3abab95 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -13,6 +13,7 @@
#include "src/ast/scopes.h"
#include "src/base/functional.h"
#include "src/execution/isolate.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -64,8 +65,9 @@ const ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
}
#if DEBUG
+template <typename LocalIsolate>
void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
- Isolate* isolate) const {
+ LocalIsolate* isolate) const {
std::set<Smi> smis;
std::set<double> heap_numbers;
std::set<const AstRawString*> strings;
@@ -166,8 +168,9 @@ ConstantArrayBuilder::ConstantArraySlice* ConstantArrayBuilder::IndexToSlice(
UNREACHABLE();
}
+template <typename LocalIsolate>
MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
- Isolate* isolate) const {
+ LocalIsolate* isolate) const {
const ConstantArraySlice* slice = IndexToSlice(index);
DCHECK_LT(index, slice->capacity());
if (index < slice->start_index() + slice->size()) {
@@ -177,7 +180,15 @@ MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
return MaybeHandle<Object>();
}
-Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
+ Isolate* isolate) const;
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ MaybeHandle<Object> ConstantArrayBuilder::At(
+ size_t index, OffThreadIsolate* isolate) const;
+
+template <typename LocalIsolate>
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(LocalIsolate* isolate) {
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArrayWithHoles(
static_cast<int>(size()), AllocationType::kOld);
int array_index = 0;
@@ -207,6 +218,12 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
return fixed_array;
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(
+ OffThreadIsolate* isolate);
+
size_t ConstantArrayBuilder::Insert(Smi smi) {
auto entry = smi_map_.find(smi);
if (entry == smi_map_.end()) {
@@ -362,7 +379,9 @@ void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
OperandSizeToSlice(operand_size)->Unreserve();
}
-Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
+template <typename LocalIsolate>
+Handle<Object> ConstantArrayBuilder::Entry::ToHandle(
+ LocalIsolate* isolate) const {
switch (tag_) {
case Tag::kDeferred:
// We shouldn't have any deferred entries by now.
@@ -376,9 +395,10 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
// TODO(leszeks): There's probably a better value we could use here.
return isolate->factory()->the_hole_value();
case Tag::kRawString:
- return raw_string_->string().get<Factory>();
+ return raw_string_->string();
case Tag::kHeapNumber:
- return isolate->factory()->NewNumber<AllocationType::kOld>(heap_number_);
+ return isolate->factory()->template NewNumber<AllocationType::kOld>(
+ heap_number_);
case Tag::kBigInt:
// This should never fail: the parser will never create a BigInt
// literal that cannot be allocated.
@@ -394,6 +414,11 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
UNREACHABLE();
}
+template Handle<Object> ConstantArrayBuilder::Entry::ToHandle(
+ Isolate* isolate) const;
+template Handle<Object> ConstantArrayBuilder::Entry::ToHandle(
+ OffThreadIsolate* isolate) const;
+
} // namespace interpreter
} // namespace internal
} // namespace v8
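
The pattern above, repeated for ToFixedArray, At, and Entry::ToHandle, declares each method as a template over a LocalIsolate type, defines it in the .cc file, and explicitly instantiates it for exactly Isolate and OffThreadIsolate. That keeps the implementation out of the header while supporting both isolate types. A minimal sketch collapsed into one file, with stand-in isolate types instead of the real ones:

    #include <cstdio>

    struct MainIsolate {
      const char* name() const { return "Isolate"; }
    };
    struct OffThreadIsolateSketch {
      const char* name() const { return "OffThreadIsolate"; }
    };

    // "Header": declaration templated over the isolate type.
    template <typename LocalIsolate>
    void Finalize(LocalIsolate* isolate);

    // ".cc": one definition...
    template <typename LocalIsolate>
    void Finalize(LocalIsolate* isolate) {
      std::printf("finalizing on %s\n", isolate->name());
    }

    // ...and exactly the two instantiations callers are allowed to use.
    template void Finalize(MainIsolate* isolate);
    template void Finalize(OffThreadIsolateSketch* isolate);

    int main() {
      MainIsolate main_isolate;
      OffThreadIsolateSketch off_thread;
      Finalize(&main_isolate);
      Finalize(&off_thread);
      return 0;
    }
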
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 968a0cadd5..3376f9a2cc 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -7,6 +7,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/common/globals.h"
+#include "src/handles/handles.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects/smi.h"
#include "src/utils/identity-map.h"
@@ -52,12 +53,16 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
explicit ConstantArrayBuilder(Zone* zone);
// Generate a fixed array of constant handles based on inserted objects.
- Handle<FixedArray> ToFixedArray(Isolate* isolate);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<FixedArray> ToFixedArray(LocalIsolate* isolate);
// Returns the object, as a handle in |isolate|, that is in the constant pool
// array at index |index|. Returns null if there is no handle at this index.
// Only expected to be used in tests.
- MaybeHandle<Object> At(size_t index, Isolate* isolate) const;
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ MaybeHandle<Object> At(size_t index, LocalIsolate* isolate) const;
// Returns the number of elements in the array.
size_t size() const;
@@ -150,7 +155,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
smi_ = smi;
}
- Handle<Object> ToHandle(Isolate* isolate) const;
+ template <typename LocalIsolate>
+ Handle<Object> ToHandle(LocalIsolate* isolate) const;
private:
explicit Entry(Tag tag) : tag_(tag) {}
@@ -199,7 +205,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
const Entry& At(size_t index) const;
#if DEBUG
- void CheckAllElementsAreUnique(Isolate* isolate) const;
+ template <typename LocalIsolate>
+ void CheckAllElementsAreUnique(LocalIsolate* isolate) const;
#endif
inline size_t available() const { return capacity() - reserved() - size(); }
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 3fa1274f82..7062550d30 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -47,6 +47,7 @@ void BreakableControlFlowBuilder::EmitJumpIfNull(BytecodeLabels* sites) {
LoopBuilder::~LoopBuilder() {
DCHECK(continue_labels_.empty() || continue_labels_.is_bound());
+ DCHECK(end_labels_.empty() || end_labels_.is_bound());
}
void LoopBuilder::LoopHeader() {
@@ -54,7 +55,8 @@ void LoopBuilder::LoopHeader() {
// requirements of bytecode basic blocks. The only entry into a loop
// must be the loop header. Surely breaks is okay? Not if nested
// and misplaced between the headers.
- DCHECK(break_labels_.empty() && continue_labels_.empty());
+ DCHECK(break_labels_.empty() && continue_labels_.empty() &&
+ end_labels_.empty());
builder()->Bind(&loop_header_);
}
@@ -64,17 +66,30 @@ void LoopBuilder::LoopBody() {
}
}
-void LoopBuilder::JumpToHeader(int loop_depth) {
- // Pass the proper loop nesting level to the backwards branch, to trigger
- // on-stack replacement when armed for the given loop nesting depth.
- int level = Min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1);
- // Loop must have closed form, i.e. all loop elements are within the loop,
- // the loop header precedes the body and next elements in the loop.
- builder()->JumpLoop(&loop_header_, level);
+void LoopBuilder::JumpToHeader(int loop_depth, LoopBuilder* const parent_loop) {
+ BindLoopEnd();
+ if (parent_loop &&
+ loop_header_.offset() == parent_loop->loop_header_.offset()) {
+ // TurboFan can't cope with multiple loops that have the same loop header
+ // bytecode offset. If we have an inner loop with the same header offset
+ // as its parent loop, we do not create a JumpLoop bytecode. Instead, we
+ // Jump to our parent's JumpToHeader, which in turn can be a JumpLoop or,
+ // iff it is a nested inner loop too, a Jump to its parent's JumpToHeader.
+ parent_loop->JumpToLoopEnd();
+ } else {
+ // Pass the proper loop nesting level to the backwards branch, to trigger
+ // on-stack replacement when armed for the given loop nesting depth.
+ int level = Min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1);
+ // Loop must have closed form, i.e. all loop elements are within the loop,
+ // the loop header precedes the body and next elements in the loop.
+ builder()->JumpLoop(&loop_header_, level, source_position_);
+ }
}
void LoopBuilder::BindContinueTarget() { continue_labels_.Bind(builder()); }
+void LoopBuilder::BindLoopEnd() { end_labels_.Bind(builder()); }
+
SwitchBuilder::~SwitchBuilder() {
#ifdef DEBUG
for (auto site : case_sites_) {
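
The dedup above handles inner loops whose header lands at the same bytecode offset as their parent's (when nothing is emitted between the two headers): the inner loop emits a plain Jump to the parent's end label instead of a second JumpLoop, so TurboFan never sees two loops sharing one header. A toy simulation of just that decision, with stand-in types:

    #include <cstdio>

    struct LoopSketch {
      int header_offset;
      LoopSketch* parent;

      void JumpToHeader() {
        if (parent && header_offset == parent->header_offset) {
          // Same header as the parent: emit a forward Jump to the parent's
          // end label (bound where the parent emits its own JumpLoop).
          std::printf("Jump -> parent loop end\n");
        } else {
          std::printf("JumpLoop -> header @%d\n", header_offset);
        }
      }
    };

    int main() {
      LoopSketch outer{/*header_offset=*/10, /*parent=*/nullptr};
      LoopSketch inner{/*header_offset=*/10, /*parent=*/&outer};
      inner.JumpToHeader();  // Jump -> parent loop end
      outer.JumpToHeader();  // JumpLoop -> header @10
      return 0;
    }
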
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index d4f2d11e7c..ac68947e1a 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -9,6 +9,7 @@
#include "src/ast/ast-source-ranges.h"
#include "src/interpreter/block-coverage-builder.h"
+#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecode-label.h"
#include "src/zone/zone-containers.h"
@@ -79,7 +80,6 @@ class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
BlockCoverageBuilder* block_coverage_builder_;
};
-
// Class to track control flow for block statements (which can break in JS).
class V8_EXPORT_PRIVATE BlockBuilder final
: public BreakableControlFlowBuilder {
@@ -91,7 +91,6 @@ class V8_EXPORT_PRIVATE BlockBuilder final
statement) {}
};
-
// A class to help with co-ordinating break and continue statements with
// their loop.
class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
@@ -99,18 +98,20 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
LoopBuilder(BytecodeArrayBuilder* builder,
BlockCoverageBuilder* block_coverage_builder, AstNode* node)
: BreakableControlFlowBuilder(builder, block_coverage_builder, node),
- continue_labels_(builder->zone()) {
+ continue_labels_(builder->zone()),
+ end_labels_(builder->zone()) {
if (block_coverage_builder_ != nullptr) {
block_coverage_body_slot_ =
block_coverage_builder_->AllocateBlockCoverageSlot(
node, SourceRangeKind::kBody);
}
+ source_position_ = node ? node->position() : kNoSourcePosition;
}
~LoopBuilder() override;
void LoopHeader();
void LoopBody();
- void JumpToHeader(int loop_depth);
+ void JumpToHeader(int loop_depth, LoopBuilder* const parent_loop);
void BindContinueTarget();
// This method is called when visiting continue statements in the AST.
@@ -121,15 +122,28 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
void ContinueIfNull() { EmitJumpIfNull(&continue_labels_); }
private:
+ // Emit a Jump to our parent_loop_'s end label, which could be a JumpLoop
+ // or, iff the parent is a nested inner loop with the same loop header
+ // bytecode offset as its own parent's, a Jump to that parent's end label.
+ void JumpToLoopEnd() { EmitJump(&end_labels_); }
+ void BindLoopEnd();
+
BytecodeLoopHeader loop_header_;
// Unbound labels that identify jumps for continue statements in the code and
// jumps from checking the loop condition to the header for do-while loops.
BytecodeLabels continue_labels_;
+ // Unbound labels that identify jumps for nested inner loops which share the
+ // same header offset as this loop. Said inner loops will Jump to our end
+ // label, which could be a JumpLoop or, iff we are a nested inner loop too, a
+ // Jump to our parent's end label.
+ BytecodeLabels end_labels_;
+
int block_coverage_body_slot_;
-};
+ int source_position_;
+};
// A class to help with co-ordinating break statements with their switch.
class V8_EXPORT_PRIVATE SwitchBuilder final
@@ -165,7 +179,6 @@ class V8_EXPORT_PRIVATE SwitchBuilder final
ZoneVector<BytecodeLabel> case_sites_;
};
-
// A class to help with co-ordinating control flow in try-catch statements.
class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
public:
@@ -194,7 +207,6 @@ class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
TryCatchStatement* statement_;
};
-
// A class to help with co-ordinating control flow in try-finally statements.
class V8_EXPORT_PRIVATE TryFinallyBuilder final : public ControlFlowBuilder {
public:
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index 831d31d09f..91c6e819c1 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -15,7 +15,8 @@ namespace interpreter {
HandlerTableBuilder::HandlerTableBuilder(Zone* zone) : entries_(zone) {}
-Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
+template <typename LocalIsolate>
+Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(LocalIsolate* isolate) {
int handler_table_size = static_cast<int>(entries_.size());
Handle<ByteArray> table_byte_array = isolate->factory()->NewByteArray(
HandlerTable::LengthForRange(handler_table_size), AllocationType::kOld);
@@ -31,6 +32,10 @@ Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
return table_byte_array;
}
+template Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(
+ Isolate* isolate);
+template Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(
+ OffThreadIsolate* isolate);
int HandlerTableBuilder::NewHandlerEntry() {
int handler_id = static_cast<int>(entries_.size());
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 66b8d1f937..9bf2b17258 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -28,7 +28,8 @@ class V8_EXPORT_PRIVATE HandlerTableBuilder final {
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
- Handle<ByteArray> ToHandlerTable(Isolate* isolate);
+ template <typename LocalIsolate>
+ Handle<ByteArray> ToHandlerTable(LocalIsolate* isolate);
// Creates a new handler table entry and returns a {handler_id} identifying the
// entry, so that it can be referenced by below setter functions.
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 971642344b..eaea1c91dd 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -122,8 +122,8 @@ void InterpreterAssembler::SaveBytecodeOffset() {
IntPtrConstant(payload_offset),
TruncateIntPtrToInt32(bytecode_offset));
} else {
- StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
- IntPtrConstant(store_offset), SmiTag(bytecode_offset));
+ StoreFullTaggedNoWriteBarrier(base, IntPtrConstant(store_offset),
+ SmiTag(bytecode_offset));
}
}
@@ -265,11 +265,9 @@ TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
#if V8_TARGET_LITTLE_ENDIAN
index += 4;
#endif
- return ChangeInt32ToIntPtr(
- Load(MachineType::Int32(), base, IntPtrConstant(index)));
+ return ChangeInt32ToIntPtr(Load<Int32T>(base, IntPtrConstant(index)));
} else {
- return SmiToIntPtr(
- Load(MachineType::TaggedSigned(), base, IntPtrConstant(index)));
+ return SmiToIntPtr(CAST(LoadFullTagged(base, IntPtrConstant(index))));
}
}
@@ -625,6 +623,13 @@ TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}
+TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
+ int operand_index) {
+ TNode<IntPtrT> index =
+ ChangeInt32ToIntPtr(Signed(BytecodeOperandIdxInt32(operand_index)));
+ return IntPtrToTaggedIndex(index);
+}
+
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_EQ(OperandType::kIdx,
@@ -766,9 +771,15 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
+#ifdef V8_REVERSE_JSARGS
+ TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
+ context, function, arg_count, args...,
+ UndefinedConstant());
+#else
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count,
UndefinedConstant(), args...);
+#endif
} else {
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...);
@@ -1476,7 +1487,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \
+ V8_TARGET_ARCH_PPC64
return true;
#else
#error "Unknown Architecture"
@@ -1523,9 +1535,14 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
// Iterate over parameters and write them into the array.
Label loop(this, &var_index), done_loop(this);
+#ifdef V8_REVERSE_JSARGS
+ TNode<IntPtrT> reg_base =
+ IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() + 1);
+#else
TNode<IntPtrT> reg_base = IntPtrAdd(
IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
formal_parameter_count_intptr);
+#endif
Goto(&loop);
BIND(&loop);
@@ -1534,7 +1551,11 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
&done_loop);
+#ifdef V8_REVERSE_JSARGS
+ TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
+#else
TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index);
+#endif
TNode<Object> value = LoadRegister(reg_index);
StoreFixedArrayElement(array, index, value);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 006b247794..729e23c7a6 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -38,6 +38,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the smi index immediate for bytecode operand |operand_index|
// in the current bytecode.
TNode<Smi> BytecodeOperandIdxSmi(int operand_index);
+ // Returns the TaggedIndex immediate for bytecode operand |operand_index|
+ // in the current bytecode.
+ TNode<TaggedIndex> BytecodeOperandIdxTaggedIndex(int operand_index);
// Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
// in the current bytecode.
TNode<Uint32T> BytecodeOperandUImm(int operand_index);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 50ceaf462f..6b8b7135e0 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -168,12 +168,8 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
Dispatch();
});
- LazyNode<Smi> lazy_smi_slot = [=] {
- return SmiTag(Signed(BytecodeOperandIdx(slot_operand_index)));
- };
-
- LazyNode<UintPtrT> lazy_slot = [=] {
- return BytecodeOperandIdx(slot_operand_index);
+ LazyNode<TaggedIndex> lazy_slot = [=] {
+ return BytecodeOperandIdxTaggedIndex(slot_operand_index);
};
LazyNode<Context> lazy_context = [=] { return GetContext(); };
@@ -184,9 +180,8 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
return name;
};
- accessor_asm.LoadGlobalIC(maybe_feedback_vector, lazy_smi_slot, lazy_slot,
- lazy_context, lazy_name, typeof_mode,
- &exit_point);
+ accessor_asm.LoadGlobalIC(maybe_feedback_vector, lazy_slot, lazy_context,
+ lazy_name, typeof_mode, &exit_point);
}
};
@@ -222,14 +217,13 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
// Store the global via the StoreGlobalIC.
TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
Label no_feedback(this, Label::kDeferred), end(this);
GotoIf(IsUndefined(maybe_vector), &no_feedback);
- CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, smi_slot,
+ CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot,
maybe_vector);
Goto(&end);
@@ -514,13 +508,14 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
// constant pool entry <name_index>.
IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> feedback_slot = BytecodeOperandIdx(2);
// Load receiver.
TNode<Object> recv = LoadRegisterAtOperandIndex(0);
// Load the name and context lazily.
- LazyNode<Smi> lazy_smi_slot = [=] { return SmiTag(Signed(feedback_slot)); };
+ LazyNode<TaggedIndex> lazy_slot = [=] {
+ return BytecodeOperandIdxTaggedIndex(2);
+ };
LazyNode<Name> lazy_name = [=] {
return CAST(LoadConstantPoolEntryAtOperandIndex(1));
};
@@ -530,8 +525,8 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
TVARIABLE(Object, var_result);
ExitPoint exit_point(this, &done, &var_result);
- AccessorAssembler::LazyLoadICParameters params(
- lazy_context, recv, lazy_name, lazy_smi_slot, feedback_vector);
+ AccessorAssembler::LazyLoadICParameters params(lazy_context, recv, lazy_name,
+ lazy_slot, feedback_vector);
AccessorAssembler accessor_asm(state());
accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
@@ -562,14 +557,13 @@ IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) {
IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
- smi_slot, feedback_vector);
+ var_result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name, slot,
+ feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -586,14 +580,13 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
var_result = CallStub(ic.descriptor(), code_target, context, object, name,
- value, smi_slot, maybe_vector);
+ value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -649,14 +642,13 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Object> name = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
var_result = CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
- value, smi_slot, maybe_vector);
+ value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -674,14 +666,13 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
TNode<Object> array = LoadRegisterAtOperandIndex(0);
TNode<Object> index = LoadRegisterAtOperandIndex(1);
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
var_result = CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
- index, value, smi_slot, feedback_vector);
+ index, value, slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -705,13 +696,13 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
TNode<Smi> flags =
SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2)));
- TNode<Smi> vector_index = BytecodeOperandIdxSmi(3);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(3);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
- value, flags, feedback_vector, vector_index);
+ value, flags, feedback_vector, slot);
Dispatch();
}
@@ -1586,17 +1577,32 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
break;
case 2:
+#ifdef V8_REVERSE_JSARGS
+ CallJSAndDispatch(
+ function, context, Int32Constant(arg_count), receiver_mode,
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
+#else
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
+#endif
break;
case 3:
+#ifdef V8_REVERSE_JSARGS
+ CallJSAndDispatch(
+ function, context, Int32Constant(arg_count), receiver_mode,
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
+#else
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
+#endif
break;
default:
UNREACHABLE();
@@ -1874,14 +1880,13 @@ IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) {
IGNITION_HANDLER(TestIn, InterpreterAssembler) {
TNode<Object> name = LoadRegisterAtOperandIndex(0);
TNode<Object> object = GetAccumulator();
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtins::kKeyedHasIC, context, object, name,
- smi_slot, feedback_vector);
+ var_result = CallBuiltin(Builtins::kKeyedHasIC, context, object, name, slot,
+ feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -1902,7 +1907,8 @@ IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
// Record feedback for the {callable} in the {feedback_vector}.
CollectCallableFeedback(callable, context, CAST(maybe_feedback_vector),
- slot_id);
+ slot_id,
+ CallableFeedbackMode::kDontCollectFeedbackCell);
Goto(&feedback_done);
BIND(&feedback_done);
@@ -2361,12 +2367,15 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
// JumpLoop <imm> <loop_depth>
//
// Jump by the number of bytes represented by the immediate operand |imm|. Also
-// performs a loop nesting check and potentially triggers OSR in case the
-// current OSR level matches (or exceeds) the specified |loop_depth|.
+// performs a loop nesting check, a stack check, and potentially triggers OSR in
+// case the current OSR level matches (or exceeds) the specified |loop_depth|.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
TNode<Int32T> loop_depth = BytecodeOperandImm(1);
TNode<Int8T> osr_level = LoadOsrNestingLevel();
+ TNode<Context> context = GetContext();
+
+ PerformStackCheck(context);
// Check if OSR points at the given {loop_depth} are armed by comparing it to
// the current {osr_level} loaded from the header of the BytecodeArray.
@@ -2381,7 +2390,6 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
TNode<Code> target = HeapConstant(callable.code());
- TNode<Context> context = GetContext();
CallStub(callable.descriptor(), target, context);
JumpBackward(relative_jump);
}
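
With the dedicated StackCheck bytecode removed later in this diff, the stack check now happens implicitly on every loop back-edge: JumpLoop calls PerformStackCheck before the OSR arming check, and function entry is handled separately. A toy model of that ordering, with stand-in helpers rather than the real assembler calls:

    #include <cstdio>

    void PerformStackCheckSketch() { std::printf("  stack/interrupt check\n"); }
    void OsrCheckSketch(int loop_depth) {
      std::printf("  OSR armed for loop_depth=%d?\n", loop_depth);
    }

    int main() {
      const int loop_depth = 1;
      for (int i = 0; i < 2; ++i) {
        std::printf("iteration %d body\n", i);
        // JumpLoop back-edge: stack check first, then the OSR arming check.
        PerformStackCheckSketch();
        OsrCheckSketch(loop_depth);
      }
      return 0;
    }
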
@@ -2425,7 +2433,7 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
TNode<Object> pattern = LoadConstantPoolEntryAtOperandIndex(0);
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<Smi> flags =
SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2)));
TNode<Context> context = GetContext();
@@ -2434,7 +2442,7 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
result = constructor_assembler.EmitCreateRegExpLiteral(
- feedback_vector, slot_id, pattern, flags, context);
+ feedback_vector, slot, pattern, flags, context);
SetAccumulator(result.value());
Dispatch();
}
@@ -2445,7 +2453,7 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<Context> context = GetContext();
TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2);
@@ -2461,7 +2469,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
{
ConstructorBuiltinsAssembler constructor_assembler(state());
TNode<JSArray> result = constructor_assembler.EmitCreateShallowArrayLiteral(
- CAST(feedback_vector), slot_id, context, &call_runtime,
+ CAST(feedback_vector), slot, context, &call_runtime,
TRACK_ALLOCATION_SITE);
SetAccumulator(result);
Dispatch();
@@ -2476,7 +2484,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
TNode<Object> constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
TNode<Object> result =
CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
- SmiTag(Signed(slot_id)), constant_elements, flags);
+ slot, constant_elements, flags);
SetAccumulator(result);
Dispatch();
}
@@ -2487,7 +2495,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
// Creates an empty JSArray literal for literal index <literal_idx>.
IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(0);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(0);
TNode<Context> context = GetContext();
Label no_feedback(this, Label::kDeferred), end(this);
@@ -2496,7 +2504,7 @@ IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
result = constructor_assembler.EmitCreateEmptyArrayLiteral(
- CAST(maybe_feedback_vector), slot_id, context);
+ CAST(maybe_feedback_vector), slot, context);
Goto(&end);
BIND(&no_feedback);
@@ -2534,7 +2542,7 @@ IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) {
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<UintPtrT> slot_id = BytecodeOperandIdx(1);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2);
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
@@ -2552,7 +2560,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
TNode<HeapObject> result =
constructor_assembler.EmitCreateShallowObjectLiteral(
- CAST(feedback_vector), slot_id, &if_not_fast_clone);
+ CAST(feedback_vector), slot, &if_not_fast_clone);
SetAccumulator(result);
Dispatch();
}
@@ -2569,9 +2577,9 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
bytecode_flags);
TNode<Smi> flags = SmiTag(Signed(flags_raw));
- TNode<Object> result = CallRuntime(Runtime::kCreateObjectLiteral, context,
- feedback_vector, SmiTag(Signed(slot_id)),
- object_boilerplate_description, flags);
+ TNode<Object> result =
+ CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
+ slot, object_boilerplate_description, flags);
SetAccumulator(result);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
@@ -2600,14 +2608,13 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
TNode<UintPtrT> raw_flags =
DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(bytecode_flags);
TNode<Smi> smi_flags = SmiTag(Signed(raw_flags));
- TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> smi_slot = SmiTag(raw_slot);
+ TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(2);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
var_result = CallBuiltin(Builtins::kCloneObjectIC, context, source, smi_flags,
- smi_slot, maybe_feedback_vector);
+ slot, maybe_feedback_vector);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -2836,15 +2843,6 @@ IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
Dispatch();
}
-// StackCheck
-//
-// Performs a stack guard check.
-IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
- TNode<Context> context = GetContext();
- PerformStackCheck(context);
- Dispatch();
-}
-
// SetPendingMessage
//
// Sets the pending message to the value in the accumulator, and returns the
@@ -2852,7 +2850,8 @@ IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
TNode<ExternalReference> pending_message = ExternalConstant(
ExternalReference::address_of_pending_message_obj(isolate()));
- TNode<HeapObject> previous_message = Load<HeapObject>(pending_message);
+ TNode<HeapObject> previous_message =
+ UncheckedCast<HeapObject>(LoadFullTagged(pending_message));
TNode<Object> new_message = GetAccumulator();
StoreFullTaggedNoWriteBarrier(pending_message, new_message);
SetAccumulator(previous_message);
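The LoadFullTagged change is pointer-compression hygiene: the pending-message slot lives off-heap (it is an external reference), so it always holds a full machine word rather than a 32-bit compressed pointer. A toy sketch of the distinction, with stand-in types rather than V8's actual CSA types:

#include <cstdint>

using Address = uintptr_t;   // full-width tagged value
using Tagged32 = uint32_t;   // compressed on-heap representation

// Off-heap (external) slots store the complete address and must be
// read at full width, whether or not pointer compression is enabled.
Address LoadFullTagged(const Address* external_slot) {
  return *external_slot;
}

// On-heap slots hold a 32-bit offset from the heap base under pointer
// compression; decompression adds the base back in.
Address DecompressTagged(Address heap_base, Tagged32 compressed) {
  return heap_base + static_cast<Address>(compressed);
}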
@@ -3009,7 +3008,7 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
// map of the |receiver| if it has a usable enum cache or a fixed array
// with the keys to enumerate in the accumulator.
IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
- TNode<HeapObject> receiver = CAST(LoadRegisterAtOperandIndex(0));
+ TNode<JSReceiver> receiver = CAST(LoadRegisterAtOperandIndex(0));
TNode<Context> context = GetContext();
Label if_empty(this), if_runtime(this, Label::kDeferred);
@@ -3188,14 +3187,12 @@ IGNITION_HANDLER(GetIterator, InterpreterAssembler) {
TNode<Object> receiver = LoadRegisterAtOperandIndex(0);
TNode<Context> context = GetContext();
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
- TNode<IntPtrT> load_feedback_slot = Signed(BytecodeOperandIdx(1));
- TNode<IntPtrT> call_feedback_slot = Signed(BytecodeOperandIdx(2));
- TNode<Smi> load_slot_smi = SmiTag(load_feedback_slot);
- TNode<Smi> call_slot_smi = SmiTag(call_feedback_slot);
+ TNode<TaggedIndex> load_slot = BytecodeOperandIdxTaggedIndex(1);
+ TNode<TaggedIndex> call_slot = BytecodeOperandIdxTaggedIndex(2);
TNode<Object> iterator =
CallBuiltin(Builtins::kGetIteratorWithFeedback, context, receiver,
- load_slot_smi, call_slot_smi, feedback_vector);
+ load_slot, call_slot, feedback_vector);
SetAccumulator(iterator);
Dispatch();
}
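One pattern runs through all of these interpreter hunks: feedback-slot indices that used to be re-tagged at each call site with SmiTag(Signed(BytecodeOperandIdx(n))) are now materialized once as TaggedIndex operands and handed through untouched. A rough sketch of what a TaggedIndex-style encoding buys, assuming a fixed one-bit tag (illustrative, not V8's actual class):

#include <cassert>
#include <cstdint>

// A small non-negative index kept in Smi-like form: the payload is
// shifted past a one-bit tag, so builtins and runtime functions can
// decode it identically on every platform and word size.
class TaggedIndexSketch {
 public:
  static constexpr int kTagBits = 1;

  static TaggedIndexSketch FromIntPtr(intptr_t index) {
    assert(index >= 0);  // feedback slot indices are non-negative
    return TaggedIndexSketch(index << kTagBits);
  }

  intptr_t value() const { return bits_ >> kTagBits; }

 private:
  explicit TaggedIndexSketch(intptr_t bits) : bits_(bits) {}
  intptr_t bits_;
};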
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index a7afb8263e..54f4a3caa3 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -12,6 +12,7 @@
#include "src/ast/scopes.h"
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/init/bootstrapper.h"
#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecode-generator.h"
@@ -40,12 +41,20 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
Status ExecuteJobImpl() final;
Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
Isolate* isolate) final;
+ Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ OffThreadIsolate* isolate) final;
private:
BytecodeGenerator* generator() { return &generator_; }
- void CheckAndPrintBytecodeMismatch(Isolate* isolate, Handle<Script> script,
+ template <typename LocalIsolate>
+ void CheckAndPrintBytecodeMismatch(LocalIsolate* isolate,
+ Handle<Script> script,
Handle<BytecodeArray> bytecode);
+ template <typename LocalIsolate>
+ Status DoFinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ LocalIsolate* isolate);
+
Zone zone_;
UnoptimizedCompilationInfo compilation_info_;
BytecodeGenerator generator_;
@@ -104,10 +113,6 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
kEntriesPerOperandScale;
}
-int Interpreter::InterruptBudget() {
- return FLAG_interrupt_budget;
-}
-
namespace {
void MaybePrintAst(ParseInfo* parse_info,
@@ -171,11 +176,13 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
}
#ifdef DEBUG
+template <typename LocalIsolate>
void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
- Isolate* isolate, Handle<Script> script, Handle<BytecodeArray> bytecode) {
- int first_mismatch = generator()->CheckBytecodeMatches(bytecode);
+ LocalIsolate* isolate, Handle<Script> script,
+ Handle<BytecodeArray> bytecode) {
+ int first_mismatch = generator()->CheckBytecodeMatches(*bytecode);
if (first_mismatch >= 0) {
- parse_info()->ast_value_factory()->Internalize(isolate->factory());
+ parse_info()->ast_value_factory()->Internalize(isolate);
DeclarationScope::AllocateScopeInfos(parse_info(), isolate);
Handle<BytecodeArray> new_bytecode =
@@ -184,8 +191,7 @@ void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
std::cerr << "Bytecode mismatch";
#ifdef OBJECT_PRINT
std::cerr << " found for function: ";
- Handle<String> name =
- parse_info()->function_name()->string().get<Factory>();
+ Handle<String> name = parse_info()->function_name()->string();
if (name->length() == 0) {
std::cerr << "anonymous";
} else {
@@ -214,7 +220,22 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
RuntimeCallCounterId::kCompileIgnitionFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileIgnitionFinalization");
+ return DoFinalizeJobImpl(shared_info, isolate);
+}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
+ Handle<SharedFunctionInfo> shared_info, OffThreadIsolate* isolate) {
+ RuntimeCallTimerScope runtimeTimerScope(
+ parse_info()->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileBackgroundIgnitionFinalization);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileIgnitionFinalization");
+ return DoFinalizeJobImpl(shared_info, isolate);
+}
+template <typename LocalIsolate>
+InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl(
+ Handle<SharedFunctionInfo> shared_info, LocalIsolate* isolate) {
Handle<BytecodeArray> bytecodes = compilation_info_.bytecode_array();
if (bytecodes.is_null()) {
bytecodes = generator()->FinalizeBytecode(
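The job keeps its virtual interface non-templated: each concrete FinalizeJobImpl overload installs its own timer scope and trace event, then forwards to a single private template holding the shared logic. A stripped-down sketch of that shape (stand-in types, no V8 specifics):

struct MainIsolateSketch {};
struct OffThreadIsolateSketch {};

class CompilationJobSketch {
 public:
  // Two concrete overloads satisfy the (non-template) virtual interface.
  int FinalizeJobImpl(MainIsolateSketch* isolate) {
    return DoFinalize(isolate);
  }
  int FinalizeJobImpl(OffThreadIsolateSketch* isolate) {
    return DoFinalize(isolate);
  }

 private:
  // One template body, parameterized over the isolate type.
  template <typename LocalIsolate>
  int DoFinalize(LocalIsolate* isolate) {
    return 0;  // shared finalization work would go here
  }
};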
@@ -265,7 +286,7 @@ Interpreter::NewSourcePositionCollectionJob(
auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal,
allocator, nullptr);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
- return std::unique_ptr<UnoptimizedCompilationJob> { static_cast<UnoptimizedCompilationJob*>(job.release()) };
+ return job;
}
void Interpreter::ForEachBytecode(
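The simplified `return job;` relies on the fact that std::unique_ptr<Derived> converts implicitly to std::unique_ptr<Base>, so the old release()-and-rewrap dance was never necessary. A minimal demonstration with sketch types:

#include <memory>

struct BaseJobSketch {
  virtual ~BaseJobSketch() = default;
};
struct DerivedJobSketch final : BaseJobSketch {};

std::unique_ptr<BaseJobSketch> MakeJob() {
  auto job = std::make_unique<DerivedJobSketch>();
  return job;  // implicit unique_ptr upcast; ownership transfers cleanly
}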
@@ -361,9 +382,7 @@ Local<v8::Object> Interpreter::GetDispatchCountersObject() {
if (counter > 0) {
std::string to_name = Bytecodes::ToString(to_bytecode);
Local<v8::String> to_name_object =
- v8::String::NewFromUtf8(isolate, to_name.c_str(),
- NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8(isolate, to_name.c_str()).ToLocalChecked();
Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
CHECK(counters_row
->DefineOwnProperty(context, to_name_object, counter_object)
@@ -373,9 +392,7 @@ Local<v8::Object> Interpreter::GetDispatchCountersObject() {
std::string from_name = Bytecodes::ToString(from_bytecode);
Local<v8::String> from_name_object =
- v8::String::NewFromUtf8(isolate, from_name.c_str(),
- NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8(isolate, from_name.c_str()).ToLocalChecked();
CHECK(
counters_map->DefineOwnProperty(context, from_name_object, counters_row)
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index d85b872639..3ef28fdfbf 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -38,9 +38,6 @@ class Interpreter {
explicit Interpreter(Isolate* isolate);
virtual ~Interpreter() = default;
- // Returns the interrupt budget which should be used for the profiler counter.
- V8_EXPORT_PRIVATE static int InterruptBudget();
-
// Creates a compilation job which will generate bytecode for |literal|.
// Additionally, if |eager_inner_literals| is not null, adds any eagerly
// compilable inner FunctionLiterals to this list.
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index 928bca4d52..becf21cd4e 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -267,7 +267,7 @@ void JsonParser<Char>::ReportUnexpectedToken(JsonToken token) {
Handle<Script> script(factory->NewScript(original_source_));
if (isolate()->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
+ Script::InitLineEnds(isolate(), script);
}
  // We should send a compile error event because we compile the JSON object
  // in a separate source file.
diff --git a/deps/v8/src/libplatform/tracing/DEPS b/deps/v8/src/libplatform/tracing/DEPS
index 7a45bba55a..6d81016d6a 100644
--- a/deps/v8/src/libplatform/tracing/DEPS
+++ b/deps/v8/src/libplatform/tracing/DEPS
@@ -1,4 +1,5 @@
include_rules = [
+ "+libplatform",
"+perfetto",
"+protos/perfetto",
]
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
index d13332871f..a0ab31c981 100644
--- a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
+++ b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
@@ -7,6 +7,7 @@
#include <ostream>
+#include "libplatform/libplatform-export.h"
#include "src/libplatform/tracing/trace-event-listener.h"
namespace perfetto {
@@ -21,7 +22,8 @@ namespace tracing {
// A listener that converts the proto trace data to JSON and writes it to a
// file.
-class JSONTraceEventListener final : public TraceEventListener {
+class V8_PLATFORM_EXPORT JSONTraceEventListener final
+ : public TraceEventListener {
public:
explicit JSONTraceEventListener(std::ostream* stream);
~JSONTraceEventListener() override;
diff --git a/deps/v8/src/libplatform/tracing/trace-event-listener.h b/deps/v8/src/libplatform/tracing/trace-event-listener.h
index 6a535c79c5..f8fa7932e8 100644
--- a/deps/v8/src/libplatform/tracing/trace-event-listener.h
+++ b/deps/v8/src/libplatform/tracing/trace-event-listener.h
@@ -7,6 +7,8 @@
#include <vector>
+#include "libplatform/libplatform-export.h"
+
namespace perfetto {
namespace protos {
class TracePacket;
@@ -22,7 +24,7 @@ namespace tracing {
// the PerfettoConsumer class has to perform. Clients override ProcessPacket()
// to respond to trace events, e.g. to write them to a file as JSON or for
// testing purposes.
-class TraceEventListener {
+class V8_PLATFORM_EXPORT TraceEventListener {
public:
virtual ~TraceEventListener() = default;
virtual void ProcessPacket(const ::perfetto::protos::TracePacket& packet) = 0;
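Both listener classes gain V8_PLATFORM_EXPORT so they remain visible to embedders when libplatform is built as a shared library. The customary shape of such an export macro, sketched from the general pattern rather than copied from libplatform-export.h (guard and macro names here are illustrative):

#if defined(_WIN32)
#ifdef BUILDING_PLATFORM_SHARED_SKETCH
#define PLATFORM_EXPORT_SKETCH __declspec(dllexport)
#else
#define PLATFORM_EXPORT_SKETCH __declspec(dllimport)
#endif
#else  // gcc/clang: make the symbol visible despite -fvisibility=hidden
#define PLATFORM_EXPORT_SKETCH __attribute__((visibility("default")))
#endif

class PLATFORM_EXPORT_SKETCH ExportedListener {
 public:
  virtual ~ExportedListener() = default;
};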
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 9677b58c19..e2091ceb32 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -372,7 +372,9 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
+#if !(V8_OS_OPENBSD || \
+ (V8_OS_LINUX && \
+ (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390 || V8_HOST_ARCH_PPC64)))
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
@@ -413,7 +415,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
-#elif V8_HOST_ARCH_PPC
+#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
#if V8_LIBC_GLIBC
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
state->sp =
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 531d8c1411..32f7e3e652 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -55,19 +55,9 @@ namespace internal {
51) \
HR(wasm_wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount.wasm, 1, 2 << 16, \
51) \
- HR(wasm_decode_asm_module_peak_memory_bytes, \
- V8.WasmDecodeModulePeakMemoryBytes.asm, 1, GB, 51) \
- HR(wasm_decode_wasm_module_peak_memory_bytes, \
- V8.WasmDecodeModulePeakMemoryBytes.wasm, 1, GB, 51) \
- HR(asm_wasm_translation_peak_memory_bytes, \
- V8.AsmWasmTranslationPeakMemoryBytes, 1, GB, 51) \
HR(wasm_compile_function_peak_memory_bytes, \
V8.WasmCompileFunctionPeakMemoryBytes, 1, GB, 51) \
HR(asm_module_size_bytes, V8.AsmModuleSizeBytes, 1, GB, 51) \
- HR(asm_wasm_translation_throughput, V8.AsmWasmTranslationThroughput, 1, 100, \
- 20) \
- HR(wasm_lazy_compilation_throughput, V8.WasmLazyCompilationThroughput, 1, \
- 10000, 50) \
HR(compile_script_cache_behaviour, V8.CompileScript.CacheBehaviour, 0, 20, \
21) \
HR(wasm_memory_allocation_result, V8.WasmMemoryAllocationResult, 0, 3, 4) \
@@ -81,8 +71,6 @@ namespace internal {
/* code size per module after top-tier compilation */ \
HR(wasm_module_code_size_mb_after_top_tier, V8.WasmModuleCodeSizeTopTierMiB, \
0, 1024, 64) \
- /* freed code size per module, collected on GC */ \
- HR(wasm_module_freed_code_size_mb, V8.WasmModuleCodeSizeFreed, 0, 1024, 64) \
/* percent of freed code size per module, collected on GC */ \
HR(wasm_module_freed_code_size_percent, V8.WasmModuleCodeSizePercentFreed, \
0, 100, 32) \
@@ -124,12 +112,7 @@ namespace internal {
/* Total compilation time incl. caching/parsing */ \
HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND) \
  /* Total JavaScript execution time (including callbacks and runtime calls) */ \
- HT(execute, V8.Execute, 1000000, MICROSECOND) \
- /* Asm/Wasm */ \
- HT(asm_wasm_translation_time, V8.AsmWasmTranslationMicroSeconds, 1000000, \
- MICROSECOND) \
- HT(wasm_lazy_compilation_time, V8.WasmLazyCompilationMicroSeconds, 1000000, \
- MICROSECOND)
+ HT(execute, V8.Execute, 1000000, MICROSECOND)
#define TIMED_HISTOGRAM_LIST(HT) \
/* Timer histograms, thread safe: HT(name, caption, max, unit) */ \
@@ -140,6 +123,8 @@ namespace internal {
HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND) \
HT(gc_finalize_background, V8.GCFinalizeMCBackground, 10000, MILLISECOND) \
HT(gc_finalize_foreground, V8.GCFinalizeMCForeground, 10000, MILLISECOND) \
+ HT(gc_finalize_measure_memory, V8.GCFinalizeMCMeasureMemory, 10000, \
+ MILLISECOND) \
HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000, \
MILLISECOND) \
HT(gc_finalize_reduce_memory_background, \
@@ -149,6 +134,8 @@ namespace internal {
HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
HT(gc_scavenger_background, V8.GCScavengerBackground, 10000, MILLISECOND) \
HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
+ HT(measure_memory_delay_ms, V8.MeasureMemoryDelayMilliseconds, 100000, \
+ MILLISECOND) \
/* TurboFan timers. */ \
HT(turbofan_optimize_prepare, V8.TurboFanOptimizePrepare, 1000000, \
MICROSECOND) \
@@ -175,14 +162,6 @@ namespace internal {
HT(turbofan_osr_total_time, \
V8.TurboFanOptimizeForOnStackReplacementTotalTime, 10000000, MICROSECOND) \
/* Wasm timers. */ \
- HT(wasm_decode_asm_module_time, V8.WasmDecodeModuleMicroSeconds.asm, \
- 1000000, MICROSECOND) \
- HT(wasm_decode_wasm_module_time, V8.WasmDecodeModuleMicroSeconds.wasm, \
- 1000000, MICROSECOND) \
- HT(wasm_decode_asm_function_time, V8.WasmDecodeFunctionMicroSeconds.asm, \
- 1000000, MICROSECOND) \
- HT(wasm_decode_wasm_function_time, V8.WasmDecodeFunctionMicroSeconds.wasm, \
- 1000000, MICROSECOND) \
HT(wasm_compile_asm_module_time, V8.WasmCompileModuleMicroSeconds.asm, \
10000000, MICROSECOND) \
HT(wasm_compile_wasm_module_time, V8.WasmCompileModuleMicroSeconds.wasm, \
@@ -205,7 +184,6 @@ namespace internal {
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.asm, 10000000, MICROSECOND) \
- HT(wasm_code_gc_time, V8.WasmCodeGCTime, 1000000, MICROSECOND) \
/* Total compilation time incl. caching/parsing for various cache states. */ \
HT(compile_script_with_produce_cache, \
V8.CompileScriptMicroSeconds.ProduceCache, 1000000, MICROSECOND) \
diff --git a/deps/v8/src/logging/counters.cc b/deps/v8/src/logging/counters.cc
index 03b210f4d3..21248389b2 100644
--- a/deps/v8/src/logging/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -319,6 +319,11 @@ void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {
base::TimeTicks (*RuntimeCallTimer::Now)() =
&base::TimeTicks::HighResolutionNow;
+base::TimeTicks RuntimeCallTimer::NowCPUTime() {
+ base::ThreadTicks ticks = base::ThreadTicks::Now();
+ return base::TimeTicks::FromInternalValue(ticks.ToInternalValue());
+}
+
class RuntimeCallStatEntries {
public:
void Print(std::ostream& os) {
@@ -454,6 +459,11 @@ RuntimeCallStats::RuntimeCallStats(ThreadType thread_type)
for (int i = 0; i < kNumberOfCounters; i++) {
this->counters_[i] = RuntimeCallCounter(kNames[i]);
}
+ if (FLAG_rcs_cpu_time) {
+ CHECK(base::ThreadTicks::IsSupported());
+ base::ThreadTicks::WaitUntilInitialized();
+ RuntimeCallTimer::Now = &RuntimeCallTimer::NowCPUTime;
+ }
}
namespace {
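The constructor change routes all runtime-call timers through a swappable clock: RuntimeCallTimer::Now is a plain function pointer that defaults to wall-clock time and is redirected to thread CPU time when --rcs-cpu-time is set. A self-contained sketch of that configurable-clock pattern (Ticks and the two clock sources stand in for base::TimeTicks/ThreadTicks):

#include <chrono>
#include <ctime>

struct Ticks { long long us; };

inline Ticks WallNow() {
  auto t = std::chrono::steady_clock::now().time_since_epoch();
  return {static_cast<long long>(
      std::chrono::duration_cast<std::chrono::microseconds>(t).count())};
}

inline Ticks CpuNow() {
  // Process CPU time as a stand-in for the per-thread clock that
  // base::ThreadTicks::Now() provides.
  return {static_cast<long long>(std::clock()) * 1000000LL / CLOCKS_PER_SEC};
}

// The time source is a mutable static, so a flag checked once at
// startup can redirect every timer in the process.
Ticks (*Now)() = &WallNow;

void EnableCpuTime() { Now = &CpuNow; }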
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index a2c918e4c1..44d4278896 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -714,6 +714,9 @@ class RuntimeCallTimer final {
// Make the time source configurable for testing purposes.
V8_EXPORT_PRIVATE static base::TimeTicks (*Now)();
+ // Helper to switch over to CPU time.
+ static base::TimeTicks NowCPUTime();
+
private:
inline void Pause(base::TimeTicks now);
inline void Resume(base::TimeTicks now);
@@ -735,6 +738,7 @@ class RuntimeCallTimer final {
V(ArrayBuffer_Detach) \
V(ArrayBuffer_New) \
V(ArrayBuffer_NewBackingStore) \
+ V(ArrayBuffer_BackingStore_Reallocate) \
V(Array_CloneElementAt) \
V(Array_New) \
V(BigInt64Array_New) \
@@ -861,6 +865,7 @@ class RuntimeCallTimer final {
V(String_NewFromOneByte) \
V(String_NewFromTwoByte) \
V(String_NewFromUtf8) \
+ V(String_NewFromUtf8Literal) \
V(StringObject_New) \
V(StringObject_StringValue) \
V(String_Write) \
@@ -903,6 +908,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Eval) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Function) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Ignition) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, IgnitionFinalization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, RewriteReturnResult) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \
@@ -982,12 +988,13 @@ class RuntimeCallTimer final {
V(CompileFinalizeBackgroundCompileTask) \
V(CompileFinishNowOnDispatcher) \
V(CompileGetFromOptimizedCodeMap) \
- V(CompileIgnitionFinalization) \
+ V(CompilePublishBackgroundFinalization) \
V(CompileSerialize) \
V(CompileWaitForDispatcher) \
V(DeoptimizeCode) \
V(DeserializeContext) \
V(DeserializeIsolate) \
+ V(FinalizationRegistryCleanupFromTask) \
V(FunctionCallback) \
V(FunctionLengthGetter) \
V(FunctionPrototypeGetter) \
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 0343db987f..dc79ffda5e 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -238,11 +238,8 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
const wasm::WasmCode* code,
wasm::WasmName name) {
name_buffer_->Init(tag);
- if (name.empty()) {
- name_buffer_->AppendBytes("<wasm-unnamed>");
- } else {
- name_buffer_->AppendBytes(name.begin(), name.length());
- }
+ DCHECK(!name.empty());
+ name_buffer_->AppendBytes(name.begin(), name.length());
name_buffer_->AppendByte('-');
if (code->IsAnonymous()) {
name_buffer_->AppendBytes("<anonymous>");
@@ -587,6 +584,8 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "arm";
#elif V8_TARGET_ARCH_PPC
const char arch[] = "ppc";
+#elif V8_TARGET_ARCH_PPC64
+ const char arch[] = "ppc64";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
#elif V8_TARGET_ARCH_ARM64
@@ -1303,11 +1302,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
AppendCodeCreateHeader(msg, tag, AbstractCode::Kind::WASM_FUNCTION,
code->instructions().begin(),
code->instructions().length(), &timer_);
- if (name.empty()) {
- msg << "<unknown wasm>";
- } else {
- msg.AppendString(name);
- }
+ DCHECK(!name.empty());
+ msg.AppendString(name);
+
// We have to add two extra fields that allow the tick processor to group
// events for the same wasm function, even if it gets compiled again. For
// normal JS functions, we use the shared function info. For wasm, the pointer
@@ -1355,8 +1352,8 @@ void Logger::RegExpCodeCreateEvent(Handle<AbstractCode> code,
void Logger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
if (!is_listening_to_code_events()) return;
- MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from.address(),
- to.address());
+ MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from.InstructionStart(),
+ to.InstructionStart());
}
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
@@ -1641,9 +1638,9 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.WriteToLogFile();
}
-void Logger::ICEvent(const char* type, bool keyed, Map map, Object key,
- char old_state, char new_state, const char* modifier,
- const char* slow_stub_reason) {
+void Logger::ICEvent(const char* type, bool keyed, Handle<Map> map,
+ Handle<Object> key, char old_state, char new_state,
+ const char* modifier, const char* slow_stub_reason) {
if (!log_->IsEnabled() || !FLAG_trace_ic) return;
Log::MessageBuilder msg(log_.get());
if (keyed) msg << "Keyed";
@@ -1652,13 +1649,13 @@ void Logger::ICEvent(const char* type, bool keyed, Map map, Object key,
Address pc = isolate_->GetAbstractPC(&line, &column);
msg << type << kNext << reinterpret_cast<void*>(pc) << kNext << line << kNext
<< column << kNext << old_state << kNext << new_state << kNext
- << AsHex::Address(map.ptr()) << kNext;
- if (key.IsSmi()) {
- msg << Smi::ToInt(key);
- } else if (key.IsNumber()) {
- msg << key.Number();
- } else if (key.IsName()) {
- msg << Name::cast(key);
+ << AsHex::Address(map.is_null() ? kNullAddress : map->ptr()) << kNext;
+ if (key->IsSmi()) {
+ msg << Smi::ToInt(*key);
+ } else if (key->IsNumber()) {
+ msg << key->Number();
+ } else if (key->IsName()) {
+ msg << Name::cast(*key);
}
msg << kNext << modifier << kNext;
if (slow_stub_reason != nullptr) {
@@ -1667,11 +1664,10 @@ void Logger::ICEvent(const char* type, bool keyed, Map map, Object key,
msg.WriteToLogFile();
}
-void Logger::MapEvent(const char* type, Map from, Map to, const char* reason,
- HeapObject name_or_sfi) {
- DisallowHeapAllocation no_gc;
+void Logger::MapEvent(const char* type, Handle<Map> from, Handle<Map> to,
+ const char* reason, Handle<HeapObject> name_or_sfi) {
if (!log_->IsEnabled() || !FLAG_trace_maps) return;
- if (!to.is_null()) MapDetails(to);
+ if (!to.is_null()) MapDetails(*to);
int line = -1;
int column = -1;
Address pc = 0;
@@ -1681,15 +1677,16 @@ void Logger::MapEvent(const char* type, Map from, Map to, const char* reason,
}
Log::MessageBuilder msg(log_.get());
msg << "map" << kNext << type << kNext << timer_.Elapsed().InMicroseconds()
- << kNext << AsHex::Address(from.ptr()) << kNext
- << AsHex::Address(to.ptr()) << kNext << AsHex::Address(pc) << kNext
- << line << kNext << column << kNext << reason << kNext;
+ << kNext << AsHex::Address(from.is_null() ? kNullAddress : from->ptr())
+ << kNext << AsHex::Address(to.is_null() ? kNullAddress : to->ptr())
+ << kNext << AsHex::Address(pc) << kNext << line << kNext << column
+ << kNext << reason << kNext;
if (!name_or_sfi.is_null()) {
- if (name_or_sfi.IsName()) {
- msg << Name::cast(name_or_sfi);
- } else if (name_or_sfi.IsSharedFunctionInfo()) {
- SharedFunctionInfo sfi = SharedFunctionInfo::cast(name_or_sfi);
+ if (name_or_sfi->IsName()) {
+ msg << Name::cast(*name_or_sfi);
+ } else if (name_or_sfi->IsSharedFunctionInfo()) {
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(*name_or_sfi);
msg << sfi.DebugName();
#if V8_SFI_HAS_UNIQUE_ID
msg << " " << sfi.unique_id();
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 16f8f69e60..9e4dfeb28d 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -70,15 +70,15 @@ class Profiler;
class Ticker;
#undef LOG
-#define LOG(isolate, Call) \
- do { \
- v8::internal::Logger* logger = (isolate)->logger(); \
- if (logger->is_logging()) logger->Call; \
+#define LOG(isolate, Call) \
+ do { \
+ auto* logger = (isolate)->logger(); \
+ if (logger->is_logging()) logger->Call; \
} while (false)
#define LOG_CODE_EVENT(isolate, Call) \
do { \
- v8::internal::Logger* logger = (isolate)->logger(); \
+ auto* logger = (isolate)->logger(); \
if (logger->is_listening_to_code_events()) logger->Call; \
} while (false)
@@ -222,14 +222,13 @@ class Logger : public CodeEventListener {
void CodeNameEvent(Address addr, int pos, const char* code_name);
+ void ICEvent(const char* type, bool keyed, Handle<Map> map,
+ Handle<Object> key, char old_state, char new_state,
+ const char* modifier, const char* slow_stub_reason);
- void ICEvent(const char* type, bool keyed, Map map, Object key,
- char old_state, char new_state, const char* modifier,
- const char* slow_stub_reason);
-
- void MapEvent(const char* type, Map from, Map to,
+ void MapEvent(const char* type, Handle<Map> from, Handle<Map> to,
const char* reason = nullptr,
- HeapObject name_or_sfi = HeapObject());
+ Handle<HeapObject> name_or_sfi = Handle<HeapObject>());
void MapCreate(Map map);
void MapDetails(Map map);
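ICEvent and MapEvent switch from raw Map/Object values to Handles, presumably because a raw on-heap value is invalidated the moment anything on the logging path triggers a GC that moves the object. Conceptually, a handle adds one indirection through a slot the GC keeps up to date; a toy model, not V8's real Handle:

// Illustrative only: the slot is owned by a HandleScope and rewritten
// by the GC when the referenced object moves.
template <typename T>
class HandleSketch {
 public:
  explicit HandleSketch(T** slot) : slot_(slot) {}
  bool is_null() const { return slot_ == nullptr; }
  T* operator->() const { return *slot_; }  // always re-read the slot
 private:
  T** slot_;
};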
diff --git a/deps/v8/src/logging/off-thread-logger.h b/deps/v8/src/logging/off-thread-logger.h
new file mode 100644
index 0000000000..fab58c311b
--- /dev/null
+++ b/deps/v8/src/logging/off-thread-logger.h
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOGGING_OFF_THREAD_LOGGER_H_
+#define V8_LOGGING_OFF_THREAD_LOGGER_H_
+
+#include "src/base/logging.h"
+#include "src/logging/log.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(leszeks): Add support for logging from off-thread.
+class OffThreadLogger {
+ public:
+ bool is_logging() const { return false; }
+ bool is_listening_to_code_events() const { return false; }
+ void ScriptEvent(Logger::ScriptEventType type, int script_id) {
+ UNREACHABLE();
+ }
+ void CodeLinePosInfoRecordEvent(Address code_start,
+ ByteArray source_position_table) {
+ UNREACHABLE();
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOGGING_OFF_THREAD_LOGGER_H_
diff --git a/deps/v8/src/numbers/conversions-inl.h b/deps/v8/src/numbers/conversions-inl.h
index f9d5346ef3..42a30b3428 100644
--- a/deps/v8/src/numbers/conversions-inl.h
+++ b/deps/v8/src/numbers/conversions-inl.h
@@ -81,7 +81,9 @@ inline float DoubleToFloat32(double x) {
inline double DoubleToInteger(double x) {
if (std::isnan(x)) return 0;
- if (!std::isfinite(x) || x == 0) return x;
+ if (!std::isfinite(x)) return x;
+ // ToInteger normalizes -0 to +0.
+ if (x == 0.0) return 0;
return (x >= 0) ? std::floor(x) : std::ceil(x);
}
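The new `x == 0.0` branch exists purely to normalize negative zero: the old code returned x itself for zero, preserving the sign bit, whereas ToInteger requires +0. A quick illustrative check of the edge cases:

#include <cassert>
#include <cmath>
#include <limits>

double DoubleToIntegerSketch(double x) {
  if (std::isnan(x)) return 0;
  if (!std::isfinite(x)) return x;
  if (x == 0.0) return 0;  // -0 compares equal to 0 and becomes +0
  return (x >= 0) ? std::floor(x) : std::ceil(x);
}

int main() {
  assert(!std::signbit(DoubleToIntegerSketch(-0.0)));  // -0 -> +0
  assert(DoubleToIntegerSketch(-1.5) == -1.0);  // truncates toward zero
  assert(std::isinf(
      DoubleToIntegerSketch(std::numeric_limits<double>::infinity())));
  return 0;
}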
diff --git a/deps/v8/src/numbers/conversions.cc b/deps/v8/src/numbers/conversions.cc
index faf3e33df3..dfbd1d03fe 100644
--- a/deps/v8/src/numbers/conversions.cc
+++ b/deps/v8/src/numbers/conversions.cc
@@ -9,6 +9,7 @@
#include <cmath>
#include "src/common/assert-scope.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/numbers/dtoa.h"
@@ -167,25 +168,35 @@ double InternalStringToIntDouble(Iterator current, EndMark end, bool negative,
return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
}
+namespace {
+
+// Subclasses of StringToIntHelper get access to internal state:
+enum class State { kRunning, kError, kJunk, kEmpty, kZero, kDone };
+
+enum class Sign { kNegative, kPositive, kNone };
+
+} // namespace
+
// ES6 18.2.5 parseInt(string, radix) (with NumberParseIntHelper subclass);
// and BigInt parsing cases from https://tc39.github.io/proposal-bigint/
// (with StringToBigIntHelper subclass).
+template <typename LocalIsolate>
class StringToIntHelper {
public:
- StringToIntHelper(Isolate* isolate, Handle<String> subject, int radix)
+ StringToIntHelper(LocalIsolate* isolate, Handle<String> subject, int radix)
: isolate_(isolate), subject_(subject), radix_(radix) {
DCHECK(subject->IsFlat());
}
// Used for the StringToBigInt operation.
- StringToIntHelper(Isolate* isolate, Handle<String> subject)
+ StringToIntHelper(LocalIsolate* isolate, Handle<String> subject)
: isolate_(isolate), subject_(subject) {
DCHECK(subject->IsFlat());
}
// Used for parsing BigInt literals, where the input is a Zone-allocated
// buffer of one-byte digits, along with an optional radix prefix.
- StringToIntHelper(Isolate* isolate, const uint8_t* subject, int length)
+ StringToIntHelper(LocalIsolate* isolate, const uint8_t* subject, int length)
: isolate_(isolate), raw_one_byte_subject_(subject), length_(length) {}
virtual ~StringToIntHelper() = default;
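Hoisting State and Sign out of the class into an anonymous namespace lets the now-templated helper and its subclasses share one definition, at the price of qualifying every use: scoped enumerators neither leak into the enclosing scope nor convert implicitly to int. In miniature:

enum class State { kRunning, kError, kJunk, kEmpty, kZero, kDone };

bool IsTerminal(State s) {
  // Each value must be written State::kDone etc., which is exactly the
  // mechanical rewrite these hunks perform throughout the file.
  return s == State::kDone || s == State::kJunk || s == State::kError;
}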
@@ -225,12 +236,7 @@ class StringToIntHelper {
return subject_->GetFlatContent(no_gc).ToUC16Vector();
}
- // Subclasses get access to internal state:
- enum State { kRunning, kError, kJunk, kEmpty, kZero, kDone };
-
- enum class Sign { kNegative, kPositive, kNone };
-
- Isolate* isolate() { return isolate_; }
+ LocalIsolate* isolate() { return isolate_; }
int radix() { return radix_; }
int cursor() { return cursor_; }
int length() { return length_; }
@@ -245,7 +251,7 @@ class StringToIntHelper {
template <class Char>
void ParseInternal(Char start);
- Isolate* isolate_;
+ LocalIsolate* isolate_;
Handle<String> subject_;
const uint8_t* raw_one_byte_subject_ = nullptr;
int radix_ = 0;
@@ -255,10 +261,11 @@ class StringToIntHelper {
bool leading_zero_ = false;
bool allow_binary_and_octal_prefixes_ = false;
bool allow_trailing_junk_ = true;
- State state_ = kRunning;
+ State state_ = State::kRunning;
};
-void StringToIntHelper::ParseInt() {
+template <typename LocalIsolate>
+void StringToIntHelper<LocalIsolate>::ParseInt() {
{
DisallowHeapAllocation no_gc;
if (IsOneByte()) {
@@ -269,10 +276,10 @@ void StringToIntHelper::ParseInt() {
DetectRadixInternal(vector.begin(), vector.length());
}
}
- if (state_ != kRunning) return;
+ if (state_ != State::kRunning) return;
AllocateResult();
HandleSpecialCases();
- if (state_ != kRunning) return;
+ if (state_ != State::kRunning) return;
{
DisallowHeapAllocation no_gc;
if (IsOneByte()) {
@@ -285,30 +292,32 @@ void StringToIntHelper::ParseInt() {
ParseInternal(vector.begin());
}
}
- DCHECK_NE(state_, kRunning);
+ DCHECK_NE(state_, State::kRunning);
}
+template <typename LocalIsolate>
template <class Char>
-void StringToIntHelper::DetectRadixInternal(Char current, int length) {
+void StringToIntHelper<LocalIsolate>::DetectRadixInternal(Char current,
+ int length) {
Char start = current;
length_ = length;
Char end = start + length;
if (!AdvanceToNonspace(&current, end)) {
- return set_state(kEmpty);
+ return set_state(State::kEmpty);
}
if (*current == '+') {
// Ignore leading sign; skip following spaces.
++current;
if (current == end) {
- return set_state(kJunk);
+ return set_state(State::kJunk);
}
sign_ = Sign::kPositive;
} else if (*current == '-') {
++current;
if (current == end) {
- return set_state(kJunk);
+ return set_state(State::kJunk);
}
sign_ = Sign::kNegative;
}
@@ -318,21 +327,21 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
radix_ = 10;
if (*current == '0') {
++current;
- if (current == end) return set_state(kZero);
+ if (current == end) return set_state(State::kZero);
if (*current == 'x' || *current == 'X') {
radix_ = 16;
++current;
- if (current == end) return set_state(kJunk);
+ if (current == end) return set_state(State::kJunk);
} else if (allow_binary_and_octal_prefixes_ &&
(*current == 'o' || *current == 'O')) {
radix_ = 8;
++current;
- if (current == end) return set_state(kJunk);
+ if (current == end) return set_state(State::kJunk);
} else if (allow_binary_and_octal_prefixes_ &&
(*current == 'b' || *current == 'B')) {
radix_ = 2;
++current;
- if (current == end) return set_state(kJunk);
+ if (current == end) return set_state(State::kJunk);
} else {
leading_zero_ = true;
}
@@ -341,10 +350,10 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
if (*current == '0') {
// Allow "0x" prefix.
++current;
- if (current == end) return set_state(kZero);
+ if (current == end) return set_state(State::kZero);
if (*current == 'x' || *current == 'X') {
++current;
- if (current == end) return set_state(kJunk);
+ if (current == end) return set_state(State::kJunk);
} else {
leading_zero_ = true;
}
@@ -354,11 +363,11 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
while (*current == '0') {
leading_zero_ = true;
++current;
- if (current == end) return set_state(kZero);
+ if (current == end) return set_state(State::kZero);
}
if (!leading_zero_ && !isDigit(*current, radix_)) {
- return set_state(kJunk);
+ return set_state(State::kJunk);
}
DCHECK(radix_ >= 2 && radix_ <= 36);
@@ -366,8 +375,9 @@ void StringToIntHelper::DetectRadixInternal(Char current, int length) {
cursor_ = static_cast<int>(current - start);
}
+template <typename LocalIsolate>
template <class Char>
-void StringToIntHelper::ParseInternal(Char start) {
+void StringToIntHelper<LocalIsolate>::ParseInternal(Char start) {
Char current = start + cursor_;
Char end = start + length_;
@@ -426,13 +436,13 @@ void StringToIntHelper::ParseInternal(Char start) {
} while (!done);
if (!allow_trailing_junk_ && AdvanceToNonspace(&current, end)) {
- return set_state(kJunk);
+ return set_state(State::kJunk);
}
- return set_state(kDone);
+ return set_state(State::kDone);
}
-class NumberParseIntHelper : public StringToIntHelper {
+class NumberParseIntHelper : public StringToIntHelper<Isolate> {
public:
NumberParseIntHelper(Isolate* isolate, Handle<String> string, int radix)
: StringToIntHelper(isolate, string, radix) {}
@@ -440,15 +450,15 @@ class NumberParseIntHelper : public StringToIntHelper {
double GetResult() {
ParseInt();
switch (state()) {
- case kJunk:
- case kEmpty:
+ case State::kJunk:
+ case State::kEmpty:
return JunkStringValue();
- case kZero:
+ case State::kZero:
return SignedZero(negative());
- case kDone:
+ case State::kDone:
return negative() ? -result_ : result_;
- case kError:
- case kRunning:
+ case State::kError:
+ case State::kRunning:
break;
}
UNREACHABLE();
@@ -476,7 +486,7 @@ class NumberParseIntHelper : public StringToIntHelper {
result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.begin())
: HandleBaseTenCase(vector.begin());
}
- set_state(kDone);
+ set_state(State::kDone);
}
template <class Char>
@@ -573,7 +583,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
  // The longest form of simplified number is: "-<significant digits>.1eXXX\0".
const int kBufferSize = kMaxSignificantDigits + 10;
- char buffer[kBufferSize]; // NOLINT: size is known at compile time.
+ char buffer[kBufferSize];
int buffer_pos = 0;
// Exponent will be adjusted if insignificant digits of the integer part
@@ -825,59 +835,50 @@ double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
return helper.GetResult();
}
-class StringToBigIntHelper : public StringToIntHelper {
+template <typename LocalIsolate>
+class StringToBigIntHelper : public StringToIntHelper<LocalIsolate> {
public:
enum class Behavior { kStringToBigInt, kLiteral };
// Used for StringToBigInt operation (BigInt constructor and == operator).
- StringToBigIntHelper(Isolate* isolate, Handle<String> string)
- : StringToIntHelper(isolate, string),
+ StringToBigIntHelper(LocalIsolate* isolate, Handle<String> string)
+ : StringToIntHelper<LocalIsolate>(isolate, string),
behavior_(Behavior::kStringToBigInt) {
- set_allow_binary_and_octal_prefixes();
- set_disallow_trailing_junk();
+ this->set_allow_binary_and_octal_prefixes();
+ this->set_disallow_trailing_junk();
}
// Used for parsing BigInt literals, where the input is a buffer of
// one-byte ASCII digits, along with an optional radix prefix.
- StringToBigIntHelper(Isolate* isolate, const uint8_t* string, int length)
- : StringToIntHelper(isolate, string, length),
+ StringToBigIntHelper(LocalIsolate* isolate, const uint8_t* string, int length)
+ : StringToIntHelper<LocalIsolate>(isolate, string, length),
behavior_(Behavior::kLiteral) {
- set_allow_binary_and_octal_prefixes();
+ this->set_allow_binary_and_octal_prefixes();
}
MaybeHandle<BigInt> GetResult() {
- ParseInt();
- if (behavior_ == Behavior::kStringToBigInt && sign() != Sign::kNone &&
- radix() != 10) {
+ this->ParseInt();
+ if (behavior_ == Behavior::kStringToBigInt && this->sign() != Sign::kNone &&
+ this->radix() != 10) {
return MaybeHandle<BigInt>();
}
- if (state() == kEmpty) {
+ if (this->state() == State::kEmpty) {
if (behavior_ == Behavior::kStringToBigInt) {
- set_state(kZero);
+ this->set_state(State::kZero);
} else {
UNREACHABLE();
}
}
- switch (state()) {
- case kJunk:
- if (should_throw() == kThrowOnError) {
- THROW_NEW_ERROR(isolate(),
- NewSyntaxError(MessageTemplate::kBigIntInvalidString),
- BigInt);
- } else {
- DCHECK_EQ(should_throw(), kDontThrow);
- return MaybeHandle<BigInt>();
- }
- case kZero:
- return BigInt::Zero(isolate());
- case kError:
- DCHECK_EQ(should_throw() == kThrowOnError,
- isolate()->has_pending_exception());
+ switch (this->state()) {
+ case State::kJunk:
+ case State::kError:
return MaybeHandle<BigInt>();
- case kDone:
- return BigInt::Finalize(result_, negative());
- case kEmpty:
- case kRunning:
+ case State::kZero:
+ return BigInt::Zero(this->isolate(), allocation_type());
+ case State::kDone:
+ return BigInt::Finalize<Isolate>(result_, this->negative());
+ case State::kEmpty:
+ case State::kRunning:
break;
}
UNREACHABLE();
@@ -889,42 +890,50 @@ class StringToBigIntHelper : public StringToIntHelper {
    // Conservatively assume that all remaining digits are significant.
    // Optimization opportunity: Would it make sense to scan for trailing
// junk before allocating the result?
- int charcount = length() - cursor();
- // For literals, we pretenure the allocated BigInt, since it's about
- // to be stored in the interpreter's constants array.
- AllocationType allocation = behavior_ == Behavior::kLiteral
- ? AllocationType::kOld
- : AllocationType::kYoung;
- MaybeHandle<FreshlyAllocatedBigInt> maybe = BigInt::AllocateFor(
- isolate(), radix(), charcount, should_throw(), allocation);
+ int charcount = this->length() - this->cursor();
+ MaybeHandle<FreshlyAllocatedBigInt> maybe =
+ BigInt::AllocateFor(this->isolate(), this->radix(), charcount,
+ kDontThrow, allocation_type());
if (!maybe.ToHandle(&result_)) {
- set_state(kError);
+ this->set_state(State::kError);
}
}
void ResultMultiplyAdd(uint32_t multiplier, uint32_t part) override {
- BigInt::InplaceMultiplyAdd(result_, static_cast<uintptr_t>(multiplier),
+ BigInt::InplaceMultiplyAdd(*result_, static_cast<uintptr_t>(multiplier),
static_cast<uintptr_t>(part));
}
- private:
- ShouldThrow should_throw() const { return kDontThrow; }
+ AllocationType allocation_type() {
+ // For literals, we pretenure the allocated BigInt, since it's about
+ // to be stored in the interpreter's constants array.
+ return behavior_ == Behavior::kLiteral ? AllocationType::kOld
+ : AllocationType::kYoung;
+ }
+ private:
Handle<FreshlyAllocatedBigInt> result_;
Behavior behavior_;
};
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
string = String::Flatten(isolate, string);
- StringToBigIntHelper helper(isolate, string);
+ StringToBigIntHelper<Isolate> helper(isolate, string);
return helper.GetResult();
}
-MaybeHandle<BigInt> BigIntLiteral(Isolate* isolate, const char* string) {
- StringToBigIntHelper helper(isolate, reinterpret_cast<const uint8_t*>(string),
- static_cast<int>(strlen(string)));
+template <typename LocalIsolate>
+MaybeHandle<BigInt> BigIntLiteral(LocalIsolate* isolate, const char* string) {
+ StringToBigIntHelper<LocalIsolate> helper(
+ isolate, reinterpret_cast<const uint8_t*>(string),
+ static_cast<int>(strlen(string)));
return helper.GetResult();
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ MaybeHandle<BigInt> BigIntLiteral(Isolate* isolate, const char* string);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ MaybeHandle<BigInt> BigIntLiteral(OffThreadIsolate* isolate,
+ const char* string);
const char* DoubleToCString(double v, Vector<char> buffer) {
switch (FPCLASSIFY_NAMESPACE::fpclassify(v)) {
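BigIntLiteral becomes a template over the isolate type, defined in the .cc file and explicitly instantiated for Isolate and OffThreadIsolate, so the header carries only a declaration. The bare-bones form of that pattern, with stand-in types and the EXPORT_TEMPLATE_* machinery elided:

struct MainIsolateSketch {
  bool Parse(const char* s) { return s[0] != '\0'; }
};
struct OffThreadIsolateSketch {
  bool Parse(const char* s) { return s[0] != '\0'; }
};

// Header: declaration only; the body stays out of every includer.
template <typename LocalIsolate>
bool ParseLiteral(LocalIsolate* isolate, const char* s);

// Source file: one definition...
template <typename LocalIsolate>
bool ParseLiteral(LocalIsolate* isolate, const char* s) {
  return isolate->Parse(s);
}

// ...and one explicit instantiation per isolate type callers may use.
template bool ParseLiteral(MainIsolateSketch*, const char*);
template bool ParseLiteral(OffThreadIsolateSketch*, const char*);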
diff --git a/deps/v8/src/numbers/conversions.h b/deps/v8/src/numbers/conversions.h
index e71f7ce5d0..fa90ee2a6e 100644
--- a/deps/v8/src/numbers/conversions.h
+++ b/deps/v8/src/numbers/conversions.h
@@ -5,6 +5,7 @@
#ifndef V8_NUMBERS_CONVERSIONS_H_
#define V8_NUMBERS_CONVERSIONS_H_
+#include "src/base/export-template.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/utils/vector.h"
@@ -99,8 +100,9 @@ MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string);
// 0x -> hex
// 0o -> octal
// 0b -> binary
-V8_EXPORT_PRIVATE MaybeHandle<BigInt> BigIntLiteral(Isolate* isolate,
- const char* string);
+template <typename LocalIsolate>
+EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+MaybeHandle<BigInt> BigIntLiteral(LocalIsolate* isolate, const char* string);
const int kDoubleToCStringMinBufferSize = 100;
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index cf17371928..f0788b7ca1 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -225,7 +225,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
}
JSObject::TransitionElementsKind(boilerplate, to_kind);
site->dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
+ DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
}
@@ -245,7 +245,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
}
site->SetElementsKind(to_kind);
site->dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
+ DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
}
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index a069e29807..0bec61a7b8 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -96,7 +96,6 @@ bool AccessorInfo::HasExpectedReceiverType() {
return expected_receiver_type().IsFunctionTemplateInfo();
}
-TQ_SMI_ACCESSORS(InterceptorInfo, flags)
BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
kCanInterceptSymbolsBit)
BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index 2bf1a26b9a..77ff26d659 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_API_CALLBACKS_H_
#define V8_OBJECTS_API_CALLBACKS_H_
-#include "src/base/bit-field.h"
#include "src/objects/struct.h"
+#include "torque-generated/bit-fields-tq.h"
#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
@@ -85,20 +85,8 @@ class AccessorInfo : public Struct {
private:
inline bool HasExpectedReceiverType();
-// Bit positions in |flags|.
-#define ACCESSOR_INFO_FLAGS_BIT_FIELDS(V, _) \
- V(AllCanReadBit, bool, 1, _) \
- V(AllCanWriteBit, bool, 1, _) \
- V(IsSpecialDataPropertyBit, bool, 1, _) \
- V(IsSloppyBit, bool, 1, _) \
- V(ReplaceOnAccessBit, bool, 1, _) \
- V(GetterSideEffectTypeBits, SideEffectType, 2, _) \
- /* We could save a bit from setter side-effect type, if necessary */ \
- V(SetterSideEffectTypeBits, SideEffectType, 2, _) \
- V(InitialAttributesBits, PropertyAttributes, 3, _)
-
- DEFINE_BIT_FIELDS(ACCESSOR_INFO_FLAGS_BIT_FIELDS)
-#undef ACCESSOR_INFO_FLAGS_BIT_FIELDS
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_ACCESSOR_INFO_FLAGS()
OBJECT_CONSTRUCTORS(AccessorInfo, Struct);
};
@@ -123,9 +111,6 @@ class InterceptorInfo
DECL_BOOLEAN_ACCESSORS(is_named)
DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
- inline int flags() const;
- inline void set_flags(int flags);
-
// Dispatched behavior.
DECL_PRINTER(InterceptorInfo)
diff --git a/deps/v8/src/objects/api-callbacks.tq b/deps/v8/src/objects/api-callbacks.tq
index 751acd1def..47d25546f5 100644
--- a/deps/v8/src/objects/api-callbacks.tq
+++ b/deps/v8/src/objects/api-callbacks.tq
@@ -30,9 +30,23 @@ extern class AccessCheckInfo extends Struct {
data: Object;
}
+type PropertyAttributes extends int32 constexpr 'PropertyAttributes';
+type SideEffectType extends int32 constexpr 'SideEffectType';
+
+bitfield struct AccessorInfoFlags extends uint31 {
+ all_can_read: bool: 1 bit;
+ all_can_write: bool: 1 bit;
+ is_special_data_property: bool: 1 bit;
+ is_sloppy: bool: 1 bit;
+ replace_on_access: bool: 1 bit;
+ getter_side_effect_type: SideEffectType: 2 bit;
+ setter_side_effect_type: SideEffectType: 2 bit;
+ initial_attributes: PropertyAttributes: 3 bit;
+}
+
extern class AccessorInfo extends Struct {
name: Object;
- flags: Smi;
+ flags: SmiTagged<AccessorInfoFlags>;
expected_receiver_type: Object;
setter: NonNullForeign|Zero;
getter: NonNullForeign|Zero;
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index feeb9dd1a9..8c49d909d2 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -24,8 +24,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(AliasedArgumentsEntry)
CAST_ACCESSOR(SloppyArgumentsElements)
-TQ_SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot)
-
DEF_GETTER(SloppyArgumentsElements, context, Context) {
return TaggedField<Context>::load(isolate, *this,
OffsetOfElementAt(kContextIndex));
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index c69545bfda..52cfb810fb 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -113,9 +113,6 @@ class AliasedArgumentsEntry
: public TorqueGeneratedAliasedArgumentsEntry<AliasedArgumentsEntry,
Struct> {
public:
- inline int aliased_context_slot() const;
- inline void set_aliased_context_slot(int count);
-
// Dispatched behavior.
DECL_PRINTER(AliasedArgumentsEntry)
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index ce138c9461..52ab0085f7 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -9,6 +9,7 @@
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/counters.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -126,12 +127,12 @@ inline void DebugCheckZero(void* start, size_t byte_length) {
bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
uint64_t reservation_limit = kAddressSpaceLimit;
+ uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
while (true) {
- uint64_t old_count = reserved_address_space_.load();
if (old_count > reservation_limit) return false;
if (reservation_limit - old_count < num_bytes) return false;
- if (reserved_address_space_.compare_exchange_weak(old_count,
- old_count + num_bytes)) {
+ if (reserved_address_space_.compare_exchange_weak(
+ old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
return true;
}
}
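Hoisting the load of reserved_address_space_ out of the loop is the idiomatic compare-exchange shape: on failure, compare_exchange_weak writes the freshly observed value back into old_count, so re-loading at the top of every iteration was redundant. A self-contained illustration of the same loop:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> reserved_address_space{0};

bool ReserveSketch(uint64_t num_bytes, uint64_t limit) {
  uint64_t old_count =
      reserved_address_space.load(std::memory_order_relaxed);
  while (true) {
    if (old_count > limit) return false;
    if (limit - old_count < num_bytes) return false;
    // On failure, old_count is refreshed with the value another
    // thread just published; no explicit re-load is needed.
    if (reserved_address_space.compare_exchange_weak(
            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
      return true;
    }
  }
}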
@@ -164,10 +165,7 @@ void BackingStore::Clear() {
BackingStore::~BackingStore() {
GlobalBackingStoreRegistry::Unregister(this);
- if (buffer_start_ == nullptr) {
- Clear();
- return;
- }
+ if (buffer_start_ == nullptr) return; // nothing to deallocate
if (is_wasm_memory_) {
DCHECK(free_on_destruct_);
@@ -265,7 +263,8 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
false, // is_wasm_memory
true, // free_on_destruct
false, // has_guard_regions
- false); // custom_deleter
+ false, // custom_deleter
+ false); // empty_deleter
TRACE_BS("BS:alloc bs=%p mem=%p (length=%zu)\n", result,
result->buffer_start(), byte_length);
@@ -316,9 +315,11 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
// Compute size of reserved memory.
- size_t engine_max_pages = wasm::max_mem_pages();
- size_t byte_capacity =
- std::min(engine_max_pages, maximum_pages) * wasm::kWasmPageSize;
+ size_t engine_max_pages = wasm::max_maximum_mem_pages();
+ maximum_pages = std::min(engine_max_pages, maximum_pages);
+ CHECK_LE(maximum_pages,
+ std::numeric_limits<size_t>::max() / wasm::kWasmPageSize);
+ size_t byte_capacity = maximum_pages * wasm::kWasmPageSize;
size_t reservation_size = GetReservationSize(guards, byte_capacity);
//--------------------------------------------------------------------------
@@ -388,7 +389,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
true, // is_wasm_memory
true, // free_on_destruct
guards, // has_guard_regions
- false); // custom_deleter
+ false, // custom_deleter
+ false); // empty_deleter
TRACE_BS("BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu)\n", result,
result->buffer_start(), byte_length, byte_capacity);
@@ -411,7 +413,15 @@ std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize());
// Enforce engine limitation on the maximum number of pages.
- if (initial_pages > wasm::max_mem_pages()) return nullptr;
+ if (initial_pages > wasm::kV8MaxWasmMemoryPages) return nullptr;
+
+ // Trying to allocate 4 GiB on a 32-bit platform is guaranteed to fail.
+ // We don't lower the official max_maximum_mem_pages() limit because that
+ // would be observable upon instantiation; this way the effective limit
+ // on 32-bit platforms is defined by the allocator.
+ constexpr size_t kPlatformMax =
+ std::numeric_limits<size_t>::max() / wasm::kWasmPageSize;
+ if (initial_pages > kPlatformMax) return nullptr;
auto backing_store =
TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared);
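The kPlatformMax guard is pure arithmetic: with 64 KiB Wasm pages, a 32-bit size_t can express at most 65535 whole pages, one short of the 65536 pages (4 GiB) the engine limit would otherwise admit. A compile-time restatement of that bound (constant name is illustrative):

#include <cstddef>
#include <limits>

constexpr size_t kWasmPageSizeSketch = 0x10000;  // 64 KiB
constexpr size_t kPlatformMaxPages =
    std::numeric_limits<size_t>::max() / kWasmPageSizeSketch;

// 32-bit: 0xFFFFFFFF / 0x10000 == 65535, so a 4 GiB memory is rejected
// up front rather than failing inside the allocator.
static_assert(sizeof(size_t) != 4 || kPlatformMaxPages == 65535,
              "32-bit platforms top out one page short of 4 GiB");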
@@ -425,7 +435,6 @@ std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
size_t new_pages) {
- DCHECK_GE(new_pages * wasm::kWasmPageSize, byte_length_);
// Note that we could allocate uninitialized to save initialization cost here,
// but since Wasm memories are allocated by the page allocator, the zeroing
// cost is already built-in.
@@ -440,6 +449,9 @@ std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
}
if (byte_length_ > 0) {
+ // If the allocation was successful, then the new buffer must be at least
+ // as big as the old one.
+ DCHECK_GE(new_pages * wasm::kWasmPageSize, byte_length_);
memcpy(new_backing_store->buffer_start(), buffer_start_, byte_length_);
}
@@ -460,10 +472,9 @@ bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
// permissions for the entire range (to be RW), so the operating system
// should deal with that raciness. We know we succeeded when we can
// compare/swap the old length with the new length.
- size_t old_length = 0;
+ size_t old_length = byte_length_.load(std::memory_order_relaxed);
size_t new_length = 0;
while (true) {
- old_length = byte_length_.load(std::memory_order_acquire);
size_t current_pages = old_length / wasm::kWasmPageSize;
    // Check if we have exceeded the supplied maximum.
@@ -483,7 +494,7 @@ bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
}
}
- if (!is_shared_) {
+ if (!is_shared_ && free_on_destruct_) {
// Only do per-isolate accounting for non-shared backing stores.
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(new_length - old_length);
@@ -525,7 +536,8 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
false, // is_wasm_memory
free_on_destruct, // free_on_destruct
false, // has_guard_regions
- false); // custom_deleter
+ false, // custom_deleter
+ false); // empty_deleter
result->SetAllocatorFromIsolate(isolate);
TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result,
result->buffer_start(), result->byte_length());
@@ -534,8 +546,9 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
void* allocation_base, size_t allocation_length,
- v8::BackingStoreDeleterCallback deleter, void* deleter_data,
+ v8::BackingStore::DeleterCallback deleter, void* deleter_data,
SharedFlag shared) {
+ bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
allocation_length, // capacity
@@ -543,7 +556,8 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
false, // is_wasm_memory
true, // free_on_destruct
false, // has_guard_regions
- true); // custom_deleter
+ true, // custom_deleter
+ is_empty_deleter); // empty_deleter
result->type_specific_data_.deleter = {deleter, deleter_data};
TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result,
result->buffer_start(), result->byte_length());
@@ -559,11 +573,27 @@ std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
false, // is_wasm_memory
true, // free_on_destruct
false, // has_guard_regions
- false); // custom_deleter
+ false, // custom_deleter
+ false); // empty_deleter
return std::unique_ptr<BackingStore>(result);
}
+bool BackingStore::Reallocate(Isolate* isolate, size_t new_byte_length) {
+ CHECK(!is_wasm_memory_ && !custom_deleter_ && !globally_registered_ &&
+ free_on_destruct_);
+ auto allocator = get_v8_api_array_buffer_allocator();
+ CHECK_EQ(isolate->array_buffer_allocator(), allocator);
+ CHECK_EQ(byte_length_, byte_capacity_);
+ void* new_start =
+ allocator->Reallocate(buffer_start_, byte_length_, new_byte_length);
+ if (!new_start) return false;
+ buffer_start_ = new_start;
+ byte_capacity_ = new_byte_length;
+ byte_length_ = new_byte_length;
+ return true;
+}
+
v8::ArrayBuffer::Allocator* BackingStore::get_v8_api_array_buffer_allocator() {
CHECK(!is_wasm_memory_);
auto array_buffer_allocator =
@@ -605,7 +635,7 @@ void GlobalBackingStoreRegistry::Register(
  // BackingStore per buffer_start() because the destructor
  // of the BackingStore will be a no-op in that case.
- // All WASM memory has to be registered.
+ // All Wasm memory has to be registered.
CHECK(!backing_store->is_wasm_memory());
return;
}
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index c9bbcf4ba0..e9a7c8ec15 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -66,7 +66,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
static std::unique_ptr<BackingStore> WrapAllocation(
void* allocation_base, size_t allocation_length,
- v8::BackingStoreDeleterCallback deleter, void* deleter_data,
+ v8::BackingStore::DeleterCallback deleter, void* deleter_data,
SharedFlag shared);
// Create an empty backing store.
@@ -87,6 +87,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
bool GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
size_t max_pages);
+ // Wrapper around ArrayBuffer::Allocator::Reallocate.
+ bool Reallocate(Isolate* isolate, size_t new_byte_length);
+
// Allocate a new, larger, backing store for this Wasm memory and copy the
// contents of this backing store into it.
std::unique_ptr<BackingStore> CopyWasmMemory(Isolate* isolate,
@@ -117,12 +120,31 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// Update all shared memory objects in this isolate (after a grow operation).
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
+ // Returns the size of the external memory owned by this backing store.
+ // It is used for triggering GCs based on the external memory pressure.
+ size_t PerIsolateAccountingLength() {
+ if (is_shared_) {
+ // TODO(titzer): SharedArrayBuffers and shared WasmMemorys cause problems
+ // with accounting for per-isolate external memory. In particular, sharing
+ // the same array buffer or memory multiple times, which happens in stress
+ // tests, can cause overcounting, leading to GC thrashing. Fix with global
+ // accounting?
+ return 0;
+ }
+ if (empty_deleter_) {
+ // The backing store has an empty deleter. Even if the backing store is
+ // freed after GC, it would not free the memory block.
+ return 0;
+ }
+ return byte_length();
+ }
+
private:
friend class GlobalBackingStoreRegistry;
BackingStore(void* buffer_start, size_t byte_length, size_t byte_capacity,
SharedFlag shared, bool is_wasm_memory, bool free_on_destruct,
- bool has_guard_regions, bool custom_deleter)
+ bool has_guard_regions, bool custom_deleter, bool empty_deleter)
: buffer_start_(buffer_start),
byte_length_(byte_length),
byte_capacity_(byte_capacity),
@@ -132,7 +154,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
free_on_destruct_(free_on_destruct),
has_guard_regions_(has_guard_regions),
globally_registered_(false),
- custom_deleter_(custom_deleter) {}
+ custom_deleter_(custom_deleter),
+ empty_deleter_(empty_deleter) {}
void SetAllocatorFromIsolate(Isolate* isolate);
void* buffer_start_ = nullptr;
@@ -140,7 +163,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
size_t byte_capacity_ = 0;
struct DeleterInfo {
- v8::BackingStoreDeleterCallback callback;
+ v8::BackingStore::DeleterCallback callback;
void* data;
};
@@ -175,6 +198,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
bool has_guard_regions_ : 1;
bool globally_registered_ : 1;
bool custom_deleter_ : 1;
+ bool empty_deleter_ : 1;
// Accessors for type-specific data.
v8::ArrayBuffer::Allocator* get_v8_api_array_buffer_allocator();
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index a84cf08f02..dfc302e77c 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -20,6 +20,7 @@
#include "src/objects/bigint.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/numbers/conversions.h"
@@ -44,22 +45,28 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
public:
// Bottleneck for converting MutableBigInts to BigInts.
static MaybeHandle<BigInt> MakeImmutable(MaybeHandle<MutableBigInt> maybe);
+ template <typename Isolate = v8::internal::Isolate>
static Handle<BigInt> MakeImmutable(Handle<MutableBigInt> result);
static void Canonicalize(MutableBigInt result);
// Allocation helpers.
+ template <typename LocalIsolate>
static MaybeHandle<MutableBigInt> New(
- Isolate* isolate, int length,
+ LocalIsolate* isolate, int length,
AllocationType allocation = AllocationType::kYoung);
static Handle<BigInt> NewFromInt(Isolate* isolate, int value);
static Handle<BigInt> NewFromDouble(Isolate* isolate, double value);
void InitializeDigits(int length, byte value = 0);
static Handle<MutableBigInt> Copy(Isolate* isolate,
Handle<BigIntBase> source);
- static Handle<BigInt> Zero(Isolate* isolate) {
+ template <typename LocalIsolate>
+ static Handle<BigInt> Zero(
+ LocalIsolate* isolate,
+ AllocationType allocation = AllocationType::kYoung) {
// TODO(jkummerow): Consider caching a canonical zero-BigInt.
- return MakeImmutable(New(isolate, 0)).ToHandleChecked();
+ return MakeImmutable<LocalIsolate>(
+ New(isolate, 0, allocation).ToHandleChecked());
}
static Handle<MutableBigInt> Cast(Handle<FreshlyAllocatedBigInt> bigint) {
@@ -236,7 +243,7 @@ NEVER_READ_ONLY_SPACE_IMPL(MutableBigInt)
#include "src/objects/object-macros-undef.h"
-template <typename T>
+template <typename T, typename Isolate>
MaybeHandle<T> ThrowBigIntTooBig(Isolate* isolate) {
// If the result of a BigInt computation is truncated to 64 bit, Turbofan
// can sometimes truncate intermediate results already, which can prevent
@@ -250,7 +257,8 @@ MaybeHandle<T> ThrowBigIntTooBig(Isolate* isolate) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig), T);
}
-MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length,
+template <typename LocalIsolate>
+MaybeHandle<MutableBigInt> MutableBigInt::New(LocalIsolate* isolate, int length,
AllocationType allocation) {
if (length > BigInt::kMaxLength) {
return ThrowBigIntTooBig<MutableBigInt>(isolate);
@@ -372,6 +380,7 @@ MaybeHandle<BigInt> MutableBigInt::MakeImmutable(
return MakeImmutable(result);
}
+template <typename LocalIsolate>
Handle<BigInt> MutableBigInt::MakeImmutable(Handle<MutableBigInt> result) {
MutableBigInt::Canonicalize(*result);
return Handle<BigInt>::cast(result);
@@ -405,9 +414,14 @@ void MutableBigInt::Canonicalize(MutableBigInt result) {
result.digit(result.length() - 1) != 0); // MSD is non-zero.
}
-Handle<BigInt> BigInt::Zero(Isolate* isolate) {
- return MutableBigInt::Zero(isolate);
+template <typename LocalIsolate>
+Handle<BigInt> BigInt::Zero(LocalIsolate* isolate, AllocationType allocation) {
+ return MutableBigInt::Zero(isolate, allocation);
}
+template Handle<BigInt> BigInt::Zero<Isolate>(Isolate* isolate,
+ AllocationType allocation);
+template Handle<BigInt> BigInt::Zero<OffThreadIsolate>(
+ OffThreadIsolate* isolate, AllocationType allocation);
Handle<BigInt> BigInt::UnaryMinus(Isolate* isolate, Handle<BigInt> x) {
// Special case: There is no -0n.
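Zero() shows the template-over-isolate pattern of this patch in miniature: the definition moves into the .cc file and is explicitly instantiated for exactly the two isolate types the off-thread work needs. A self-contained sketch of the same idiom (all names here are stand-ins, not V8 types):

#include <iostream>

struct MainIsolate { static constexpr const char* kName = "main"; };
struct OffThreadIsolate { static constexpr const char* kName = "off-thread"; };

// Defined in a .cc file rather than the header...
template <typename LocalIsolate>
void AllocateZero(LocalIsolate*) { std::cout << LocalIsolate::kName << "\n"; }

// ...so the two supported instantiations must be spelled out explicitly,
// or other translation units would fail to link against them.
template void AllocateZero<MainIsolate>(MainIsolate*);
template void AllocateZero<OffThreadIsolate>(OffThreadIsolate*);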
@@ -1505,13 +1519,13 @@ void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor,
}
// Multiplies {x} with {factor} and then adds {summand} to it.
-void BigInt::InplaceMultiplyAdd(Handle<FreshlyAllocatedBigInt> x,
- uintptr_t factor, uintptr_t summand) {
+void BigInt::InplaceMultiplyAdd(FreshlyAllocatedBigInt x, uintptr_t factor,
+ uintptr_t summand) {
STATIC_ASSERT(sizeof(factor) == sizeof(digit_t));
STATIC_ASSERT(sizeof(summand) == sizeof(digit_t));
- Handle<MutableBigInt> bigint = MutableBigInt::Cast(x);
- MutableBigInt::InternalMultiplyAdd(*bigint, factor, summand, bigint->length(),
- *bigint);
+ MutableBigInt bigint = MutableBigInt::cast(x);
+ MutableBigInt::InternalMultiplyAdd(bigint, factor, summand, bigint.length(),
+ bigint);
}
// Divides {x} by {divisor}, returning the result in {quotient} and {remainder}.
@@ -1907,8 +1921,9 @@ constexpr uint8_t kMaxBitsPerChar[] = {
static const int kBitsPerCharTableShift = 5;
static const size_t kBitsPerCharTableMultiplier = 1u << kBitsPerCharTableShift;
+template <typename LocalIsolate>
MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
- Isolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ LocalIsolate* isolate, int radix, int charcount, ShouldThrow should_throw,
AllocationType allocation) {
DCHECK(2 <= radix && radix <= 36);
DCHECK_GE(charcount, 0);
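The table above stores bits-per-character scaled by 32 (the multiplier from kBitsPerCharTableShift), so AllocateFor can size the result with integer math only. Assuming kMaxBitsPerChar[10] is 107, i.e. the ceiling of log2(10) * 32, a worked example:

#include <cstddef>

// 20 decimal characters, radix 10:
//   bits   = ceil(20 * 107 / 32) = 67
//   digits = ceil(67 / 64)       = 2 with a 64-bit digit_t
constexpr size_t BitsFor(size_t charcount, size_t max_bits_per_char) {
  return (charcount * max_bits_per_char + 31) / 32;  // table is scaled by 32
}
static_assert(BitsFor(20, 107) == 67, "20 decimal chars need 67 bits");
static_assert((BitsFor(20, 107) + 63) / 64 == 2, "i.e. two 64-bit digits");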
@@ -1937,13 +1952,26 @@ MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
return MaybeHandle<FreshlyAllocatedBigInt>();
}
}
+template MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor<Isolate>(
+ Isolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ AllocationType allocation);
+template MaybeHandle<FreshlyAllocatedBigInt>
+BigInt::AllocateFor<OffThreadIsolate>(OffThreadIsolate* isolate, int radix,
+ int charcount, ShouldThrow should_throw,
+ AllocationType allocation);
+template <typename LocalIsolate>
Handle<BigInt> BigInt::Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign) {
- Handle<MutableBigInt> bigint = MutableBigInt::Cast(x);
+ Handle<MutableBigInt> bigint = Handle<MutableBigInt>::cast(x);
bigint->set_sign(sign);
- return MutableBigInt::MakeImmutable(bigint);
+ return MutableBigInt::MakeImmutable<Isolate>(bigint);
}
+template Handle<BigInt> BigInt::Finalize<Isolate>(
+ Handle<FreshlyAllocatedBigInt>, bool);
+template Handle<BigInt> BigInt::Finalize<OffThreadIsolate>(
+ Handle<FreshlyAllocatedBigInt>, bool);
+
// The serialization format MUST NOT CHANGE without updating the format
// version in value-serializer.cc!
uint32_t BigInt::GetBitfieldForSerialization() const {
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 2247c157a8..ccdbf33f61 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -238,17 +238,22 @@ class BigInt : public BigIntBase {
class BodyDescriptor;
private:
+ template <typename LocalIsolate>
friend class StringToBigIntHelper;
friend class ValueDeserializer;
friend class ValueSerializer;
// Special functions for StringToBigIntHelper:
- static Handle<BigInt> Zero(Isolate* isolate);
+ template <typename LocalIsolate>
+ static Handle<BigInt> Zero(LocalIsolate* isolate, AllocationType allocation =
+ AllocationType::kYoung);
+ template <typename LocalIsolate>
static MaybeHandle<FreshlyAllocatedBigInt> AllocateFor(
- Isolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ LocalIsolate* isolate, int radix, int charcount, ShouldThrow should_throw,
AllocationType allocation);
- static void InplaceMultiplyAdd(Handle<FreshlyAllocatedBigInt> x,
- uintptr_t factor, uintptr_t summand);
+ static void InplaceMultiplyAdd(FreshlyAllocatedBigInt x, uintptr_t factor,
+ uintptr_t summand);
+ template <typename LocalIsolate>
static Handle<BigInt> Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign);
// Special functions for ValueSerializer/ValueDeserializer:
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index d02ead4bbe..81db1745c4 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -733,6 +733,7 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DEFINE_DEOPT_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(NonLazyDeoptCount, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 7f650bebfb..4f272f1299 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -4,6 +4,7 @@
#include <iomanip>
+#include "src/execution/isolate-utils.h"
#include "src/objects/code.h"
#include "src/codegen/assembler-inl.h"
@@ -159,10 +160,12 @@ int AbstractCode::SourcePosition(int offset) {
if (maybe_table.IsException()) return kNoSourcePosition;
ByteArray source_position_table = ByteArray::cast(maybe_table);
- int position = 0;
// Subtract one because the current PC is one instruction after the call site.
if (IsCode()) offset--;
- for (SourcePositionTableIterator iterator(source_position_table);
+ int position = 0;
+ for (SourcePositionTableIterator iterator(
+ source_position_table, SourcePositionTableIterator::kJavaScriptOnly,
+ SourcePositionTableIterator::kDontSkipFunctionEntry);
!iterator.done() && iterator.code_offset() <= offset;
iterator.Advance()) {
position = iterator.source_position().ScriptOffset();
@@ -961,14 +964,14 @@ bool DependentCode::Compact() {
}
bool DependentCode::MarkCodeForDeoptimization(
- Isolate* isolate, DependentCode::DependencyGroup group) {
+ DependentCode::DependencyGroup group) {
if (this->length() == 0 || this->group() > group) {
// There is no such group.
return false;
}
if (this->group() < group) {
// The group comes later in the list.
- return next_link().MarkCodeForDeoptimization(isolate, group);
+ return next_link().MarkCodeForDeoptimization(group);
}
DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_allocation_scope;
@@ -992,12 +995,12 @@ bool DependentCode::MarkCodeForDeoptimization(
}
void DependentCode::DeoptimizeDependentCodeGroup(
- Isolate* isolate, DependentCode::DependencyGroup group) {
+ DependentCode::DependencyGroup group) {
DisallowHeapAllocation no_allocation_scope;
- bool marked = MarkCodeForDeoptimization(isolate, group);
+ bool marked = MarkCodeForDeoptimization(group);
if (marked) {
DCHECK(AllowCodeDependencyChange::IsAllowed());
- Deoptimizer::DeoptimizeMarkedCode(isolate);
+ Deoptimizer::DeoptimizeMarkedCode(GetIsolateFromWritableObject(*this));
}
}
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 3e8de6877d..d80e72fa03 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -424,7 +424,7 @@ class Code : public HeapObject {
static constexpr int kHeaderPaddingSize =
FLAG_enable_embedded_constant_pool ? 28 : 0;
#elif V8_TARGET_ARCH_S390X
- static constexpr int kHeaderPaddingSize = 0;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
#else
#error Unknown architecture.
#endif
@@ -659,9 +659,9 @@ class DependentCode : public WeakFixedArray {
Handle<HeapObject> object,
DependencyGroup group);
- void DeoptimizeDependentCodeGroup(Isolate* isolate, DependencyGroup group);
+ void DeoptimizeDependentCodeGroup(DependencyGroup group);
- bool MarkCodeForDeoptimization(Isolate* isolate, DependencyGroup group);
+ bool MarkCodeForDeoptimization(DependencyGroup group);
// The following low-level accessors are exposed only for tests.
inline DependencyGroup group();
@@ -864,7 +864,8 @@ class DeoptimizationData : public FixedArray {
static const int kSharedFunctionInfoIndex = 6;
static const int kInliningPositionsIndex = 7;
static const int kDeoptExitStartIndex = 8;
- static const int kFirstDeoptEntryIndex = 9;
+ static const int kNonLazyDeoptCountIndex = 9;
+ static const int kFirstDeoptEntryIndex = 10;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kBytecodeOffsetRawOffset = 0;
@@ -886,6 +887,7 @@ class DeoptimizationData : public FixedArray {
DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DECL_ELEMENT_ACCESSORS(DeoptExitStart, Smi)
+ DECL_ELEMENT_ACCESSORS(NonLazyDeoptCount, Smi)
#undef DECL_ELEMENT_ACCESSORS
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index 2072339c5e..dd507f3c2d 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -23,7 +23,7 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
return key->IsMatch(value);
}
- static inline uint32_t Hash(Isolate* isolate, HashTableKey* key) {
+ static inline uint32_t Hash(ReadOnlyRoots roots, HashTableKey* key) {
return key->Hash();
}
@@ -70,6 +70,8 @@ class InfoCellPair {
FeedbackCell feedback_cell_;
};
+EXTERN_DECLARE_HASH_TABLE(CompilationCacheTable, CompilationCacheShape)
+
// This cache is used in two different variants. For regexp caching, it simply
// maps identifying info of the regexp to the cached regexp object. Scripts and
// eval code only gets cached after a second probe for the code object. To do
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index 5aa8a78953..686a3c689e 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -36,6 +36,16 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
return result;
}
+void Context::Initialize(Isolate* isolate) {
+ ScopeInfo scope_info = this->scope_info();
+ int header = scope_info.ContextHeaderLength();
+ for (int var = 0; var < scope_info.ContextLocalCount(); var++) {
+ if (scope_info.ContextLocalInitFlag(var) == kNeedsInitialization) {
+ set(header + var, ReadOnlyRoots(isolate).the_hole_value());
+ }
+ }
+}
+
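Initialize() writes the hole only into slots whose ScopeInfo flag says kNeedsInitialization, which is what later lets TDZ checks on let/const bindings detect use-before-assignment. A toy model of the loop (names illustrative, not V8's):

#include <vector>

enum InitFlag { kCreatedInitialized, kNeedsInitialization };
constexpr int kTheHole = -1;  // stand-in for ReadOnlyRoots::the_hole_value()

void InitializeSlots(std::vector<int>& slots, int header,
                     const std::vector<InitFlag>& flags) {
  for (size_t var = 0; var < flags.size(); ++var) {
    if (flags[var] == kNeedsInitialization) slots[header + var] = kTheHole;
  }
}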
bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
String name, LookupResult* result) {
DisallowHeapAllocation no_gc;
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 2289cd6210..a273549d96 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -63,6 +63,9 @@ enum ContextLookupFlags {
/* it's already UBSan-friendly and doesn't require a star... So declare */ \
/* it as a HeapObject for now. */ \
V(EMBEDDER_DATA_INDEX, HeapObject, embedder_data) \
+ V(CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX, HeapObject, \
+ continuation_preserved_embedder_data) \
+ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
/* Below is alpha-sorted */ \
V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \
accessor_property_descriptor_map) \
@@ -188,13 +191,13 @@ enum ContextLookupFlags {
V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
V(JS_SET_MAP_INDEX, Map, js_set_map) \
V(WEAK_CELL_MAP_INDEX, Map, weak_cell_map) \
- V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_MAP_INDEX, Map, \
- js_finalization_group_cleanup_iterator_map) \
+ V(JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_MAP_INDEX, Map, \
+ js_finalization_registry_cleanup_iterator_map) \
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
V(JS_WEAK_REF_FUNCTION_INDEX, JSFunction, js_weak_ref_fun) \
- V(JS_FINALIZATION_GROUP_FUNCTION_INDEX, JSFunction, \
- js_finalization_group_fun) \
+ V(JS_FINALIZATION_REGISTRY_FUNCTION_INDEX, JSFunction, \
+ js_finalization_registry_fun) \
/* Context maps */ \
V(NATIVE_CONTEXT_MAP_INDEX, Map, native_context_map) \
V(FUNCTION_CONTEXT_MAP_INDEX, Map, function_context_map) \
@@ -348,6 +351,7 @@ enum ContextLookupFlags {
V(MAP_SET_INDEX, JSFunction, map_set) \
V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(OBJECT_VALUE_OF_FUNCTION_INDEX, JSFunction, object_value_of_function) \
V(PROMISE_ALL_INDEX, JSFunction, promise_all) \
V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
@@ -367,8 +371,7 @@ enum ContextLookupFlags {
V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
- V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache) \
- NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)
+ V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache)
// A table of all script contexts. Every loaded top-level script with top-level
// lexical declarations contributes its ScriptContext into this table.
@@ -507,6 +510,10 @@ class Context : public HeapObject {
return SizeFor(index) - kHeapObjectTag;
}
+ // Initializes the variable slots of the context. Lexical variables that need
+ // initialization are filled with the hole.
+ void Initialize(Isolate* isolate);
+
// TODO(ishell): eventually migrate to the offset based access instead of
// index-based.
// The default context slot layout; indices are FixedArray slot indices.
@@ -679,8 +686,6 @@ class Context : public HeapObject {
#endif
OBJECT_CONSTRUCTORS(Context, HeapObject);
- DECL_INT_ACCESSORS(length_and_extension_flag)
- DECL_SYNCHRONIZED_INT_ACCESSORS(length_and_extension_flag)
};
class NativeContext : public Context {
diff --git a/deps/v8/src/objects/contexts.tq b/deps/v8/src/objects/contexts.tq
index 5af3011d51..6def2320ff 100644
--- a/deps/v8/src/objects/contexts.tq
+++ b/deps/v8/src/objects/contexts.tq
@@ -59,6 +59,8 @@ extern enum NativeContextSlot extends intptr constexpr 'Context::Field' {
PROMISE_THEN_FINALLY_SHARED_FUN,
PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN,
STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+
+ CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
...
}
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 6a0dc76f64..c5b024ed1d 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -20,16 +20,11 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(BreakPoint)
TQ_OBJECT_CONSTRUCTORS_IMPL(BreakPointInfo)
-OBJECT_CONSTRUCTORS_IMPL(CoverageInfo, FixedArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(CoverageInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(DebugInfo)
NEVER_READ_ONLY_SPACE_IMPL(DebugInfo)
-CAST_ACCESSOR(CoverageInfo)
-
-TQ_SMI_ACCESSORS(DebugInfo, flags)
-TQ_SMI_ACCESSORS(DebugInfo, debugger_hints)
-
BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, side_effect_state,
DebugInfo::SideEffectStateBits)
BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, debug_is_blackboxed,
@@ -39,10 +34,6 @@ BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, computed_debug_is_blackboxed,
BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, debugging_id,
DebugInfo::DebuggingIdBits)
-TQ_SMI_ACCESSORS(BreakPointInfo, source_position)
-
-TQ_SMI_ACCESSORS(BreakPoint, id)
-
bool DebugInfo::HasInstrumentedBytecodeArray() {
DCHECK_EQ(debug_bytecode_array().IsBytecodeArray(),
original_bytecode_array().IsBytecodeArray());
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index 3f7f55f852..48dc2d5b8d 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -357,64 +357,56 @@ int BreakPointInfo::GetBreakPointCount(Isolate* isolate) {
return FixedArray::cast(break_points()).length();
}
-int CoverageInfo::SlotCount() const {
- DCHECK_EQ(kFirstSlotIndex, length() % kSlotIndexCount);
- return (length() - kFirstSlotIndex) / kSlotIndexCount;
+int CoverageInfo::SlotFieldOffset(int slot_index, int field_offset) const {
+ DCHECK_LT(field_offset, Slot::kSize);
+ DCHECK_LT(slot_index, slot_count());
+ return kSlotsOffset + slot_index * Slot::kSize + field_offset;
}
int CoverageInfo::StartSourcePosition(int slot_index) const {
- DCHECK_LT(slot_index, SlotCount());
- const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
- return Smi::ToInt(get(slot_start + kSlotStartSourcePositionIndex));
+ return ReadField<int32_t>(
+ SlotFieldOffset(slot_index, Slot::kStartSourcePositionOffset));
}
int CoverageInfo::EndSourcePosition(int slot_index) const {
- DCHECK_LT(slot_index, SlotCount());
- const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
- return Smi::ToInt(get(slot_start + kSlotEndSourcePositionIndex));
+ return ReadField<int32_t>(
+ SlotFieldOffset(slot_index, Slot::kEndSourcePositionOffset));
}
int CoverageInfo::BlockCount(int slot_index) const {
- DCHECK_LT(slot_index, SlotCount());
- const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
- return Smi::ToInt(get(slot_start + kSlotBlockCountIndex));
+ return ReadField<int32_t>(
+ SlotFieldOffset(slot_index, Slot::kBlockCountOffset));
}
void CoverageInfo::InitializeSlot(int slot_index, int from_pos, int to_pos) {
- DCHECK_LT(slot_index, SlotCount());
- const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
- set(slot_start + kSlotStartSourcePositionIndex, Smi::FromInt(from_pos));
- set(slot_start + kSlotEndSourcePositionIndex, Smi::FromInt(to_pos));
- set(slot_start + kSlotBlockCountIndex, Smi::zero());
-}
-
-void CoverageInfo::IncrementBlockCount(int slot_index) {
- DCHECK_LT(slot_index, SlotCount());
- const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
- const int old_count = BlockCount(slot_index);
- set(slot_start + kSlotBlockCountIndex, Smi::FromInt(old_count + 1));
+ WriteField<int32_t>(
+ SlotFieldOffset(slot_index, Slot::kStartSourcePositionOffset), from_pos);
+ WriteField<int32_t>(
+ SlotFieldOffset(slot_index, Slot::kEndSourcePositionOffset), to_pos);
+ ResetBlockCount(slot_index);
+ WriteField<int32_t>(SlotFieldOffset(slot_index, Slot::kPaddingOffset), 0);
}
void CoverageInfo::ResetBlockCount(int slot_index) {
- DCHECK_LT(slot_index, SlotCount());
- const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
- set(slot_start + kSlotBlockCountIndex, Smi::zero());
+ WriteField<int32_t>(SlotFieldOffset(slot_index, Slot::kBlockCountOffset), 0);
}
-void CoverageInfo::Print(std::unique_ptr<char[]> function_name) {
+void CoverageInfo::CoverageInfoPrint(std::ostream& os,
+ std::unique_ptr<char[]> function_name) {
DCHECK(FLAG_trace_block_coverage);
DisallowHeapAllocation no_gc;
- StdoutStream os;
os << "Coverage info (";
- if (strlen(function_name.get()) > 0) {
+ if (function_name == nullptr) {
+ os << "{unknown}";
+ } else if (strlen(function_name.get()) > 0) {
os << function_name.get();
} else {
os << "{anonymous}";
}
os << "):" << std::endl;
- for (int i = 0; i < SlotCount(); i++) {
+ for (int i = 0; i < slot_count(); i++) {
os << "{" << StartSourcePosition(i) << "," << EndSourcePosition(i) << "}"
<< std::endl;
}
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 321ac11cda..a7ce9ec367 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -11,6 +11,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -26,24 +27,9 @@ class BytecodeArray;
class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
- enum Flag {
- kNone = 0,
- kHasBreakInfo = 1 << 0,
- kPreparedForDebugExecution = 1 << 1,
- kHasCoverageInfo = 1 << 2,
- kBreakAtEntry = 1 << 3,
- kCanBreakAtEntry = 1 << 4,
- kDebugExecutionMode = 1 << 5
- };
-
+ DEFINE_TORQUE_GENERATED_DEBUG_INFO_FLAGS()
using Flags = base::Flags<Flag>;
- // A bitfield that lists uses of the current instance.
- DECL_INT_ACCESSORS(flags)
-
- // Bit field containing various information collected for debugging.
- DECL_INT_ACCESSORS(debugger_hints)
-
// DebugInfo can be detached from the SharedFunctionInfo iff it is empty.
bool IsEmpty() const;
@@ -127,15 +113,8 @@ class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
// This could also be implemented as a weak hash table.
DECL_INT_ACCESSORS(debugging_id)
-// Bit positions in |debugger_hints|.
-#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
- V(SideEffectStateBits, int, 2, _) \
- V(DebugIsBlackboxedBit, bool, 1, _) \
- V(ComputedDebugIsBlackboxedBit, bool, 1, _) \
- V(DebuggingIdBits, int, 20, _)
-
- DEFINE_BIT_FIELDS(DEBUGGER_HINTS_BIT_FIELDS)
-#undef DEBUGGER_HINTS_BIT_FIELDS
+ // Bit positions in |debugger_hints|.
+ DEFINE_TORQUE_GENERATED_DEBUGGER_HINTS()
static const int kNoDebuggingId = 0;
@@ -165,9 +144,6 @@ class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
class BreakPointInfo
: public TorqueGeneratedBreakPointInfo<BreakPointInfo, Struct> {
public:
- // The position in the source for the break position.
- DECL_INT_ACCESSORS(source_position)
-
// Removes a break point.
static void ClearBreakPoint(Isolate* isolate, Handle<BreakPointInfo> info,
Handle<BreakPoint> break_point);
@@ -190,54 +166,39 @@ class BreakPointInfo
};
// Holds information related to block code coverage.
-class CoverageInfo : public FixedArray {
+class CoverageInfo
+ : public TorqueGeneratedCoverageInfo<CoverageInfo, HeapObject> {
public:
- int SlotCount() const;
-
int StartSourcePosition(int slot_index) const;
int EndSourcePosition(int slot_index) const;
int BlockCount(int slot_index) const;
void InitializeSlot(int slot_index, int start_pos, int end_pos);
- void IncrementBlockCount(int slot_index);
void ResetBlockCount(int slot_index);
- static int FixedArrayLengthForSlotCount(int slot_count) {
- return slot_count * kSlotIndexCount + kFirstSlotIndex;
+ // Computes the size for a CoverageInfo instance of a given length.
+ static int SizeFor(int slot_count) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + slot_count * Slot::kSize);
}
- DECL_CAST(CoverageInfo)
-
// Print debug info.
- void Print(std::unique_ptr<char[]> function_name);
-
- static const int kFirstSlotIndex = 0;
+ void CoverageInfoPrint(std::ostream& os,
+ std::unique_ptr<char[]> function_name = nullptr);
- // Each slot is assigned a group of indices starting at kFirstSlotIndex.
- // Within this group, semantics are as follows:
- static const int kSlotStartSourcePositionIndex = 0;
- static const int kSlotEndSourcePositionIndex = 1;
- static const int kSlotBlockCountIndex = 2;
- static const int kSlotPaddingIndex = 3; // Padding to make the index count 4.
- static const int kSlotIndexCount = 4;
+ class BodyDescriptor; // GC visitor.
- static const int kSlotIndexCountLog2 = 2;
- static const int kSlotIndexCountMask = (kSlotIndexCount - 1);
- STATIC_ASSERT(1 << kSlotIndexCountLog2 == kSlotIndexCount);
+ // Description of layout within each slot.
+ using Slot = TorqueGeneratedCoverageInfoSlotOffsets;
private:
- static int FirstIndexForSlot(int slot_index) {
- return kFirstSlotIndex + slot_index * kSlotIndexCount;
- }
+ int SlotFieldOffset(int slot_index, int field_offset) const;
- OBJECT_CONSTRUCTORS(CoverageInfo, FixedArray);
+ TQ_OBJECT_CONSTRUCTORS(CoverageInfo)
};
// Holds breakpoint related information. This object is used by inspector.
class BreakPoint : public TorqueGeneratedBreakPoint<BreakPoint, Struct> {
public:
- DECL_INT_ACCESSORS(id)
-
TQ_OBJECT_CONSTRUCTORS(BreakPoint)
};
diff --git a/deps/v8/src/objects/debug-objects.tq b/deps/v8/src/objects/debug-objects.tq
index fb52af5a51..d50d86c482 100644
--- a/deps/v8/src/objects/debug-objects.tq
+++ b/deps/v8/src/objects/debug-objects.tq
@@ -18,12 +18,27 @@ extern class BreakPointInfo extends Struct {
break_points: FixedArray|BreakPoint|Undefined;
}
-type CoverageInfo extends FixedArray;
+bitfield struct DebugInfoFlags extends uint31 {
+ has_break_info: bool: 1 bit;
+ prepared_for_debug_execution: bool: 1 bit;
+ has_coverage_info: bool: 1 bit;
+ break_at_entry: bool: 1 bit;
+ can_break_at_entry: bool: 1 bit;
+ debug_execution_mode: bool: 1 bit;
+}
+
+bitfield struct DebuggerHints extends uint31 {
+ side_effect_state: int32: 2 bit;
+ debug_is_blackboxed: bool: 1 bit;
+ computed_debug_is_blackboxed: bool: 1 bit;
+ debugging_id: int32: 20 bit;
+}
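Both bitfield structs must fit in a Smi payload, which Torque's uint31 base enforces; DebuggerHints budgets out as 2 + 1 + 1 + 20 = 24 of the available 31 bits. The same sanity check in C++ terms:

static_assert(2 + 1 + 1 + 20 <= 31,
              "DebuggerHints bit budget fits a SmiTagged<uint31> field");
static_assert(6 <= 31, "DebugInfoFlags uses six single-bit flags");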
@generateCppClass
extern class DebugInfo extends Struct {
shared: SharedFunctionInfo;
- debugger_hints: Smi;
+ // Bit field containing various information collected for debugging.
+ debugger_hints: SmiTagged<DebuggerHints>;
// Script field from shared function info.
script: Undefined|Script;
// The original uninstrumented bytecode array for functions with break
@@ -34,6 +49,23 @@ extern class DebugInfo extends Struct {
debug_bytecode_array: Undefined|BytecodeArray;
// Fixed array holding status information for each active break point.
break_points: FixedArray;
- flags: Smi;
+ // A bitfield that lists uses of the current instance.
+ flags: SmiTagged<DebugInfoFlags>;
coverage_info: CoverageInfo|Undefined;
}
+
+@export
+struct CoverageInfoSlot {
+ start_source_position: int32;
+ end_source_position: int32;
+ block_count: int32;
+ padding: int32; // Padding to round the slot up to 4 int32 fields.
+}
+
+// CoverageInfo's visitor is included in DATA_ONLY_VISITOR_ID_LIST, so it must
+// not contain any HeapObject fields.
+@generateCppClass
+extern class CoverageInfo extends HeapObject {
+ const slot_count: int32;
+ slots[slot_count]: CoverageInfoSlot;
+}
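With four int32 fields, each CoverageInfoSlot occupies 16 bytes, so the SizeFor() computation in debug-objects.h reduces to aligning header-plus-slots. A back-of-envelope check, assuming a 16-byte header and 8-byte object alignment (both of which actually depend on the build configuration):

#include <cstdint>

constexpr int kSlotSize = 4 * sizeof(int32_t);  // 16 bytes per slot
constexpr int Align8(int size) { return (size + 7) & ~7; }
constexpr int SizeFor(int header, int slot_count) {
  return Align8(header + slot_count * kSlotSize);
}
static_assert(SizeFor(16, 3) == 64, "3 slots: 16-byte header + 48 bytes");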
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 0085a1d78d..e24673d01c 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_DESCRIPTOR_ARRAY_H_
#define V8_OBJECTS_DESCRIPTOR_ARRAY_H_
+#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
// TODO(jkummerow): Consider forward-declaring instead.
#include "src/base/bit-field.h"
@@ -126,9 +127,10 @@ class DescriptorArray : public HeapObject {
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- V8_EXPORT_PRIVATE static Handle<DescriptorArray> Allocate(Isolate* isolate,
- int nof_descriptors,
- int slack);
+ template <typename LocalIsolate>
+ V8_EXPORT_PRIVATE static Handle<DescriptorArray> Allocate(
+ LocalIsolate* isolate, int nof_descriptors, int slack,
+ AllocationType allocation = AllocationType::kYoung);
void Initialize(EnumCache enum_cache, HeapObject undefined_value,
int nof_descriptors, int slack);
diff --git a/deps/v8/src/objects/descriptor-array.tq b/deps/v8/src/objects/descriptor-array.tq
index ac512235c0..53c209e237 100644
--- a/deps/v8/src/objects/descriptor-array.tq
+++ b/deps/v8/src/objects/descriptor-array.tq
@@ -17,7 +17,7 @@ struct DescriptorEntry {
}
extern class DescriptorArray extends HeapObject {
- number_of_all_descriptors: uint16;
+ const number_of_all_descriptors: uint16;
number_of_descriptors: uint16;
raw_number_of_marked_descriptors: uint16;
filler16_bits: uint16;
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index 965fcb989c..10efd226af 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -54,10 +54,9 @@ PropertyDetails Dictionary<Derived, Shape>::DetailsAt(InternalIndex entry) {
}
template <typename Derived, typename Shape>
-void Dictionary<Derived, Shape>::DetailsAtPut(Isolate* isolate,
- InternalIndex entry,
+void Dictionary<Derived, Shape>::DetailsAtPut(InternalIndex entry,
PropertyDetails value) {
- Shape::DetailsAtPut(isolate, Derived::cast(*this), entry, value);
+ Shape::DetailsAtPut(Derived::cast(*this), entry, value);
}
template <typename Derived, typename Shape>
@@ -128,16 +127,15 @@ void NumberDictionary::set_requires_slow_elements() {
}
template <typename Derived, typename Shape>
-void Dictionary<Derived, Shape>::ClearEntry(Isolate* isolate,
- InternalIndex entry) {
+void Dictionary<Derived, Shape>::ClearEntry(InternalIndex entry) {
Object the_hole = this->GetReadOnlyRoots().the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
- Derived::cast(*this).SetEntry(isolate, entry, the_hole, the_hole, details);
+ Derived::cast(*this).SetEntry(entry, the_hole, the_hole, details);
}
template <typename Derived, typename Shape>
-void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, InternalIndex entry,
- Object key, Object value,
+void Dictionary<Derived, Shape>::SetEntry(InternalIndex entry, Object key,
+ Object value,
PropertyDetails details) {
DCHECK(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
DCHECK(!key.IsName() || details.dictionary_index() > 0);
@@ -146,7 +144,7 @@ void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, InternalIndex entry,
WriteBarrierMode mode = this->GetWriteBarrierMode(no_gc);
this->set(index + Derived::kEntryKeyIndex, key, mode);
this->set(index + Derived::kEntryValueIndex, value, mode);
- if (Shape::kHasDetails) DetailsAtPut(isolate, entry, details);
+ if (Shape::kHasDetails) DetailsAtPut(entry, details);
}
template <typename Derived, typename Shape>
@@ -167,7 +165,7 @@ PropertyDetails BaseDictionaryShape<Key>::DetailsAt(Dictionary dict,
template <typename Key>
template <typename Dictionary>
-void BaseDictionaryShape<Key>::DetailsAtPut(Isolate* isolate, Dictionary dict,
+void BaseDictionaryShape<Key>::DetailsAtPut(Dictionary dict,
InternalIndex entry,
PropertyDetails value) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
@@ -179,8 +177,8 @@ Object GlobalDictionaryShape::Unwrap(Object object) {
return PropertyCell::cast(object).name();
}
-RootIndex GlobalDictionaryShape::GetMapRootIndex() {
- return RootIndex::kGlobalDictionaryMap;
+Handle<Map> GlobalDictionaryShape::GetMap(ReadOnlyRoots roots) {
+ return roots.global_dictionary_map_handle();
}
Name NameDictionary::NameAt(InternalIndex entry) {
@@ -192,8 +190,8 @@ Name NameDictionary::NameAt(const Isolate* isolate, InternalIndex entry) {
return Name::cast(KeyAt(isolate, entry));
}
-RootIndex NameDictionaryShape::GetMapRootIndex() {
- return RootIndex::kNameDictionaryMap;
+Handle<Map> NameDictionaryShape::GetMap(ReadOnlyRoots roots) {
+ return roots.name_dictionary_map_handle();
}
PropertyCell GlobalDictionary::CellAt(InternalIndex entry) {
@@ -234,12 +232,11 @@ Object GlobalDictionary::ValueAt(const Isolate* isolate, InternalIndex entry) {
return CellAt(isolate, entry).value(isolate);
}
-void GlobalDictionary::SetEntry(Isolate* isolate, InternalIndex entry,
- Object key, Object value,
+void GlobalDictionary::SetEntry(InternalIndex entry, Object key, Object value,
PropertyDetails details) {
DCHECK_EQ(key, PropertyCell::cast(value).name());
set(EntryToIndex(entry) + kEntryKeyIndex, value);
- DetailsAtPut(isolate, entry, details);
+ DetailsAtPut(entry, details);
}
void GlobalDictionary::ValueAtPut(InternalIndex entry, Object value) {
@@ -251,8 +248,8 @@ bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object other) {
return key == static_cast<uint32_t>(other.Number());
}
-uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
- return ComputeSeededHash(key, HashSeed(isolate));
+uint32_t NumberDictionaryBaseShape::Hash(ReadOnlyRoots roots, uint32_t key) {
+ return ComputeSeededHash(key, HashSeed(roots));
}
uint32_t NumberDictionaryBaseShape::HashForObject(ReadOnlyRoots roots,
@@ -267,12 +264,17 @@ Handle<Object> NumberDictionaryBaseShape::AsHandle(Isolate* isolate,
return isolate->factory()->NewNumberFromUint(key);
}
-RootIndex NumberDictionaryShape::GetMapRootIndex() {
- return RootIndex::kNumberDictionaryMap;
+Handle<Object> NumberDictionaryBaseShape::AsHandle(OffThreadIsolate* isolate,
+ uint32_t key) {
+ return isolate->factory()->NewNumberFromUint<AllocationType::kOld>(key);
+}
+
+Handle<Map> NumberDictionaryShape::GetMap(ReadOnlyRoots roots) {
+ return roots.number_dictionary_map_handle();
}
-RootIndex SimpleNumberDictionaryShape::GetMapRootIndex() {
- return RootIndex::kSimpleNumberDictionaryMap;
+Handle<Map> SimpleNumberDictionaryShape::GetMap(ReadOnlyRoots roots) {
+ return roots.simple_number_dictionary_map_handle();
}
bool NameDictionaryShape::IsMatch(Handle<Name> key, Object other) {
@@ -281,7 +283,7 @@ bool NameDictionaryShape::IsMatch(Handle<Name> key, Object other) {
return *key == other;
}
-uint32_t NameDictionaryShape::Hash(Isolate* isolate, Handle<Name> key) {
+uint32_t NameDictionaryShape::Hash(ReadOnlyRoots roots, Handle<Name> key) {
return key->Hash();
}
@@ -305,6 +307,12 @@ Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
return key;
}
+Handle<Object> NameDictionaryShape::AsHandle(OffThreadIsolate* isolate,
+ Handle<Name> key) {
+ DCHECK(key->IsUniqueName());
+ return key;
+}
+
template <typename Dictionary>
PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary dict,
InternalIndex entry) {
@@ -313,14 +321,13 @@ PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary dict,
}
template <typename Dictionary>
-void GlobalDictionaryShape::DetailsAtPut(Isolate* isolate, Dictionary dict,
- InternalIndex entry,
+void GlobalDictionaryShape::DetailsAtPut(Dictionary dict, InternalIndex entry,
PropertyDetails value) {
DCHECK(entry.is_found());
PropertyCell cell = dict.CellAt(entry);
if (cell.property_details().IsReadOnly() != value.IsReadOnly()) {
cell.dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kPropertyCellChangedGroup);
+ DependentCode::kPropertyCellChangedGroup);
}
cell.set_property_details(value);
}
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 66b601dbca..04a06960c9 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -41,8 +41,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
inline PropertyDetails DetailsAt(InternalIndex entry);
// Set the details for entry.
- inline void DetailsAtPut(Isolate* isolate, InternalIndex entry,
- PropertyDetails value);
+ inline void DetailsAtPut(InternalIndex entry, PropertyDetails value);
// Delete a property from the dictionary.
V8_WARN_UNUSED_RESULT static Handle<Derived> DeleteEntry(
@@ -66,15 +65,16 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
Object SlowReverseLookup(Object value);
// Sets the entry to (key, value) pair.
- inline void ClearEntry(Isolate* isolate, InternalIndex entry);
- inline void SetEntry(Isolate* isolate, InternalIndex entry, Object key,
- Object value, PropertyDetails details);
+ inline void ClearEntry(InternalIndex entry);
+ inline void SetEntry(InternalIndex entry, Object key, Object value,
+ PropertyDetails details);
// Garbage collection support.
inline ObjectSlot RawFieldOfValueAt(InternalIndex entry);
+ template <typename LocalIsolate>
V8_WARN_UNUSED_RESULT static Handle<Derived> Add(
- Isolate* isolate, Handle<Derived> dictionary, Key key,
+ LocalIsolate* isolate, Handle<Derived> dictionary, Key key,
Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr);
@@ -89,6 +89,11 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
OBJECT_CONSTRUCTORS(Dictionary, HashTable<Derived, Shape>);
};
+#define EXTERN_DECLARE_DICTIONARY(DERIVED, SHAPE) \
+ EXTERN_DECLARE_HASH_TABLE(DERIVED, SHAPE) \
+ extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) \
+ Dictionary<DERIVED, SHAPE>;
+
template <typename Key>
class BaseDictionaryShape : public BaseShape<Key> {
public:
@@ -97,17 +102,19 @@ class BaseDictionaryShape : public BaseShape<Key> {
static inline PropertyDetails DetailsAt(Dictionary dict, InternalIndex entry);
template <typename Dictionary>
- static inline void DetailsAtPut(Isolate* isolate, Dictionary dict,
- InternalIndex entry, PropertyDetails value);
+ static inline void DetailsAtPut(Dictionary dict, InternalIndex entry,
+ PropertyDetails value);
};
class NameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
public:
static inline bool IsMatch(Handle<Name> key, Object other);
- static inline uint32_t Hash(Isolate* isolate, Handle<Name> key);
+ static inline uint32_t Hash(ReadOnlyRoots roots, Handle<Name> key);
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Object> AsHandle(OffThreadIsolate* isolate,
+ Handle<Name> key);
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const int kEntryValueIndex = 1;
@@ -129,8 +136,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
inline int Hash() const;
// Creates a new dictionary.
+ template <typename LocalIsolate>
V8_WARN_UNUSED_RESULT static Handle<Derived> New(
- Isolate* isolate, int at_least_space_for,
+ LocalIsolate* isolate, int at_least_space_for,
AllocationType allocation = AllocationType::kYoung,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
@@ -157,8 +165,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
Handle<FixedArray> storage, KeyCollectionMode mode,
KeyAccumulator* accumulator);
+ template <typename LocalIsolate>
V8_WARN_UNUSED_RESULT static Handle<Derived> AddNoUpdateNextEnumerationIndex(
- Isolate* isolate, Handle<Derived> dictionary, Key key,
+ LocalIsolate* isolate, Handle<Derived> dictionary, Key key,
Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr);
@@ -170,10 +179,12 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
OBJECT_CONSTRUCTORS(BaseNameDictionary, Dictionary<Derived, Shape>);
};
-class NameDictionary;
+#define EXTERN_DECLARE_BASE_NAME_DICTIONARY(DERIVED, SHAPE) \
+ EXTERN_DECLARE_DICTIONARY(DERIVED, SHAPE) \
+ extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) \
+ BaseNameDictionary<DERIVED, SHAPE>;
-extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- BaseNameDictionary<NameDictionary, NameDictionaryShape>;
+EXTERN_DECLARE_BASE_NAME_DICTIONARY(NameDictionary, NameDictionaryShape)
class V8_EXPORT_PRIVATE NameDictionary
: public BaseNameDictionary<NameDictionary, NameDictionaryShape> {
@@ -205,19 +216,16 @@ class V8_EXPORT_PRIVATE GlobalDictionaryShape : public NameDictionaryShape {
static inline PropertyDetails DetailsAt(Dictionary dict, InternalIndex entry);
template <typename Dictionary>
- static inline void DetailsAtPut(Isolate* isolate, Dictionary dict,
- InternalIndex entry, PropertyDetails value);
+ static inline void DetailsAtPut(Dictionary dict, InternalIndex entry,
+ PropertyDetails value);
static inline Object Unwrap(Object key);
static inline bool IsKey(ReadOnlyRoots roots, Object k);
static inline bool IsLive(ReadOnlyRoots roots, Object key);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
};
-class GlobalDictionary;
-
-extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>;
+EXTERN_DECLARE_BASE_NAME_DICTIONARY(GlobalDictionary, GlobalDictionaryShape)
class V8_EXPORT_PRIVATE GlobalDictionary
: public BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape> {
@@ -228,8 +236,8 @@ class V8_EXPORT_PRIVATE GlobalDictionary
inline Object ValueAt(const Isolate* isolate, InternalIndex entry);
inline PropertyCell CellAt(InternalIndex entry);
inline PropertyCell CellAt(const Isolate* isolate, InternalIndex entry);
- inline void SetEntry(Isolate* isolate, InternalIndex entry, Object key,
- Object value, PropertyDetails details);
+ inline void SetEntry(InternalIndex entry, Object key, Object value,
+ PropertyDetails details);
inline Name NameAt(InternalIndex entry);
inline Name NameAt(const Isolate* isolate, InternalIndex entry);
inline void ValueAtPut(InternalIndex entry, Object value);
@@ -243,8 +251,10 @@ class NumberDictionaryBaseShape : public BaseDictionaryShape<uint32_t> {
public:
static inline bool IsMatch(uint32_t key, Object other);
static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
+ static inline Handle<Object> AsHandle(OffThreadIsolate* isolate,
+ uint32_t key);
- static inline uint32_t Hash(Isolate* isolate, uint32_t key);
+ static inline uint32_t Hash(ReadOnlyRoots roots, uint32_t key);
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
};
@@ -253,7 +263,7 @@ class NumberDictionaryShape : public NumberDictionaryBaseShape {
static const int kPrefixSize = 1;
static const int kEntrySize = 3;
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
};
class SimpleNumberDictionaryShape : public NumberDictionaryBaseShape {
@@ -269,19 +279,15 @@ class SimpleNumberDictionaryShape : public NumberDictionaryBaseShape {
}
template <typename Dictionary>
- static inline void DetailsAtPut(Isolate* isolate, Dictionary dict,
- InternalIndex entry, PropertyDetails value) {
+ static inline void DetailsAtPut(Dictionary dict, InternalIndex entry,
+ PropertyDetails value) {
UNREACHABLE();
}
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
};
-extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
-
-extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+EXTERN_DECLARE_DICTIONARY(SimpleNumberDictionary, SimpleNumberDictionaryShape)
// SimpleNumberDictionary is used to map number to an entry.
class SimpleNumberDictionary
@@ -300,11 +306,7 @@ class SimpleNumberDictionary
Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>);
};
-extern template class EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
-
-extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Dictionary<NumberDictionary, NumberDictionaryShape>;
+EXTERN_DECLARE_DICTIONARY(NumberDictionary, NumberDictionaryShape)
// NumberDictionary is used as elements backing store and provides a bitfield
// and stores property details for every entry.
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 4be8e0f0bd..d0c680d287 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -645,22 +645,22 @@ class ElementsAccessorBase : public InternalElementsAccessor {
UNREACHABLE();
}
- uint32_t Push(Handle<JSArray> receiver, Arguments* args,
+ uint32_t Push(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t push_size) final {
return Subclass::PushImpl(receiver, args, push_size);
}
- static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t push_sized) {
UNREACHABLE();
}
- uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
+ uint32_t Unshift(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t unshift_size) final {
return Subclass::UnshiftImpl(receiver, args, unshift_size);
}
- static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
+ static uint32_t UnshiftImpl(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t unshift_size) {
UNREACHABLE();
}
@@ -1383,7 +1383,7 @@ class DictionaryElementsAccessor
if (dict->IsKey(roots, index)) {
uint32_t number = static_cast<uint32_t>(index.Number());
if (length <= number && number < old_length) {
- dict->ClearEntry(isolate, entry);
+ dict->ClearEntry(entry);
removed_entries++;
}
}
@@ -1460,7 +1460,7 @@ class DictionaryElementsAccessor
details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell,
details.dictionary_index());
- dictionary.DetailsAtPut(object->GetIsolate(), entry, details);
+ dictionary.DetailsAtPut(entry, details);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -2091,7 +2091,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return Subclass::RemoveElement(receiver, AT_START);
}
- static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t push_size) {
Handle<FixedArrayBase> backing_store(receiver->elements(),
receiver->GetIsolate());
@@ -2099,7 +2099,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
AT_END);
}
- static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
+ static uint32_t UnshiftImpl(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t unshift_size) {
Handle<FixedArrayBase> backing_store(receiver->elements(),
receiver->GetIsolate());
@@ -2347,7 +2347,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
static uint32_t AddArguments(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store,
- Arguments* args, uint32_t add_size,
+ BuiltinArguments* args, uint32_t add_size,
Where add_position) {
uint32_t length = Smi::ToInt(receiver->length());
DCHECK_LT(0, add_size);
@@ -2382,7 +2382,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return new_length;
}
- static void CopyArguments(Arguments* args, Handle<FixedArrayBase> dst_store,
+ static void CopyArguments(BuiltinArguments* args,
+ Handle<FixedArrayBase> dst_store,
uint32_t copy_size, uint32_t src_index,
uint32_t dst_index) {
// Add the provided values.
@@ -2564,7 +2565,7 @@ class FastNonextensibleObjectElementsAccessor
public:
using BackingStore = typename KindTraits::BackingStore;
- static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t push_size) {
UNREACHABLE();
}
@@ -2659,7 +2660,7 @@ class FastSealedObjectElementsAccessor
static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
- static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t push_size) {
UNREACHABLE();
}
@@ -2720,14 +2721,12 @@ class FastSealedObjectElementsAccessor
class FastPackedSealedObjectElementsAccessor
: public FastSealedObjectElementsAccessor<
FastPackedSealedObjectElementsAccessor,
- ElementsKindTraits<PACKED_SEALED_ELEMENTS>> {
-};
+ ElementsKindTraits<PACKED_SEALED_ELEMENTS>> {};
class FastHoleySealedObjectElementsAccessor
: public FastSealedObjectElementsAccessor<
FastHoleySealedObjectElementsAccessor,
- ElementsKindTraits<HOLEY_SEALED_ELEMENTS>> {
-};
+ ElementsKindTraits<HOLEY_SEALED_ELEMENTS>> {};
template <typename Subclass, typename KindTraits>
class FastFrozenObjectElementsAccessor
@@ -2771,7 +2770,7 @@ class FastFrozenObjectElementsAccessor
static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
- static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t push_size) {
UNREACHABLE();
}
@@ -2799,14 +2798,12 @@ class FastFrozenObjectElementsAccessor
class FastPackedFrozenObjectElementsAccessor
: public FastFrozenObjectElementsAccessor<
FastPackedFrozenObjectElementsAccessor,
- ElementsKindTraits<PACKED_FROZEN_ELEMENTS>> {
-};
+ ElementsKindTraits<PACKED_FROZEN_ELEMENTS>> {};
class FastHoleyFrozenObjectElementsAccessor
: public FastFrozenObjectElementsAccessor<
FastHoleyFrozenObjectElementsAccessor,
- ElementsKindTraits<HOLEY_FROZEN_ELEMENTS>> {
-};
+ ElementsKindTraits<HOLEY_FROZEN_ELEMENTS>> {};
class FastHoleyObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
@@ -4632,8 +4629,8 @@ class SlowStringWrapperElementsAccessor
} // namespace
-MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
- Arguments* args) {
+MaybeHandle<Object> ArrayConstructInitializeElements(
+ Handle<JSArray> array, JavaScriptArguments* args) {
if (args->length() == 0) {
// Optimize the case where there are no parameters passed.
JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
@@ -4669,7 +4666,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
// Set length and elements on the array.
int number_of_elements = args->length();
- JSObject::EnsureCanContainElements(array, args, 0, number_of_elements,
+ JSObject::EnsureCanContainElements(array, args, number_of_elements,
ALLOW_CONVERTED_DOUBLE_ELEMENTS);
// Allocate an appropriately typed elements array.
@@ -4792,7 +4789,8 @@ void ElementsAccessor::TearDown() {
elements_accessors_ = nullptr;
}
-Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
+Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate,
+ BuiltinArguments* args,
uint32_t concat_size,
uint32_t result_len) {
ElementsKind result_elements_kind = GetInitialFastElementsKind();
diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h
index a5d8103e68..551183fe6d 100644
--- a/deps/v8/src/objects/elements.h
+++ b/deps/v8/src/objects/elements.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_ELEMENTS_H_
#define V8_OBJECTS_ELEMENTS_H_
+#include "src/builtins/builtins-utils.h"
#include "src/objects/elements-kind.h"
#include "src/objects/internal-index.h"
#include "src/objects/keys.h"
@@ -111,13 +112,13 @@ class ElementsAccessor {
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) = 0;
- static Handle<JSArray> Concat(Isolate* isolate, Arguments* args,
+ static Handle<JSArray> Concat(Isolate* isolate, BuiltinArguments* args,
uint32_t concat_size, uint32_t result_length);
- virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
+ virtual uint32_t Push(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t push_size) = 0;
- virtual uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
+ virtual uint32_t Unshift(Handle<JSArray> receiver, BuiltinArguments* args,
uint32_t unshift_size) = 0;
virtual Handle<Object> Pop(Handle<JSArray> receiver) = 0;
@@ -206,7 +207,7 @@ class ElementsAccessor {
};
V8_WARN_UNUSED_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
- Handle<JSArray> array, Arguments* args);
+ Handle<JSArray> array, JavaScriptArguments* args);
// Called directly from CSA.
// {raw_context}: Context pointer.
diff --git a/deps/v8/src/objects/embedder-data-array-inl.h b/deps/v8/src/objects/embedder-data-array-inl.h
index 6775fa8075..9c514aef89 100644
--- a/deps/v8/src/objects/embedder-data-array-inl.h
+++ b/deps/v8/src/objects/embedder-data-array-inl.h
@@ -16,7 +16,6 @@
namespace v8 {
namespace internal {
-TQ_SMI_ACCESSORS(EmbedderDataArray, length)
TQ_OBJECT_CONSTRUCTORS_IMPL(EmbedderDataArray)
diff --git a/deps/v8/src/objects/embedder-data-array.h b/deps/v8/src/objects/embedder-data-array.h
index 668d890817..4fdf64bfc8 100644
--- a/deps/v8/src/objects/embedder-data-array.h
+++ b/deps/v8/src/objects/embedder-data-array.h
@@ -23,10 +23,6 @@ namespace internal {
class EmbedderDataArray
: public TorqueGeneratedEmbedderDataArray<EmbedderDataArray, HeapObject> {
public:
- // [length]: length of the array in an embedder data slots.
- V8_INLINE int length() const;
- V8_INLINE void set_length(int value);
-
// TODO(v8:8989): [torque] Support marker constants.
static const int kHeaderSize = kSize;
diff --git a/deps/v8/src/objects/embedder-data-array.tq b/deps/v8/src/objects/embedder-data-array.tq
index c576a06868..a3c4b6868e 100644
--- a/deps/v8/src/objects/embedder-data-array.tq
+++ b/deps/v8/src/objects/embedder-data-array.tq
@@ -4,5 +4,6 @@
@generateCppClass
extern class EmbedderDataArray extends HeapObject {
+ // Length of the array in embedder data slots.
length: Smi;
}
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index 9a570092e8..36d9bc8569 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -30,7 +30,7 @@ void FeedbackCell::reset_feedback_vector(
base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
HeapObject target)>>
gc_notify_updated_slot) {
- set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
+ SetInitialInterruptBudget();
if (value().IsUndefined() || value().IsClosureFeedbackCellArray()) return;
CHECK(value().IsFeedbackVector());
@@ -43,6 +43,18 @@ void FeedbackCell::reset_feedback_vector(
}
}
+void FeedbackCell::SetInitialInterruptBudget() {
+ if (FLAG_lazy_feedback_allocation) {
+ set_interrupt_budget(FLAG_budget_for_feedback_vector_allocation);
+ } else {
+ set_interrupt_budget(FLAG_interrupt_budget);
+ }
+}
+
+void FeedbackCell::SetInterruptBudget() {
+ set_interrupt_budget(FLAG_interrupt_budget);
+}
+
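The split makes the two-stage tiering explicit: with lazy feedback allocation the first budget exhaustion pays for the feedback vector, and only the second countdown (reset to FLAG_interrupt_budget via SetInterruptBudget) counts toward optimization. A toy model of the runtime's side of that countdown (illustrative only; the actual decrement happens in generated code):

void RequestOptimization();  // hypothetical hook, assumed defined elsewhere

struct CellModel { int budget; bool has_feedback = false; };

void OnBudgetExhausted(CellModel& cell, int opt_budget) {
  if (!cell.has_feedback) {
    cell.has_feedback = true;  // stands in for feedback vector allocation
    cell.budget = opt_budget;  // mirrors SetInterruptBudget()
  } else {
    RequestOptimization();
    cell.budget = opt_budget;
  }
}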
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h
index 29304f709d..9728f8e8c0 100644
--- a/deps/v8/src/objects/feedback-cell.h
+++ b/deps/v8/src/objects/feedback-cell.h
@@ -20,13 +20,6 @@ namespace internal {
// a native context.
class FeedbackCell : public TorqueGeneratedFeedbackCell<FeedbackCell, Struct> {
public:
- static int GetInitialInterruptBudget() {
- if (FLAG_lazy_feedback_allocation) {
- return FLAG_budget_for_feedback_vector_allocation;
- }
- return FLAG_interrupt_budget;
- }
-
// Dispatched behavior.
DECL_PRINTER(FeedbackCell)
@@ -38,6 +31,8 @@ class FeedbackCell : public TorqueGeneratedFeedbackCell<FeedbackCell, Struct> {
base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
HeapObject target)>>
gc_notify_updated_slot = base::nullopt);
+ inline void SetInitialInterruptBudget();
+ inline void SetInterruptBudget();
using BodyDescriptor =
FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>;
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 79382bb4d1..72b6e14883 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -149,9 +149,10 @@ bool FeedbackVector::has_optimization_marker() const {
// Conversion from an integer index to either a slot or an ic slot.
// static
-FeedbackSlot FeedbackVector::ToSlot(int index) {
- DCHECK_GE(index, 0);
- return FeedbackSlot(index);
+FeedbackSlot FeedbackVector::ToSlot(intptr_t index) {
+ DCHECK_LE(static_cast<uintptr_t>(index),
+ static_cast<uintptr_t>(std::numeric_limits<int>::max()));
+ return FeedbackSlot(static_cast<int>(index));
}
MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
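
The widened ToSlot(intptr_t) above relies on a single-comparison range check: casting the signed index to an unsigned type makes any negative value enormous, so one <= test covers both "index >= 0" and "index <= INT_MAX". A standalone sketch of the idiom (the function name is ours, not V8's):

#include <cstdint>
#include <limits>

// True iff 0 <= index <= INT_MAX. A negative intptr_t wraps around to a
// huge uintptr_t, so it fails the single unsigned comparison.
bool FitsSlotIndex(intptr_t index) {
  return static_cast<uintptr_t>(index) <=
         static_cast<uintptr_t>(std::numeric_limits<int>::max());
}
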
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 7028d2b0df..929b312f22 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "src/objects/feedback-vector.h"
+#include "src/diagnostics/code-tracer.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
#include "src/objects/data-handler-inl.h"
@@ -73,9 +75,10 @@ void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
}
// static
-Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
+template <typename LocalIsolate>
+Handle<FeedbackMetadata> FeedbackMetadata::New(LocalIsolate* isolate,
const FeedbackVectorSpec* spec) {
- Factory* factory = isolate->factory();
+ auto* factory = isolate->factory();
const int slot_count = spec == nullptr ? 0 : spec->slots();
const int closure_feedback_cell_count =
@@ -111,6 +114,11 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
return metadata;
}
+template Handle<FeedbackMetadata> FeedbackMetadata::New(
+ Isolate* isolate, const FeedbackVectorSpec* spec);
+template Handle<FeedbackMetadata> FeedbackMetadata::New(
+ OffThreadIsolate* isolate, const FeedbackVectorSpec* spec);
+
bool FeedbackMetadata::SpecDiffersFrom(
const FeedbackVectorSpec* other_spec) const {
if (other_spec->slots() != slot_count()) {
@@ -360,10 +368,12 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
Code code = Code::cast(slot->GetHeapObject());
if (code.marked_for_deoptimization()) {
if (FLAG_trace_deopt) {
- PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
+ CodeTracer::Scope scope(GetIsolate()->GetCodeTracer());
+ PrintF(scope.file(),
+ "[evicting optimizing code marked for deoptimization (%s) for ",
reason);
- shared.ShortPrint();
- PrintF("]\n");
+ shared.ShortPrint(scope.file());
+ PrintF(scope.file(), "]\n");
}
if (!code.deopt_already_counted()) {
code.set_deopt_already_counted(true);
@@ -631,9 +641,16 @@ InlineCacheState FeedbackNexus::ic_state() const {
if (feedback == MaybeObject::FromObject(
*FeedbackVector::MegamorphicSentinel(isolate))) {
return GENERIC;
- } else if (feedback->IsWeakOrCleared() ||
- (feedback->GetHeapObjectIfStrong(&heap_object) &&
- heap_object.IsAllocationSite())) {
+ } else if (feedback->IsWeakOrCleared()) {
+ if (feedback->GetHeapObjectIfWeak(&heap_object)) {
+ if (heap_object.IsFeedbackCell()) {
+ return POLYMORPHIC;
+ }
+ CHECK(heap_object.IsJSFunction() || heap_object.IsJSBoundFunction());
+ }
+ return MONOMORPHIC;
+ } else if (feedback->GetHeapObjectIfStrong(&heap_object) &&
+ heap_object.IsAllocationSite()) {
return MONOMORPHIC;
}
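
FeedbackMetadata::New now keeps its body in the .cc file and emits exactly two instantiations, one per isolate type, so the header carries only the template declaration. The pattern, reduced to a self-contained toy (the types below are stand-ins, not V8's Isolate/OffThreadIsolate):

// header would declare: template <typename LocalIsolate> int New(LocalIsolate*);
struct IsolateModel { int factory = 1; };
struct OffThreadIsolateModel { int factory = 2; };

template <typename LocalIsolate>
int New(LocalIsolate* isolate) {
  return isolate->factory;  // one body serves both isolate kinds
}

// Explicit instantiations: the only versions callers can link against.
template int New(IsolateModel*);
template int New(OffThreadIsolateModel*);
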
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 730f6825f4..f1d2cc3029 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -234,7 +234,7 @@ class FeedbackVector : public HeapObject {
static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); }
// Conversion from an integer index into the underlying array to a slot.
- static inline FeedbackSlot ToSlot(int index);
+ static inline FeedbackSlot ToSlot(intptr_t index);
inline MaybeObject Get(FeedbackSlot slot) const;
inline MaybeObject Get(const Isolate* isolate, FeedbackSlot slot) const;
inline MaybeObject get(int index) const;
@@ -512,8 +512,9 @@ class FeedbackMetadata : public HeapObject {
V8_EXPORT_PRIVATE FeedbackSlotKind GetKind(FeedbackSlot slot) const;
// If {spec} is null, then it is considered empty.
+ template <typename LocalIsolate>
V8_EXPORT_PRIVATE static Handle<FeedbackMetadata> New(
- Isolate* isolate, const FeedbackVectorSpec* spec = nullptr);
+ LocalIsolate* isolate, const FeedbackVectorSpec* spec = nullptr);
DECL_PRINTER(FeedbackMetadata)
DECL_VERIFIER(FeedbackMetadata)
@@ -565,7 +566,7 @@ class FeedbackMetadata : public HeapObject {
// possibly be confused with a pointer.
// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
-STATIC_ASSERT(Name::kEmptyHashField == 0x7);
+STATIC_ASSERT(Name::kEmptyHashField == 0x3);
// Verify that a set hash field will not look like a tagged object.
STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag);
diff --git a/deps/v8/src/objects/fixed-array.tq b/deps/v8/src/objects/fixed-array.tq
index a0cd748b41..0c57e0bc63 100644
--- a/deps/v8/src/objects/fixed-array.tq
+++ b/deps/v8/src/objects/fixed-array.tq
@@ -4,7 +4,7 @@
@abstract
extern class FixedArrayBase extends HeapObject {
- length: Smi;
+ const length: Smi;
}
extern class FixedArray extends FixedArrayBase { objects[length]: Object; }
@@ -29,9 +29,9 @@ extern class TemplateList extends FixedArray {
}
extern class WeakArrayList extends HeapObject {
- capacity: Smi;
+ const capacity: Smi;
length: Smi;
- objects[length]: MaybeObject;
+ objects[capacity]: MaybeObject;
}
extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index 848b8202ae..318272c340 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -37,7 +37,8 @@ void FreeSpace::set_next(FreeSpace next) {
}
FreeSpace FreeSpace::cast(HeapObject o) {
- SLOW_DCHECK(!GetHeapFromWritableObject(o)->deserialization_complete() ||
+ SLOW_DCHECK((!Heap::InOffThreadSpace(o) &&
+ !GetHeapFromWritableObject(o)->deserialization_complete()) ||
o.IsFreeSpace());
return bit_cast<FreeSpace>(o);
}
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 6a30ee1425..f713098f5f 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -123,17 +123,23 @@ void HashTableBase::SetNumberOfDeletedElements(int nod) {
}
template <typename Key>
-RootIndex BaseShape<Key>::GetMapRootIndex() {
- return RootIndex::kHashTableMap;
+Handle<Map> BaseShape<Key>::GetMap(ReadOnlyRoots roots) {
+ return roots.hash_table_map_handle();
}
-RootIndex EphemeronHashTableShape::GetMapRootIndex() {
- return RootIndex::kEphemeronHashTableMap;
+Handle<Map> EphemeronHashTableShape::GetMap(ReadOnlyRoots roots) {
+ return roots.ephemeron_hash_table_map_handle();
}
template <typename Derived, typename Shape>
InternalIndex HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key) {
- return FindEntry(ReadOnlyRoots(isolate), key, Shape::Hash(isolate, key));
+ return FindEntry(ReadOnlyRoots(isolate), key);
+}
+
+template <typename Derived, typename Shape>
+InternalIndex HashTable<Derived, Shape>::FindEntry(ReadOnlyRoots roots,
+ Key key) {
+ return FindEntry(roots, key, Shape::Hash(roots, key));
}
// Find entry for key otherwise return kNotFound.
@@ -237,7 +243,7 @@ bool ObjectHashTableShape::IsMatch(Handle<Object> key, Object other) {
return key->SameValue(other);
}
-uint32_t ObjectHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
+uint32_t ObjectHashTableShape::Hash(ReadOnlyRoots roots, Handle<Object> key) {
return Smi::ToInt(key->GetHash());
}
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index a2d3221719..d59b1ae4fc 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -38,7 +38,7 @@ namespace internal {
// // Tells whether key matches other.
// static bool IsMatch(Key key, Object other);
// // Returns the hash value for key.
-// static uint32_t Hash(Isolate* isolate, Key key);
+// static uint32_t Hash(ReadOnlyRoots roots, Key key);
// // Returns the hash value for object.
// static uint32_t HashForObject(ReadOnlyRoots roots, Object object);
// // Convert key to an object.
@@ -60,7 +60,7 @@ template <typename KeyT>
class V8_EXPORT_PRIVATE BaseShape {
public:
using Key = KeyT;
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static const bool kNeedsHoleCheck = true;
static Object Unwrap(Object key) { return key; }
static inline bool IsKey(ReadOnlyRoots roots, Object key);
@@ -129,8 +129,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
using Key = typename Shape::Key;
// Returns a new HashTable object.
+ template <typename LocalIsolate>
V8_WARN_UNUSED_RESULT static Handle<Derived> New(
- Isolate* isolate, int at_least_space_for,
+ LocalIsolate* isolate, int at_least_space_for,
AllocationType allocation = AllocationType::kYoung,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
@@ -140,6 +141,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
// Find entry for key otherwise return kNotFound.
inline InternalIndex FindEntry(ReadOnlyRoots roots, Key key, int32_t hash);
+ inline InternalIndex FindEntry(ReadOnlyRoots roots, Key key);
inline InternalIndex FindEntry(Isolate* isolate, Key key);
// Rehashes the table in-place.
@@ -191,8 +193,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
}
// Ensure enough space for n additional elements.
+ template <typename LocalIsolate>
V8_WARN_UNUSED_RESULT static Handle<Derived> EnsureCapacity(
- Isolate* isolate, Handle<Derived> table, int n = 1,
+ LocalIsolate* isolate, Handle<Derived> table, int n = 1,
AllocationType allocation = AllocationType::kYoung);
// Returns true if this table has sufficient capacity for adding n elements.
@@ -201,8 +204,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
protected:
friend class ObjectHashTable;
+ template <typename LocalIsolate>
V8_WARN_UNUSED_RESULT static Handle<Derived> NewInternal(
- Isolate* isolate, int capacity, AllocationType allocation);
+ LocalIsolate* isolate, int capacity, AllocationType allocation);
// Find the entry at which to insert element with the given key that
// has the given hash value.
@@ -243,6 +247,24 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
OBJECT_CONSTRUCTORS(HashTable, HashTableBase);
};
+#define EXTERN_DECLARE_HASH_TABLE(DERIVED, SHAPE) \
+ extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) \
+ HashTable<class DERIVED, SHAPE>; \
+ \
+ extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Handle<DERIVED> \
+ HashTable<DERIVED, SHAPE>::New(Isolate*, int, AllocationType, \
+ MinimumCapacity); \
+ extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Handle<DERIVED> \
+ HashTable<DERIVED, SHAPE>::New(OffThreadIsolate*, int, AllocationType, \
+ MinimumCapacity); \
+ \
+ extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Handle<DERIVED> \
+ HashTable<DERIVED, SHAPE>::EnsureCapacity(Isolate*, Handle<DERIVED>, int, \
+ AllocationType); \
+ extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Handle<DERIVED> \
+ HashTable<DERIVED, SHAPE>::EnsureCapacity( \
+ OffThreadIsolate*, Handle<DERIVED>, int, AllocationType);
+
// HashTableKey is an abstract superclass for virtual key behavior.
class HashTableKey {
public:
@@ -269,7 +291,7 @@ class HashTableKey {
class ObjectHashTableShape : public BaseShape<Handle<Object>> {
public:
static inline bool IsMatch(Handle<Object> key, Object other);
- static inline uint32_t Hash(Isolate* isolate, Handle<Object> key);
+ static inline uint32_t Hash(ReadOnlyRoots roots, Handle<Object> key);
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static inline Handle<Object> AsHandle(Handle<Object> key);
static const int kPrefixSize = 0;
@@ -321,12 +343,12 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) ObjectHashTableBase
OBJECT_CONSTRUCTORS(ObjectHashTableBase, HashTable<Derived, Shape>);
};
-class ObjectHashTable;
+#define EXTERN_DECLARE_OBJECT_BASE_HASH_TABLE(DERIVED, SHAPE) \
+ EXTERN_DECLARE_HASH_TABLE(DERIVED, SHAPE) \
+ extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) \
+ ObjectHashTableBase<class DERIVED, SHAPE>;
-extern template class EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) HashTable<ObjectHashTable, ObjectHashTableShape>;
-extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>;
+EXTERN_DECLARE_OBJECT_BASE_HASH_TABLE(ObjectHashTable, ObjectHashTableShape)
// ObjectHashTable maps keys that are arbitrary objects to object values by
// using the identity hash of the key for hashing purposes.
@@ -343,15 +365,11 @@ class V8_EXPORT_PRIVATE ObjectHashTable
class EphemeronHashTableShape : public ObjectHashTableShape {
public:
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
};
-class EphemeronHashTable;
-
-extern template class EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) HashTable<EphemeronHashTable, EphemeronHashTableShape>;
-extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>;
+EXTERN_DECLARE_OBJECT_BASE_HASH_TABLE(EphemeronHashTable,
+ EphemeronHashTableShape)
// EphemeronHashTable is similar to ObjectHashTable but gets special treatment
// by the GC. The GC treats its entries as ephemerons: both key and value are
@@ -383,9 +401,7 @@ class ObjectHashSetShape : public ObjectHashTableShape {
static const int kEntrySize = 1;
};
-class ObjectHashSet;
-extern template class EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) HashTable<ObjectHashSet, ObjectHashSetShape>;
+EXTERN_DECLARE_HASH_TABLE(ObjectHashSet, ObjectHashSetShape)
class V8_EXPORT_PRIVATE ObjectHashSet
: public HashTable<ObjectHashSet, ObjectHashSetShape> {
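
The shape changes in this header all point the same way: Hash() now takes ReadOnlyRoots instead of Isolate*, and GetMap() returns a read-only-roots handle, so table operations can run on an off-thread isolate. A minimal hypothetical shape matching the documented interface (SmiKeyShape and the roots stand-in are ours, not V8 classes):

#include <cstdint>

struct ReadOnlyRootsModel {};  // stand-in for v8::internal::ReadOnlyRoots

struct SmiKeyShape {
  using Key = int;
  static bool IsMatch(Key key, int other) { return key == other; }
  // Hashing needs no full Isolate, only read-only state.
  static uint32_t Hash(ReadOnlyRootsModel, Key key) {
    return static_cast<uint32_t>(key) * 2654435761u;  // Knuth multiplicative hash
  }
  static const int kPrefixSize = 0;
  static const int kEntrySize = 1;
};
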
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index fcbb0ce833..b19d429320 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -84,9 +84,10 @@ class HeapObject : public Object {
// Oddball checks are faster when they are raw pointer comparisons, so the
// isolate/read-only roots overloads should be preferred where possible.
-#define IS_TYPE_FUNCTION_DECL(Type, Value) \
- V8_INLINE bool Is##Type(Isolate* isolate) const; \
- V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+ V8_INLINE bool Is##Type(Isolate* isolate) const; \
+ V8_INLINE bool Is##Type(OffThreadIsolate* isolate) const; \
+ V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(NullOrUndefined, /* unused */)
diff --git a/deps/v8/src/objects/heap-object.tq b/deps/v8/src/objects/heap-object.tq
index da80d0f944..e2f1fe4240 100644
--- a/deps/v8/src/objects/heap-object.tq
+++ b/deps/v8/src/objects/heap-object.tq
@@ -4,5 +4,5 @@
@abstract
extern class HeapObject extends StrongTagged {
- map: Map;
+ const map: Map;
}
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index acfb8759bf..5de264b1de 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -214,7 +214,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED(V) \
TORQUE_INSTANCE_CHECKERS_SINGLE_ONLY_DECLARED(V) \
V(BigInt, BIGINT_TYPE) \
- V(CoverageInfo, FIXED_ARRAY_TYPE) \
V(FixedArrayExact, FIXED_ARRAY_TYPE)
#define INSTANCE_TYPE_CHECKERS_RANGE(V) \
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index b201f7fb1e..dee27016d7 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -213,8 +213,8 @@ icu::UnicodeString Intl::ToICUUnicodeString(Isolate* isolate,
return icu::UnicodeString(uchar_buffer, length);
}
-icu::StringPiece Intl::ToICUStringPiece(Isolate* isolate,
- Handle<String> string) {
+namespace {
+icu::StringPiece ToICUStringPiece(Isolate* isolate, Handle<String> string) {
DCHECK(string->IsFlat());
DisallowHeapAllocation no_gc;
@@ -231,7 +231,6 @@ icu::StringPiece Intl::ToICUStringPiece(Isolate* isolate,
return icu::StringPiece(char_buffer, length);
}
-namespace {
MaybeHandle<String> LocaleConvertCase(Isolate* isolate, Handle<String> s,
bool is_to_upper, const char* lang) {
auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
@@ -432,7 +431,9 @@ std::string Intl::GetNumberingSystem(const icu::Locale& icu_locale) {
return "latn";
}
-icu::Locale Intl::CreateICULocale(const std::string& bcp47_locale) {
+namespace {
+
+Maybe<icu::Locale> CreateICULocale(const std::string& bcp47_locale) {
DisallowHeapAllocation no_gc;
// Convert BCP47 into ICU locale format.
@@ -441,12 +442,14 @@ icu::Locale Intl::CreateICULocale(const std::string& bcp47_locale) {
icu::Locale icu_locale = icu::Locale::forLanguageTag(bcp47_locale, status);
CHECK(U_SUCCESS(status));
if (icu_locale.isBogus()) {
- FATAL("Failed to create ICU locale, are ICU data files missing?");
+ return Nothing<icu::Locale>();
}
- return icu_locale;
+ return Just(icu_locale);
}
+} // anonymous namespace
+
// static
MaybeHandle<String> Intl::ToString(Isolate* isolate,
@@ -762,41 +765,15 @@ bool IsGrandfatheredTagWithoutPreferredVaule(const std::string& locale) {
return false;
}
-} // anonymous namespace
-
-Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
- Handle<Object> locale_in) {
- Handle<String> locale_str;
- // This does part of the validity checking spec'ed in CanonicalizeLocaleList:
- // 7c ii. If Type(kValue) is not String or Object, throw a TypeError
- // exception.
- // 7c iii. Let tag be ? ToString(kValue).
- // 7c iv. If IsStructurallyValidLanguageTag(tag) is false, throw a
- // RangeError exception.
-
- if (locale_in->IsString()) {
- locale_str = Handle<String>::cast(locale_in);
- } else if (locale_in->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, locale_str,
- Object::ToString(isolate, locale_in),
- Nothing<std::string>());
- } else {
- THROW_NEW_ERROR_RETURN_VALUE(isolate,
- NewTypeError(MessageTemplate::kLanguageID),
- Nothing<std::string>());
- }
- std::string locale(locale_str->ToCString().get());
-
- if (!IsStructurallyValidLanguageTag(locale)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
- }
- return Intl::CanonicalizeLanguageTag(isolate, locale);
+bool IsStructurallyValidLanguageTag(const std::string& tag) {
+ return JSLocale::StartsWithUnicodeLanguageId(tag);
}
-Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
- const std::string& locale_in) {
+// Canonicalize the locale.
+// https://tc39.github.io/ecma402/#sec-canonicalizelanguagetag,
+// including type check and structural validity check.
+Maybe<std::string> CanonicalizeLanguageTag(Isolate* isolate,
+ const std::string& locale_in) {
std::string locale = locale_in;
if (locale.length() == 0 ||
@@ -864,6 +841,39 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
return maybe_to_language_tag;
}
+Maybe<std::string> CanonicalizeLanguageTag(Isolate* isolate,
+ Handle<Object> locale_in) {
+ Handle<String> locale_str;
+ // This does part of the validity checking spec'ed in CanonicalizeLocaleList:
+ // 7c ii. If Type(kValue) is not String or Object, throw a TypeError
+ // exception.
+ // 7c iii. Let tag be ? ToString(kValue).
+ // 7c iv. If IsStructurallyValidLanguageTag(tag) is false, throw a
+ // RangeError exception.
+
+ if (locale_in->IsString()) {
+ locale_str = Handle<String>::cast(locale_in);
+ } else if (locale_in->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, locale_str,
+ Object::ToString(isolate, locale_in),
+ Nothing<std::string>());
+ } else {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NewTypeError(MessageTemplate::kLanguageID),
+ Nothing<std::string>());
+ }
+ std::string locale(locale_str->ToCString().get());
+
+ if (!IsStructurallyValidLanguageTag(locale)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
+ Nothing<std::string>());
+ }
+ return CanonicalizeLanguageTag(isolate, locale);
+}
+
+} // anonymous namespace
+
Maybe<std::vector<std::string>> Intl::CanonicalizeLocaleList(
Isolate* isolate, Handle<Object> locales, bool only_return_one_result) {
// 1. If locales is undefined, then
@@ -1069,9 +1079,9 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
UCollationResult result;
UErrorCode status = U_ZERO_ERROR;
- icu::StringPiece string_piece1 = Intl::ToICUStringPiece(isolate, string1);
+ icu::StringPiece string_piece1 = ToICUStringPiece(isolate, string1);
if (!string_piece1.empty()) {
- icu::StringPiece string_piece2 = Intl::ToICUStringPiece(isolate, string2);
+ icu::StringPiece string_piece2 = ToICUStringPiece(isolate, string2);
if (!string_piece2.empty()) {
result = icu_collator.compareUTF8(string_piece1, string_piece2, status);
DCHECK(U_SUCCESS(status));
@@ -1857,7 +1867,7 @@ std::string LookupMatcher(Isolate* isolate,
// this method performs such normalization.
//
// ecma402/#sec-resolvelocale
-Intl::ResolvedLocale Intl::ResolveLocale(
+Maybe<Intl::ResolvedLocale> Intl::ResolveLocale(
Isolate* isolate, const std::set<std::string>& available_locales,
const std::vector<std::string>& requested_locales, MatcherOption matcher,
const std::set<std::string>& relevant_extension_keys) {
@@ -1868,7 +1878,9 @@ Intl::ResolvedLocale Intl::ResolveLocale(
locale = LookupMatcher(isolate, available_locales, requested_locales);
}
- icu::Locale icu_locale = CreateICULocale(locale);
+ Maybe<icu::Locale> maybe_icu_locale = CreateICULocale(locale);
+ MAYBE_RETURN(maybe_icu_locale, Nothing<Intl::ResolvedLocale>());
+ icu::Locale icu_locale = maybe_icu_locale.FromJust();
std::map<std::string, std::string> extensions =
LookupAndValidateUnicodeExtensions(&icu_locale, relevant_extension_keys);
@@ -1876,7 +1888,8 @@ Intl::ResolvedLocale Intl::ResolveLocale(
// TODO(gsathya): Remove privateuse subtags from extensions.
- return Intl::ResolvedLocale{canonicalized_locale, icu_locale, extensions};
+ return Just(
+ Intl::ResolvedLocale{canonicalized_locale, icu_locale, extensions});
}
Handle<Managed<icu::UnicodeString>> Intl::SetTextToBreakIterator(
@@ -2062,26 +2075,6 @@ base::TimezoneCache* Intl::CreateTimeZoneCache() {
: base::OS::CreateTimezoneCache();
}
-Maybe<Intl::CaseFirst> Intl::GetCaseFirst(Isolate* isolate,
- Handle<JSReceiver> options,
- const char* method) {
- return Intl::GetStringOption<Intl::CaseFirst>(
- isolate, options, "caseFirst", method, {"upper", "lower", "false"},
- {Intl::CaseFirst::kUpper, Intl::CaseFirst::kLower,
- Intl::CaseFirst::kFalse},
- Intl::CaseFirst::kUndefined);
-}
-
-Maybe<Intl::HourCycle> Intl::GetHourCycle(Isolate* isolate,
- Handle<JSReceiver> options,
- const char* method) {
- return Intl::GetStringOption<Intl::HourCycle>(
- isolate, options, "hourCycle", method, {"h11", "h12", "h23", "h24"},
- {Intl::HourCycle::kH11, Intl::HourCycle::kH12, Intl::HourCycle::kH23,
- Intl::HourCycle::kH24},
- Intl::HourCycle::kUndefined);
-}
-
Maybe<Intl::MatcherOption> Intl::GetLocaleMatcher(Isolate* isolate,
Handle<JSReceiver> options,
const char* method) {
@@ -2114,14 +2107,6 @@ Maybe<bool> Intl::GetNumberingSystem(Isolate* isolate,
return Just(false);
}
-Intl::HourCycle Intl::ToHourCycle(const std::string& hc) {
- if (hc == "h11") return Intl::HourCycle::kH11;
- if (hc == "h12") return Intl::HourCycle::kH12;
- if (hc == "h23") return Intl::HourCycle::kH23;
- if (hc == "h24") return Intl::HourCycle::kH24;
- return Intl::HourCycle::kUndefined;
-}
-
const std::set<std::string>& Intl::GetAvailableLocalesForLocale() {
static base::LazyInstance<Intl::AvailableLocales<icu::Locale>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
@@ -2217,9 +2202,6 @@ MaybeHandle<String> Intl::FormattedToString(
return Intl::ToString(isolate, result);
}
-bool Intl::IsStructurallyValidLanguageTag(const std::string& tag) {
- return JSLocale::StartsWithUnicodeLanguageId(tag);
-}
} // namespace internal
} // namespace v8
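
The recurring change in this file is error propagation: CreateICULocale and ResolveLocale now return Maybe<T> and report a bogus locale as Nothing instead of crashing via FATAL, and each caller converts Nothing into a thrown RangeError. A self-contained model of the control flow, with std::optional standing in for V8's Maybe<T>:

#include <optional>
#include <string>

// Was: FATAL("Failed to create ICU locale, ...") on a bogus locale.
std::optional<std::string> CreateLocaleModel(const std::string& bcp47) {
  if (bcp47.empty()) return std::nullopt;  // "bogus" input
  return bcp47;
}

// Was: ResolveLocale returning ResolvedLocale by value; now the failure
// is forwarded to the caller (MAYBE_RETURN in the real code).
std::optional<std::string> ResolveLocaleModel(const std::string& bcp47) {
  std::optional<std::string> locale = CreateLocaleModel(bcp47);
  if (!locale.has_value()) return std::nullopt;
  return "resolved:" + *locale;
}
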
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 3ae098f661..0d8cab42e8 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -27,7 +27,6 @@ namespace U_ICU_NAMESPACE {
class BreakIterator;
class Collator;
class FormattedValue;
-class SimpleDateFormat;
class UnicodeString;
} // namespace U_ICU_NAMESPACE
@@ -132,15 +131,6 @@ class Intl {
Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
int min, int max, int fallback);
- // Canonicalize the locale.
- // https://tc39.github.io/ecma402/#sec-canonicalizelanguagetag,
- // including type check and structural validity check.
- static Maybe<std::string> CanonicalizeLanguageTag(Isolate* isolate,
- Handle<Object> locale_in);
-
- static Maybe<std::string> CanonicalizeLanguageTag(Isolate* isolate,
- const std::string& locale);
-
// https://tc39.github.io/ecma402/#sec-canonicalizelocalelist
// {only_return_one_result} is an optimization for callers that only
// care about the first result.
@@ -189,8 +179,6 @@ class Intl {
int mnfd_default, int mxfd_default,
bool notation_is_compact);
- static icu::Locale CreateICULocale(const std::string& bcp47_locale);
-
// Helper function to convert a UnicodeString to a Handle<String>
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToString(
Isolate* isolate, const icu::UnicodeString& string);
@@ -239,22 +227,6 @@ class Intl {
Isolate* isolate, Handle<JSReceiver> receiver,
Handle<JSFunction> constructor, bool has_initialized_slot);
- // enum for "caseFirst" option: shared by Intl.Locale and Intl.Collator.
- enum class CaseFirst { kUndefined, kUpper, kLower, kFalse };
-
- // Shared function to read the "caseFirst" option.
- V8_WARN_UNUSED_RESULT static Maybe<CaseFirst> GetCaseFirst(
- Isolate* isolate, Handle<JSReceiver> options, const char* method);
-
- // enum for "hourCycle" option: shared by Intl.Locale and Intl.DateTimeFormat.
- enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };
-
- static HourCycle ToHourCycle(const std::string& str);
-
- // Shared function to read the "hourCycle" option.
- V8_WARN_UNUSED_RESULT static Maybe<HourCycle> GetHourCycle(
- Isolate* isolate, Handle<JSReceiver> options, const char* method);
-
// enum for "localeMatcher" option: shared by many Intl objects.
enum class MatcherOption { kBestFit, kLookup };
@@ -286,7 +258,7 @@ class Intl {
std::map<std::string, std::string> extensions;
};
- static ResolvedLocale ResolveLocale(
+ static Maybe<ResolvedLocale> ResolveLocale(
Isolate* isolate, const std::set<std::string>& available_locales,
const std::vector<std::string>& requested_locales, MatcherOption options,
const std::set<std::string>& relevant_extension_keys);
@@ -335,10 +307,6 @@ class Intl {
static icu::UnicodeString ToICUUnicodeString(Isolate* isolate,
Handle<String> string);
- // Convert a Handle<String> to icu::StringPiece
- static icu::StringPiece ToICUStringPiece(Isolate* isolate,
- Handle<String> string);
-
static const uint8_t* ToLatin1LowerTable();
static String ConvertOneByteToLower(String src, String dst);
@@ -346,8 +314,6 @@ class Intl {
static const std::set<std::string>& GetAvailableLocalesForLocale();
static const std::set<std::string>& GetAvailableLocalesForDateFormat();
-
- static bool IsStructurallyValidLanguageTag(const std::string& tag);
};
} // namespace internal
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
index f47c0f4adc..8f4f130a0d 100644
--- a/deps/v8/src/objects/intl-objects.tq
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -15,64 +15,107 @@
#include 'src/objects/js-segment-iterator.h'
#include 'src/objects/js-segmenter.h'
+type DateTimeStyle extends int32 constexpr 'JSDateTimeFormat::DateTimeStyle';
+type HourCycle extends int32 constexpr 'JSDateTimeFormat::HourCycle';
+bitfield struct JSDateTimeFormatFlags extends uint31 {
+ hour_cycle: HourCycle: 3 bit;
+ date_style: DateTimeStyle: 3 bit;
+ time_style: DateTimeStyle: 3 bit;
+}
+
extern class JSDateTimeFormat extends JSObject {
locale: String;
icu_locale: Foreign; // Managed<icu::Locale>
icu_simple_date_format: Foreign; // Managed<icu::SimpleDateFormat>
icu_date_interval_format: Foreign; // Managed<icu::DateIntervalFormat>
bound_format: JSFunction|Undefined;
- flags: Smi;
+ flags: SmiTagged<JSDateTimeFormatFlags>;
+}
+
+type JSDisplayNamesStyle extends int32 constexpr 'JSDisplayNames::Style';
+type JSDisplayNamesFallback extends int32
+constexpr 'JSDisplayNames::Fallback';
+bitfield struct JSDisplayNamesFlags extends uint31 {
+ style: JSDisplayNamesStyle: 2 bit;
+ fallback: JSDisplayNamesFallback: 1 bit;
}
extern class JSDisplayNames extends JSObject {
internal: Foreign; // Managed<DisplayNamesInternal>
- flags: Smi;
+ flags: SmiTagged<JSDisplayNamesFlags>;
+}
+
+type JSListFormatStyle extends int32 constexpr 'JSListFormat::Style';
+type JSListFormatType extends int32 constexpr 'JSListFormat::Type';
+bitfield struct JSListFormatFlags extends uint31 {
+ style: JSListFormatStyle: 2 bit;
+ Type: JSListFormatType: 2 bit; // "type" is a reserved word.
}
extern class JSListFormat extends JSObject {
locale: String;
icu_formatter: Foreign; // Managed<icu::ListFormatter>
- flags: Smi;
+ flags: SmiTagged<JSListFormatFlags>;
}
extern class JSNumberFormat extends JSObject {
locale: String;
- numberingSystem: String;
icu_number_formatter:
Foreign; // Managed<icu::number::LocalizedNumberFormatter>
bound_format: JSFunction|Undefined;
- flags: Smi;
+}
+
+type JSPluralRulesType extends int32 constexpr 'JSPluralRules::Type';
+bitfield struct JSPluralRulesFlags extends uint31 {
+ Type: JSPluralRulesType: 1 bit; // "type" is a reserved word.
}
extern class JSPluralRules extends JSObject {
locale: String;
- flags: Smi;
+ flags: SmiTagged<JSPluralRulesFlags>;
icu_plural_rules: Foreign; // Managed<icu::PluralRules>
icu_number_formatter:
Foreign; // Managed<icu::number::LocalizedNumberFormatter>
}
+type JSRelativeTimeFormatNumeric extends int32
+constexpr 'JSRelativeTimeFormat::Numeric';
+bitfield struct JSRelativeTimeFormatFlags extends uint31 {
+ numeric: JSRelativeTimeFormatNumeric: 1 bit;
+}
+
extern class JSRelativeTimeFormat extends JSObject {
locale: String;
numberingSystem: String;
icu_formatter: Foreign; // Managed<icu::RelativeDateTimeFormatter>
- flags: Smi;
+ flags: SmiTagged<JSRelativeTimeFormatFlags>;
}
extern class JSLocale extends JSObject {
icu_locale: Foreign; // Managed<icu::Locale>
}
+type JSSegmenterGranularity extends int32
+constexpr 'JSSegmenter::Granularity';
+bitfield struct JSSegmenterFlags extends uint31 {
+ granularity: JSSegmenterGranularity: 2 bit;
+}
+
extern class JSSegmenter extends JSObject {
locale: String;
icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
- flags: Smi;
+ flags: SmiTagged<JSSegmenterFlags>;
+}
+
+bitfield struct JSSegmentIteratorFlags extends uint31 {
+ granularity: JSSegmenterGranularity: 2 bit;
+ break_type_set: bool: 1 bit;
}
extern class JSSegmentIterator extends JSObject {
icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
unicode_string: Foreign; // Managed<icu::UnicodeString>
- flags: Smi;
+ flags: SmiTagged<JSSegmentIteratorFlags>;
}
extern class JSV8BreakIterator extends JSObject {
@@ -84,7 +127,6 @@ extern class JSV8BreakIterator extends JSObject {
bound_next: Undefined|JSFunction;
bound_current: Undefined|JSFunction;
bound_break_type: Undefined|JSFunction;
- break_iterator_type: Smi;
}
extern class JSCollator extends JSObject {
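
Every flags: Smi field in this file becomes flags: SmiTagged<...Flags> backed by a bitfield struct, letting Torque generate typed accessors instead of hand-written Smi masking. The encode/decode mechanics, modeled in self-contained C++ (the BitFieldModel template imitates V8's base::BitField; the names are illustrative):

#include <cstdint>

enum class HourCycle : uint32_t { kUndefined, kH11, kH12, kH23, kH24 };

template <typename T, int kShift, int kBits>
struct BitFieldModel {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

// hour_cycle: HourCycle: 3 bit; -- the first field starts at shift 0.
using HourCycleBits = BitFieldModel<HourCycle, 0, 3>;

static_assert(HourCycleBits::decode(HourCycleBits::encode(HourCycle::kH23)) ==
                  HourCycle::kH23,
              "round-trips through the packed word");
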
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 45b9d8c8b0..b77f5580e2 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -46,26 +46,74 @@ void JSArrayBuffer::set_backing_store(void* value) {
ArrayBufferExtension* JSArrayBuffer::extension() const {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
+#if V8_COMPRESS_POINTERS
+ // With pointer compression the extension field might not be
+ // pointer-aligned. However, on ARM64 this field needs to be aligned to
+ // perform atomic operations on it. Therefore we split the pointer into
+ // two 32-bit words that we update atomically. We don't have an ABA
+ // problem here since there can never be an Attach() after Detach()
+ // (transitions only go from NULL --> some ptr --> NULL).
+
+ // Synchronize with publishing release store of non-null extension
+ uint32_t lo = base::AsAtomic32::Acquire_Load(extension_lo());
+ if (lo & kUninitializedTagMask) return nullptr;
+
+ // Synchronize with release store of null extension
+ uint32_t hi = base::AsAtomic32::Acquire_Load(extension_hi());
+ uint32_t verify_lo = base::AsAtomic32::Relaxed_Load(extension_lo());
+ if (lo != verify_lo) return nullptr;
+
+ uintptr_t address = static_cast<uintptr_t>(lo);
+ address |= static_cast<uintptr_t>(hi) << 32;
+ return reinterpret_cast<ArrayBufferExtension*>(address);
+#else
return base::AsAtomicPointer::Acquire_Load(extension_location());
+#endif
} else {
return nullptr;
}
}
+void JSArrayBuffer::set_extension(ArrayBufferExtension* extension) {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
+#if V8_COMPRESS_POINTERS
+ if (extension != nullptr) {
+ uintptr_t address = reinterpret_cast<uintptr_t>(extension);
+ base::AsAtomic32::Relaxed_Store(extension_hi(),
+ static_cast<uint32_t>(address >> 32));
+ base::AsAtomic32::Release_Store(extension_lo(),
+ static_cast<uint32_t>(address));
+ } else {
+ base::AsAtomic32::Relaxed_Store(extension_lo(),
+ 0 | kUninitializedTagMask);
+ base::AsAtomic32::Release_Store(extension_hi(), 0);
+ }
+#else
+ base::AsAtomicPointer::Release_Store(extension_location(), extension);
+#endif
+ MarkingBarrierForArrayBufferExtension(*this, extension);
+ } else {
+ CHECK_EQ(extension, nullptr);
+ }
+}
+
ArrayBufferExtension** JSArrayBuffer::extension_location() const {
Address location = field_address(kExtensionOffset);
return reinterpret_cast<ArrayBufferExtension**>(location);
}
-void JSArrayBuffer::set_extension(ArrayBufferExtension* value) {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
- base::AsAtomicPointer::Release_Store(extension_location(), value);
- MarkingBarrierForArrayBufferExtension(*this, value);
- } else {
- CHECK_EQ(value, nullptr);
- }
+#if V8_COMPRESS_POINTERS
+uint32_t* JSArrayBuffer::extension_lo() const {
+ Address location = field_address(kExtensionOffset);
+ return reinterpret_cast<uint32_t*>(location);
}
+uint32_t* JSArrayBuffer::extension_hi() const {
+ Address location = field_address(kExtensionOffset) + sizeof(uint32_t);
+ return reinterpret_cast<uint32_t*>(location);
+}
+#endif
+
size_t JSArrayBuffer::allocation_length() const {
if (backing_store() == nullptr) {
return 0;
@@ -108,6 +156,7 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_asmjs_memory,
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)
+
size_t JSArrayBufferView::byte_offset() const {
return ReadField<size_t>(kByteOffsetOffset);
}
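
The getter/setter pair above implements a small lock-free protocol: under pointer compression the 64-bit extension pointer lives in two 32-bit halves, the low word carries an "uninitialized" tag bit (valid because Malloced extensions are at least 2-byte aligned), and readers re-check the low word after loading the high word. Since the field only ever transitions NULL -> ptr -> NULL, a changed low word means the buffer was detached mid-read. A self-contained 64-bit model of the same protocol (SplitExtensionSlot is ours, not V8's type):

#include <atomic>
#include <cstdint>

struct SplitExtensionSlot {
  static constexpr uint32_t kUninitializedTagMask = 1;
  std::atomic<uint32_t> lo{kUninitializedTagMask};
  std::atomic<uint32_t> hi{0};

  void Store(void* extension) {  // assumes a 64-bit platform
    uint64_t address = reinterpret_cast<uintptr_t>(extension);
    if (extension != nullptr) {
      hi.store(static_cast<uint32_t>(address >> 32), std::memory_order_relaxed);
      lo.store(static_cast<uint32_t>(address), std::memory_order_release);
    } else {
      lo.store(kUninitializedTagMask, std::memory_order_relaxed);
      hi.store(0, std::memory_order_release);
    }
  }

  void* Load() {
    uint32_t low = lo.load(std::memory_order_acquire);
    if (low & kUninitializedTagMask) return nullptr;
    uint32_t high = hi.load(std::memory_order_acquire);
    // A concurrent detach rewrote lo; treat the slot as already null.
    if (lo.load(std::memory_order_relaxed) != low) return nullptr;
    uint64_t address = low | (static_cast<uint64_t>(high) << 32);
    return reinterpret_cast<void*>(static_cast<uintptr_t>(address));
  }
};
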
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index f4f4320537..0c2aca6d71 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -59,14 +59,18 @@ void JSArrayBuffer::Setup(SharedFlag shared,
void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
DCHECK_NOT_NULL(backing_store);
DCHECK_EQ(is_shared(), backing_store->is_shared());
+ DCHECK(!was_detached());
set_backing_store(backing_store->buffer_start());
set_byte_length(backing_store->byte_length());
if (backing_store->is_wasm_memory()) set_is_detachable(false);
if (!backing_store->free_on_destruct()) set_is_external(true);
if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
Heap* heap = GetIsolate()->heap();
- EnsureExtension(heap);
- extension()->set_backing_store(std::move(backing_store));
+ ArrayBufferExtension* extension = EnsureExtension();
+ size_t bytes = backing_store->PerIsolateAccountingLength();
+ extension->set_accounting_length(bytes);
+ extension->set_backing_store(std::move(backing_store));
+ heap->AppendArrayBufferExtension(*this, extension);
} else {
GetIsolate()->heap()->RegisterBackingStore(*this, std::move(backing_store));
}
@@ -113,14 +117,13 @@ std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() {
}
}
-ArrayBufferExtension* JSArrayBuffer::EnsureExtension(Heap* heap) {
+ArrayBufferExtension* JSArrayBuffer::EnsureExtension() {
DCHECK(V8_ARRAY_BUFFER_EXTENSION_BOOL);
- if (extension() != nullptr) return extension();
+ ArrayBufferExtension* extension = this->extension();
+ if (extension != nullptr) return extension;
- ArrayBufferExtension* extension =
- new ArrayBufferExtension(std::shared_ptr<BackingStore>());
+ extension = new ArrayBufferExtension(std::shared_ptr<BackingStore>());
set_extension(extension);
- heap->AppendArrayBufferExtension(*this, extension);
return extension;
}
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 1f3024ef6a..624b716713 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
#define V8_OBJECTS_JS_ARRAY_BUFFER_H_
-#include "src/base/bit-field.h"
#include "src/objects/backing-store.h"
#include "src/objects/js-objects.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -51,14 +51,7 @@ class JSArrayBuffer : public JSObject {
V8_INLINE void clear_padding();
// Bit positions for [bit_field].
-#define JS_ARRAY_BUFFER_BIT_FIELD_FIELDS(V, _) \
- V(IsExternalBit, bool, 1, _) \
- V(IsDetachableBit, bool, 1, _) \
- V(WasDetachedBit, bool, 1, _) \
- V(IsAsmJsMemoryBit, bool, 1, _) \
- V(IsSharedBit, bool, 1, _)
- DEFINE_BIT_FIELDS(JS_ARRAY_BUFFER_BIT_FIELD_FIELDS)
-#undef JS_ARRAY_BUFFER_BIT_FIELD_FIELDS
+ DEFINE_TORQUE_GENERATED_JS_ARRAY_BUFFER_FLAGS()
// [is_external]: true indicates that the embedder is in charge of freeing the
// backing_store, while is_external == false means that v8 will free the
@@ -107,7 +100,7 @@ class JSArrayBuffer : public JSObject {
// Allocates an ArrayBufferExtension for this array buffer, unless it is
// already associated with an extension.
- ArrayBufferExtension* EnsureExtension(Heap* heap);
+ ArrayBufferExtension* EnsureExtension();
// Frees the associated ArrayBufferExtension and returns its backing store.
std::shared_ptr<BackingStore> RemoveExtension();
@@ -121,22 +114,10 @@ class JSArrayBuffer : public JSObject {
DECL_PRINTER(JSArrayBuffer)
DECL_VERIFIER(JSArrayBuffer)
-// Layout description.
-#define JS_ARRAY_BUFFER_FIELDS(V) \
- V(kEndOfTaggedFieldsOffset, 0) \
- /* Raw data fields. */ \
- V(kByteLengthOffset, kUIntptrSize) \
- V(kBackingStoreOffset, kSystemPointerSize) \
- V(kExtensionOffset, \
- (V8_ARRAY_BUFFER_EXTENSION_BOOL ? kSystemPointerSize : 0)) \
- V(kBitFieldOffset, kInt32Size) \
- /* Pads header size to be a multiple of kTaggedSize. */ \
- V(kOptionalPaddingOffset, OBJECT_POINTER_PADDING(kOptionalPaddingOffset)) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_ARRAY_BUFFER_FIELDS)
-#undef JS_ARRAY_BUFFER_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JS_ARRAY_BUFFER_FIELDS)
+ static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
static const int kSizeWithEmbedderFields =
kHeaderSize +
@@ -148,6 +129,13 @@ class JSArrayBuffer : public JSObject {
private:
inline ArrayBufferExtension** extension_location() const;
+
+#if V8_COMPRESS_POINTERS
+ static const int kUninitializedTagMask = 1;
+
+ inline uint32_t* extension_lo() const;
+ inline uint32_t* extension_hi() const;
+#endif
};
// Each JSArrayBuffer (with a backing store) has a corresponding native-heap
@@ -163,6 +151,7 @@ class ArrayBufferExtension : public Malloced {
std::atomic<GcState> young_gc_state_;
std::shared_ptr<BackingStore> backing_store_;
ArrayBufferExtension* next_;
+ std::size_t accounting_length_;
GcState young_gc_state() {
return young_gc_state_.load(std::memory_order_relaxed);
@@ -177,12 +166,14 @@ class ArrayBufferExtension : public Malloced {
: marked_(false),
young_gc_state_(GcState::Dead),
backing_store_(std::shared_ptr<BackingStore>()),
- next_(nullptr) {}
+ next_(nullptr),
+ accounting_length_(0) {}
explicit ArrayBufferExtension(std::shared_ptr<BackingStore> backing_store)
: marked_(false),
young_gc_state_(GcState::Dead),
backing_store_(backing_store),
- next_(nullptr) {}
+ next_(nullptr),
+ accounting_length_(0) {}
void Mark() { marked_.store(true, std::memory_order_relaxed); }
void Unmark() { marked_.store(false, std::memory_order_relaxed); }
@@ -198,6 +189,12 @@ class ArrayBufferExtension : public Malloced {
std::shared_ptr<BackingStore> backing_store() { return backing_store_; }
BackingStore* backing_store_raw() { return backing_store_.get(); }
+ size_t accounting_length() { return accounting_length_; }
+
+ void set_accounting_length(size_t accounting_length) {
+ accounting_length_ = accounting_length;
+ }
+
std::shared_ptr<BackingStore> RemoveBackingStore() {
return std::move(backing_store_);
}
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index 92c004d653..9f4f1ba377 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -2,9 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+bitfield struct JSArrayBufferFlags extends uint32 {
+ is_external: bool: 1 bit;
+ is_detachable: bool: 1 bit;
+ was_detached: bool: 1 bit;
+ is_asm_js_memory: bool: 1 bit;
+ is_shared: bool: 1 bit;
+}
+
extern class JSArrayBuffer extends JSObject {
byte_length: uintptr;
backing_store: RawPtr;
+ @if(V8_ARRAY_BUFFER_EXTENSION_BOOL) extension: RawPtr;
+ @ifnot(V8_ARRAY_BUFFER_EXTENSION_BOOL) extension: void;
+ bit_field: JSArrayBufferFlags;
+ // Pads header size to be a multiple of kTaggedSize.
+ @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
+ @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
+}
+
+@export
+macro IsDetachedBuffer(buffer: JSArrayBuffer): bool {
+ return buffer.bit_field.was_detached;
+}
+
+macro IsSharedArrayBuffer(buffer: JSArrayBuffer): bool {
+ return buffer.bit_field.is_shared;
}
@abstract
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 86e87ddb0d..0dd23edaa5 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -20,14 +20,6 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator, JSObject)
-inline JSV8BreakIterator::Type JSV8BreakIterator::type() const {
- return static_cast<JSV8BreakIterator::Type>(raw_type());
-}
-
-inline void JSV8BreakIterator::set_type(Type type) {
- set_raw_type(static_cast<int>(type));
-}
-
ACCESSORS(JSV8BreakIterator, locale, String, kLocaleOffset)
ACCESSORS(JSV8BreakIterator, break_iterator, Managed<icu::BreakIterator>,
kBreakIteratorOffset)
@@ -39,8 +31,6 @@ ACCESSORS(JSV8BreakIterator, bound_next, Object, kBoundNextOffset)
ACCESSORS(JSV8BreakIterator, bound_current, Object, kBoundCurrentOffset)
ACCESSORS(JSV8BreakIterator, bound_break_type, Object, kBoundBreakTypeOffset)
-SMI_ACCESSORS(JSV8BreakIterator, raw_type, kBreakIteratorTypeOffset)
-
CAST_ACCESSOR(JSV8BreakIterator)
} // namespace internal
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 1a9d096411..28db1699f4 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -15,6 +15,10 @@
namespace v8 {
namespace internal {
+namespace {
+enum class Type { CHARACTER, WORD, SENTENCE, LINE };
+} // anonymous namespace
+
MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
Handle<Object> options_obj, const char* service) {
@@ -42,9 +46,14 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSV8BreakIterator>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
- Intl::ResolvedLocale r =
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale =
Intl::ResolveLocale(isolate, JSV8BreakIterator::GetAvailableLocales(),
requested_locales, matcher, {});
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSV8BreakIterator);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
// Extract type from options
Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
@@ -70,20 +79,24 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
icu::BreakIterator::createSentenceInstance(icu_locale, status));
break;
case Type::LINE:
+ isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kBreakIteratorTypeLine);
break_iterator.reset(
icu::BreakIterator::createLineInstance(icu_locale, status));
break;
default:
+ isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kBreakIteratorTypeWord);
break_iterator.reset(
icu::BreakIterator::createWordInstance(icu_locale, status));
break;
}
// Error handling for break_iterator
- if (U_FAILURE(status)) {
- FATAL("Failed to create ICU break iterator, are ICU data files missing?");
+ if (U_FAILURE(status) || break_iterator.get() == nullptr) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSV8BreakIterator);
}
- CHECK_NOT_NULL(break_iterator.get());
isolate->CountUsage(v8::Isolate::UseCounterFeature::kBreakIterator);
// Construct managed objects from pointers
@@ -102,7 +115,6 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
DisallowHeapAllocation no_gc;
break_iterator_holder->set_locale(*locale_str);
- break_iterator_holder->set_type(type_enum);
break_iterator_holder->set_break_iterator(*managed_break_iterator);
break_iterator_holder->set_unicode_string(*managed_unicode_string);
@@ -110,17 +122,68 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
return break_iterator_holder;
}
+namespace {
+
+Type GetType(icu::BreakIterator* break_iterator) {
+ // The developer calling Intl.v8BreakIterator already knows the type, so
+ // the type is only needed when resolvedOptions() is called. Instead of
+ // storing it with the JSV8BreakIterator object, we use the following
+ // trick to figure it out on demand and save memory.
+ // This routine is not fast, but it should be needed only rarely.
+
+ // We need to clone the break iterator because we need to call setText() on it.
+ std::unique_ptr<icu::BreakIterator> cloned_break_iterator(
+ break_iterator->clone());
+ // Use a magic string "He is." to call next().
+ // character type: will return 1 for "H"
+ // word type: will return 2 for "He"
+ // line type: will return 3 for "He "
+ // sentence type: will return 6 for "He is."
+ icu::UnicodeString data("He is.");
+ cloned_break_iterator->setText(data);
+ switch (cloned_break_iterator->next()) {
+ case 1: // After "H"
+ return Type::CHARACTER;
+ case 2: // After "He"
+ return Type::WORD;
+ case 3: // After "He "
+ return Type::LINE;
+ case 6: // After "He is."
+ return Type::SENTENCE;
+ default:
+ UNREACHABLE();
+ }
+}
+
+Handle<String> TypeAsString(Isolate* isolate, Type type) {
+ switch (type) {
+ case Type::CHARACTER:
+ return ReadOnlyRoots(isolate).character_string_handle();
+ case Type::WORD:
+ return ReadOnlyRoots(isolate).word_string_handle();
+ case Type::SENTENCE:
+ return ReadOnlyRoots(isolate).sentence_string_handle();
+ case Type::LINE:
+ return ReadOnlyRoots(isolate).line_string_handle();
+ }
+ UNREACHABLE();
+}
+
+} // anonymous namespace
+
Handle<JSObject> JSV8BreakIterator::ResolvedOptions(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
Factory* factory = isolate->factory();
+ Type type = GetType(break_iterator->break_iterator().raw());
+
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
Handle<String> locale(break_iterator->locale(), isolate);
JSObject::AddProperty(isolate, result, factory->locale_string(), locale,
NONE);
JSObject::AddProperty(isolate, result, factory->type_string(),
- break_iterator->TypeAsString(), NONE);
+ TypeAsString(isolate, type), NONE);
return result;
}
@@ -135,20 +198,6 @@ void JSV8BreakIterator::AdoptText(
break_iterator_holder->set_unicode_string(*unicode_string);
}
-Handle<String> JSV8BreakIterator::TypeAsString() const {
- switch (type()) {
- case Type::CHARACTER:
- return GetReadOnlyRoots().character_string_handle();
- case Type::WORD:
- return GetReadOnlyRoots().word_string_handle();
- case Type::SENTENCE:
- return GetReadOnlyRoots().sentence_string_handle();
- case Type::LINE:
- return GetReadOnlyRoots().line_string_handle();
- }
- UNREACHABLE();
-}
-
Handle<Object> JSV8BreakIterator::Current(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
return isolate->factory()->NewNumberFromInt(
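
GetType() above recovers the iterator kind by probing: each of ICU's four break iterator types places its first boundary in "He is." at a distinct offset. A hedged sketch of the probe against the public ICU API (requires unicode/brkiter.h; the helper name is ours, not V8's):

#include <unicode/brkiter.h>

// Returns the offset of the first boundary in "He is.": 1 for a
// character iterator, 2 for word, 3 for line, 6 for sentence.
int32_t FirstBoundaryOffset(icu::BreakIterator& iterator) {
  icu::UnicodeString probe("He is.");
  iterator.setText(probe);
  return iterator.next();
}
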
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index ea66fe6732..e21fa9f0eb 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -51,12 +51,6 @@ class JSV8BreakIterator : public JSObject {
static String BreakType(Isolate* isolate,
Handle<JSV8BreakIterator> break_iterator);
- enum class Type { CHARACTER, WORD, SENTENCE, LINE };
- inline void set_type(Type type);
- inline Type type() const;
-
- Handle<String> TypeAsString() const;
-
DECL_CAST(JSV8BreakIterator)
DECL_PRINTER(JSV8BreakIterator)
DECL_VERIFIER(JSV8BreakIterator)
@@ -75,8 +69,6 @@ class JSV8BreakIterator : public JSObject {
TORQUE_GENERATED_JS_V8_BREAK_ITERATOR_FIELDS)
private:
- DECL_INT_ACCESSORS(raw_type)
-
OBJECT_CONSTRUCTORS(JSV8BreakIterator, JSObject);
};
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index 39178b3acf..046fa25e3d 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -37,6 +37,17 @@ enum class Sensitivity {
kUndefined,
};
+// enum for "caseFirst" option.
+enum class CaseFirst { kUndefined, kUpper, kLower, kFalse };
+
+Maybe<CaseFirst> GetCaseFirst(Isolate* isolate, Handle<JSReceiver> options,
+ const char* method) {
+ return Intl::GetStringOption<CaseFirst>(
+ isolate, options, "caseFirst", method, {"upper", "lower", "false"},
+ {CaseFirst::kUpper, CaseFirst::kLower, CaseFirst::kFalse},
+ CaseFirst::kUndefined);
+}
+
// TODO(gsathya): Consider internalizing the value strings.
void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
Handle<String> key, const char* value) {
@@ -202,21 +213,21 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
namespace {
-Intl::CaseFirst ToCaseFirst(const char* str) {
- if (strcmp(str, "upper") == 0) return Intl::CaseFirst::kUpper;
- if (strcmp(str, "lower") == 0) return Intl::CaseFirst::kLower;
- if (strcmp(str, "false") == 0) return Intl::CaseFirst::kFalse;
- return Intl::CaseFirst::kUndefined;
+CaseFirst ToCaseFirst(const char* str) {
+ if (strcmp(str, "upper") == 0) return CaseFirst::kUpper;
+ if (strcmp(str, "lower") == 0) return CaseFirst::kLower;
+ if (strcmp(str, "false") == 0) return CaseFirst::kFalse;
+ return CaseFirst::kUndefined;
}
-UColAttributeValue ToUColAttributeValue(Intl::CaseFirst case_first) {
+UColAttributeValue ToUColAttributeValue(CaseFirst case_first) {
switch (case_first) {
- case Intl::CaseFirst::kUpper:
+ case CaseFirst::kUpper:
return UCOL_UPPER_FIRST;
- case Intl::CaseFirst::kLower:
+ case CaseFirst::kLower:
return UCOL_LOWER_FIRST;
- case Intl::CaseFirst::kFalse:
- case Intl::CaseFirst::kUndefined:
+ case CaseFirst::kFalse:
+ case CaseFirst::kUndefined:
return UCOL_OFF;
}
}
@@ -229,8 +240,7 @@ void SetNumericOption(icu::Collator* icu_collator, bool numeric) {
CHECK(U_SUCCESS(status));
}
-void SetCaseFirstOption(icu::Collator* icu_collator,
- Intl::CaseFirst case_first) {
+void SetCaseFirstOption(icu::Collator* icu_collator, CaseFirst case_first) {
CHECK_NOT_NULL(icu_collator);
UErrorCode status = U_ZERO_ERROR;
icu_collator->setAttribute(UCOL_CASE_FIRST, ToUColAttributeValue(case_first),
@@ -300,10 +310,9 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 14. Let caseFirst be ? GetOption(options, "caseFirst", "string",
// « "upper", "lower", "false" », undefined).
- Maybe<Intl::CaseFirst> maybe_case_first =
- Intl::GetCaseFirst(isolate, options, service);
+ Maybe<CaseFirst> maybe_case_first = GetCaseFirst(isolate, options, service);
MAYBE_RETURN(maybe_case_first, MaybeHandle<JSCollator>());
- Intl::CaseFirst case_first = maybe_case_first.FromJust();
+ CaseFirst case_first = maybe_case_first.FromJust();
// The relevant unicode extensions accepted by Collator as specified here:
// https://tc39.github.io/ecma402/#sec-intl-collator-internal-slots
@@ -314,9 +323,14 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 17. Let r be ResolveLocale(%Collator%.[[AvailableLocales]],
// requestedLocales, opt, %Collator%.[[RelevantExtensionKeys]],
// localeData).
- Intl::ResolvedLocale r =
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale =
Intl::ResolveLocale(isolate, JSCollator::GetAvailableLocales(),
requested_locales, matcher, relevant_extension_keys);
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSCollator);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
// 18. Set collator.[[Locale]] to r.[[locale]].
icu::Locale icu_locale = r.icu_locale;
@@ -364,11 +378,11 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
icu::Collator::createInstance(no_extension_locale, status));
if (U_FAILURE(status) || icu_collator.get() == nullptr) {
- FATAL("Failed to create ICU collator, are ICU data files missing?");
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSCollator);
}
}
DCHECK(U_SUCCESS(status));
- CHECK_NOT_NULL(icu_collator.get());
// 22. If relevantExtensionKeys contains "kn", then
// a. Set collator.[[Numeric]] to ! SameValue(r.[[kn]], "true").
@@ -392,7 +406,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// If the caseFirst value is passed in through the options object,
// then we use it. Otherwise, we check if the caseFirst value is
// passed in through the unicode extensions.
- if (case_first != Intl::CaseFirst::kUndefined) {
+ if (case_first != CaseFirst::kUndefined) {
SetCaseFirstOption(icu_collator.get(), case_first);
} else {
auto kf_extension_it = r.extensions.find("kf");
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index 3603a8ff4b..4ab9adb844 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -29,13 +29,13 @@ ACCESSORS(JSDateTimeFormat, icu_date_interval_format,
ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset)
SMI_ACCESSORS(JSDateTimeFormat, flags, kFlagsOffset)
-inline void JSDateTimeFormat::set_hour_cycle(Intl::HourCycle hour_cycle) {
+inline void JSDateTimeFormat::set_hour_cycle(HourCycle hour_cycle) {
int hints = flags();
hints = HourCycleBits::update(hints, hour_cycle);
set_flags(hints);
}
-inline Intl::HourCycle JSDateTimeFormat::hour_cycle() const {
+inline JSDateTimeFormat::HourCycle JSDateTimeFormat::hour_cycle() const {
return HourCycleBits::decode(flags());
}
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 06925c68d2..5643ee57d5 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -34,6 +34,24 @@ namespace internal {
namespace {
+JSDateTimeFormat::HourCycle ToHourCycle(const std::string& hc) {
+ if (hc == "h11") return JSDateTimeFormat::HourCycle::kH11;
+ if (hc == "h12") return JSDateTimeFormat::HourCycle::kH12;
+ if (hc == "h23") return JSDateTimeFormat::HourCycle::kH23;
+ if (hc == "h24") return JSDateTimeFormat::HourCycle::kH24;
+ return JSDateTimeFormat::HourCycle::kUndefined;
+}
+
+Maybe<JSDateTimeFormat::HourCycle> GetHourCycle(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* method) {
+ return Intl::GetStringOption<JSDateTimeFormat::HourCycle>(
+ isolate, options, "hourCycle", method, {"h11", "h12", "h23", "h24"},
+ {JSDateTimeFormat::HourCycle::kH11, JSDateTimeFormat::HourCycle::kH12,
+ JSDateTimeFormat::HourCycle::kH23, JSDateTimeFormat::HourCycle::kH24},
+ JSDateTimeFormat::HourCycle::kUndefined);
+}
+
class PatternMap {
public:
PatternMap(std::string pattern, std::string value)
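The GetHourCycle helper added above delegates to Intl::GetStringOption<T>, which matches an option string against parallel lists of names and enum values and falls back to a default when the option is absent. A simplified standalone sketch of that lookup shape (the real helper also reads the property off a JSReceiver and signals a RangeError for invalid values):

#include <optional>
#include <string>
#include <vector>

enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };

// Matches |value| against parallel name/enum lists; an empty value means the
// option was absent, an unknown value maps to nullopt (caller throws).
template <typename T>
std::optional<T> GetStringOption(const std::string& value,
                                 const std::vector<std::string>& names,
                                 const std::vector<T>& enums, T fallback) {
  if (value.empty()) return fallback;
  for (size_t i = 0; i < names.size(); ++i) {
    if (names[i] == value) return enums[i];
  }
  return std::nullopt;
}

// Mirrors GetHourCycle():
// GetStringOption<HourCycle>("h23", {"h11", "h12", "h23", "h24"},
//     {HourCycle::kH11, HourCycle::kH12, HourCycle::kH23, HourCycle::kH24},
//     HourCycle::kUndefined) == HourCycle::kH23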
@@ -210,29 +228,30 @@ DEFFINE_TRAIT(H24Trait, "kk", "k")
DEFFINE_TRAIT(HDefaultTrait, "jj", "j")
#undef DEFFINE_TRAIT
-const std::vector<PatternData>& GetPatternData(Intl::HourCycle hour_cycle) {
+const std::vector<PatternData>& GetPatternData(
+ JSDateTimeFormat::HourCycle hour_cycle) {
switch (hour_cycle) {
- case Intl::HourCycle::kH11: {
+ case JSDateTimeFormat::HourCycle::kH11: {
static base::LazyInstance<Pattern, H11Trait>::type h11 =
LAZY_INSTANCE_INITIALIZER;
return h11.Pointer()->Get();
}
- case Intl::HourCycle::kH12: {
+ case JSDateTimeFormat::HourCycle::kH12: {
static base::LazyInstance<Pattern, H12Trait>::type h12 =
LAZY_INSTANCE_INITIALIZER;
return h12.Pointer()->Get();
}
- case Intl::HourCycle::kH23: {
+ case JSDateTimeFormat::HourCycle::kH23: {
static base::LazyInstance<Pattern, H23Trait>::type h23 =
LAZY_INSTANCE_INITIALIZER;
return h23.Pointer()->Get();
}
- case Intl::HourCycle::kH24: {
+ case JSDateTimeFormat::HourCycle::kH24: {
static base::LazyInstance<Pattern, H24Trait>::type h24 =
LAZY_INSTANCE_INITIALIZER;
return h24.Pointer()->Get();
}
- case Intl::HourCycle::kUndefined: {
+ case JSDateTimeFormat::HourCycle::kUndefined: {
static base::LazyInstance<Pattern, HDefaultTrait>::type hDefault =
LAZY_INSTANCE_INITIALIZER;
return hDefault.Pointer()->Get();
@@ -320,10 +339,9 @@ std::string ToTitleCaseTimezoneLocation(Isolate* isolate,
return title_cased;
}
-} // namespace
-
-std::string JSDateTimeFormat::CanonicalizeTimeZoneID(Isolate* isolate,
- const std::string& input) {
+// Returns the time zone id which matches ICU's expectation of title casing;
+// returns an empty string on error.
+std::string CanonicalizeTimeZoneID(Isolate* isolate, const std::string& input) {
std::string upper = input;
transform(upper.begin(), upper.end(), upper.begin(),
LocaleIndependentAsciiToUpper);
@@ -344,8 +362,6 @@ std::string JSDateTimeFormat::CanonicalizeTimeZoneID(Isolate* isolate,
return ToTitleCaseTimezoneLocation(isolate, input);
}
-namespace {
-
Handle<String> DateTimeStyleAsString(Isolate* isolate,
JSDateTimeFormat::DateTimeStyle style) {
switch (style) {
@@ -485,32 +501,32 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
.FromJust());
// 5.b.i. Let hc be dtf.[[HourCycle]].
- Intl::HourCycle hc = date_time_format->hour_cycle();
+ HourCycle hc = date_time_format->hour_cycle();
- if (hc != Intl::HourCycle::kUndefined) {
+ if (hc != HourCycle::kUndefined) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->hourCycle_string(),
date_time_format->HourCycleAsString(), Just(kDontThrow))
.FromJust());
switch (hc) {
// ii. If hc is "h11" or "h12", let v be true.
- case Intl::HourCycle::kH11:
- case Intl::HourCycle::kH12:
+ case HourCycle::kH11:
+ case HourCycle::kH12:
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->hour12_string(),
factory->true_value(), Just(kDontThrow))
.FromJust());
break;
// iii. Else if, hc is "h23" or "h24", let v be false.
- case Intl::HourCycle::kH23:
- case Intl::HourCycle::kH24:
+ case HourCycle::kH23:
+ case HourCycle::kH24:
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->hour12_string(),
factory->false_value(), Just(kDontThrow))
.FromJust());
break;
// iv. Else, let v be undefined.
- case Intl::HourCycle::kUndefined:
+ case HourCycle::kUndefined:
break;
}
}
@@ -860,8 +876,7 @@ std::unique_ptr<icu::TimeZone> CreateTimeZone(Isolate* isolate,
// 19.a. Else / Let timeZone be DefaultTimeZone().
return std::unique_ptr<icu::TimeZone>(icu::TimeZone::createDefault());
}
- std::string canonicalized =
- JSDateTimeFormat::CanonicalizeTimeZoneID(isolate, timezone);
+ std::string canonicalized = CanonicalizeTimeZoneID(isolate, timezone);
if (canonicalized.empty()) return std::unique_ptr<icu::TimeZone>();
std::unique_ptr<icu::TimeZone> tz(
icu::TimeZone::createTimeZone(canonicalized.c_str()));
@@ -925,21 +940,21 @@ icu::Calendar* CreateCalendar(Isolate* isolate, const icu::Locale& icu_locale,
}
icu::UnicodeString ReplaceHourCycleInPattern(icu::UnicodeString pattern,
- Intl::HourCycle hc) {
+ JSDateTimeFormat::HourCycle hc) {
char16_t replacement;
switch (hc) {
- case Intl::HourCycle::kUndefined:
+ case JSDateTimeFormat::HourCycle::kUndefined:
return pattern;
- case Intl::HourCycle::kH11:
+ case JSDateTimeFormat::HourCycle::kH11:
replacement = 'K';
break;
- case Intl::HourCycle::kH12:
+ case JSDateTimeFormat::HourCycle::kH12:
replacement = 'h';
break;
- case Intl::HourCycle::kH23:
+ case JSDateTimeFormat::HourCycle::kH23:
replacement = 'H';
break;
- case Intl::HourCycle::kH24:
+ case JSDateTimeFormat::HourCycle::kH24:
replacement = 'k';
break;
}
@@ -971,7 +986,7 @@ icu::UnicodeString ReplaceHourCycleInPattern(icu::UnicodeString pattern,
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator* generator, Intl::HourCycle hc) {
+ icu::DateTimePatternGenerator* generator, JSDateTimeFormat::HourCycle hc) {
// See https://github.com/tc39/ecma402/issues/225 . The best pattern
// generation needs to be done in the base locale according to the
// current spec however odd it may be. See also crbug.com/826549 .
@@ -1005,7 +1020,7 @@ class DateFormatCache {
icu::SimpleDateFormat* Create(const icu::Locale& icu_locale,
const icu::UnicodeString& skeleton,
icu::DateTimePatternGenerator* generator,
- Intl::HourCycle hc) {
+ JSDateTimeFormat::HourCycle hc) {
std::string key;
skeleton.toUTF8String<std::string>(key);
key += ":";
@@ -1034,7 +1049,7 @@ class DateFormatCache {
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache(
const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator* generator, Intl::HourCycle hc) {
+ icu::DateTimePatternGenerator* generator, JSDateTimeFormat::HourCycle hc) {
static base::LazyInstance<DateFormatCache>::type cache =
LAZY_INSTANCE_INITIALIZER;
return std::unique_ptr<icu::SimpleDateFormat>(
@@ -1078,7 +1093,8 @@ icu::DateIntervalFormat* LazyCreateDateIntervalFormat(
return (*managed_interval_format).raw();
}
-Intl::HourCycle HourCycleFromPattern(const icu::UnicodeString pattern) {
+JSDateTimeFormat::HourCycle HourCycleFromPattern(
+ const icu::UnicodeString pattern) {
bool in_quote = false;
for (int32_t i = 0; i < pattern.length(); i++) {
char16_t ch = pattern[i];
@@ -1087,20 +1103,20 @@ Intl::HourCycle HourCycleFromPattern(const icu::UnicodeString pattern) {
in_quote = !in_quote;
break;
case 'K':
- if (!in_quote) return Intl::HourCycle::kH11;
+ if (!in_quote) return JSDateTimeFormat::HourCycle::kH11;
break;
case 'h':
- if (!in_quote) return Intl::HourCycle::kH12;
+ if (!in_quote) return JSDateTimeFormat::HourCycle::kH12;
break;
case 'H':
- if (!in_quote) return Intl::HourCycle::kH23;
+ if (!in_quote) return JSDateTimeFormat::HourCycle::kH23;
break;
case 'k':
- if (!in_quote) return Intl::HourCycle::kH24;
+ if (!in_quote) return JSDateTimeFormat::HourCycle::kH24;
break;
}
}
- return Intl::HourCycle::kUndefined;
+ return JSDateTimeFormat::HourCycle::kUndefined;
}
icu::DateFormat::EStyle DateTimeStyleToEStyle(
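HourCycleFromPattern, rewritten above to return the class-scoped enum, scans an ICU date pattern for the first unquoted hour field: K means h11, h means h12, H means h23, k means h24, and apostrophes toggle literal-text mode so quoted letters are skipped. The same scan over a plain std::string (a sketch; the real code walks an icu::UnicodeString):

#include <string>

enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };

HourCycle HourCycleFromPattern(const std::string& pattern) {
  bool in_quote = false;
  for (char ch : pattern) {
    switch (ch) {
      case '\'': in_quote = !in_quote; break;  // toggles literal text
      case 'K': if (!in_quote) return HourCycle::kH11; break;
      case 'h': if (!in_quote) return HourCycle::kH12; break;
      case 'H': if (!in_quote) return HourCycle::kH23; break;
      case 'k': if (!in_quote) return HourCycle::kH24; break;
    }
  }
  return HourCycle::kUndefined;
}

// HourCycleFromPattern("HH:mm") == HourCycle::kH23
// HourCycleFromPattern("'Hour' h:mm") == HourCycle::kH12 (quoted H skipped)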
@@ -1120,23 +1136,23 @@ icu::DateFormat::EStyle DateTimeStyleToEStyle(
}
icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input,
- Intl::HourCycle hc) {
+ JSDateTimeFormat::HourCycle hc) {
icu::UnicodeString result;
char16_t to;
switch (hc) {
- case Intl::HourCycle::kH11:
+ case JSDateTimeFormat::HourCycle::kH11:
to = 'K';
break;
- case Intl::HourCycle::kH12:
+ case JSDateTimeFormat::HourCycle::kH12:
to = 'h';
break;
- case Intl::HourCycle::kH23:
+ case JSDateTimeFormat::HourCycle::kH23:
to = 'H';
break;
- case Intl::HourCycle::kH24:
+ case JSDateTimeFormat::HourCycle::kH24:
to = 'k';
break;
- case Intl::HourCycle::kUndefined:
+ case JSDateTimeFormat::HourCycle::kUndefined:
UNREACHABLE();
}
for (int32_t i = 0; i < input.length(); i++) {
@@ -1170,7 +1186,7 @@ icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input,
std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
JSDateTimeFormat::DateTimeStyle date_style,
JSDateTimeFormat::DateTimeStyle time_style, const icu::Locale& icu_locale,
- Intl::HourCycle hc, icu::DateTimePatternGenerator* generator) {
+ JSDateTimeFormat::HourCycle hc, icu::DateTimePatternGenerator* generator) {
std::unique_ptr<icu::SimpleDateFormat> result;
if (date_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
@@ -1340,15 +1356,14 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 7. Let hourCycle be ? GetOption(options, "hourCycle", "string", « "h11",
// "h12", "h23", "h24" », undefined).
- Maybe<Intl::HourCycle> maybe_hour_cycle =
- Intl::GetHourCycle(isolate, options, service);
+ Maybe<HourCycle> maybe_hour_cycle = GetHourCycle(isolate, options, service);
MAYBE_RETURN(maybe_hour_cycle, MaybeHandle<JSDateTimeFormat>());
- Intl::HourCycle hour_cycle = maybe_hour_cycle.FromJust();
+ HourCycle hour_cycle = maybe_hour_cycle.FromJust();
// 8. If hour12 is not undefined, then
if (maybe_get_hour12.FromJust()) {
// a. Let hourCycle be null.
- hour_cycle = Intl::HourCycle::kUndefined;
+ hour_cycle = HourCycle::kUndefined;
}
// 9. Set opt.[[hc]] to hourCycle.
@@ -1362,9 +1377,14 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// requestedLocales, opt, %DateTimeFormat%.[[RelevantExtensionKeys]],
// localeData).
//
- Intl::ResolvedLocale r = Intl::ResolveLocale(
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale = Intl::ResolveLocale(
isolate, JSDateTimeFormat::GetAvailableLocales(), requested_locales,
locale_matcher, relevant_extension_keys);
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSDateTimeFormat);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
@@ -1441,20 +1461,20 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 15.Let hcDefault be dataLocaleData.[[hourCycle]].
icu::UnicodeString hour_pattern = generator->getBestPattern("jjmm", status);
CHECK(U_SUCCESS(status));
- Intl::HourCycle hc_default = HourCycleFromPattern(hour_pattern);
+ HourCycle hc_default = HourCycleFromPattern(hour_pattern);
// 16.Let hc be r.[[hc]].
- Intl::HourCycle hc = Intl::HourCycle::kUndefined;
- if (hour_cycle == Intl::HourCycle::kUndefined) {
+ HourCycle hc = HourCycle::kUndefined;
+ if (hour_cycle == HourCycle::kUndefined) {
auto hc_extension_it = r.extensions.find("hc");
if (hc_extension_it != r.extensions.end()) {
- hc = Intl::ToHourCycle(hc_extension_it->second.c_str());
+ hc = ToHourCycle(hc_extension_it->second.c_str());
}
} else {
hc = hour_cycle;
}
// 17. If hc is null, then
- if (hc == Intl::HourCycle::kUndefined) {
+ if (hc == HourCycle::kUndefined) {
// a. Set hc to hcDefault.
hc = hc_default;
}
@@ -1464,26 +1484,24 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// a. If hour12 is true, then
if (hour12) {
// i. If hcDefault is "h11" or "h23", then
- if (hc_default == Intl::HourCycle::kH11 ||
- hc_default == Intl::HourCycle::kH23) {
+ if (hc_default == HourCycle::kH11 || hc_default == HourCycle::kH23) {
// 1. Set hc to "h11".
- hc = Intl::HourCycle::kH11;
+ hc = HourCycle::kH11;
// ii. Else,
} else {
// 1. Set hc to "h12".
- hc = Intl::HourCycle::kH12;
+ hc = HourCycle::kH12;
}
// b. Else,
} else {
// ii. If hcDefault is "h11" or "h23", then
- if (hc_default == Intl::HourCycle::kH11 ||
- hc_default == Intl::HourCycle::kH23) {
+ if (hc_default == HourCycle::kH11 || hc_default == HourCycle::kH23) {
// 1. Set hc to "h23".
- hc = Intl::HourCycle::kH23;
+ hc = HourCycle::kH23;
// iii. Else,
} else {
// 1. Set hc to "h24".
- hc = Intl::HourCycle::kH24;
+ hc = HourCycle::kH24;
}
}
}
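The branch above is ECMA-402's hour12-to-hourCycle mapping: with hour12 true, a zero-based locale default (h11/h23) resolves to h11 and a one-based default (h12/h24) to h12; with hour12 false, the same split resolves to h23 and h24. The decision table as a standalone sketch:

enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };

HourCycle ResolveHour12(bool hour12, HourCycle hc_default) {
  bool zero_based_default =
      hc_default == HourCycle::kH11 || hc_default == HourCycle::kH23;
  if (hour12) {
    return zero_based_default ? HourCycle::kH11 : HourCycle::kH12;
  }
  return zero_based_default ? HourCycle::kH23 : HourCycle::kH24;
}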
@@ -1522,6 +1540,10 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 32. If dateStyle or timeStyle are not undefined, then
if (date_style != DateTimeStyle::kUndefined ||
time_style != DateTimeStyle::kUndefined) {
+ // Track use of the newer dateStyle/timeStyle options.
+ isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kDateTimeFormatDateTimeStyle);
+
icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale,
hc, generator.get());
}
@@ -1583,14 +1605,15 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
icu_date_format = CreateICUDateFormatFromCache(icu_locale, skeleton_ustr,
generator.get(), hc);
if (icu_date_format.get() == nullptr) {
- FATAL("Failed to create ICU date format, are ICU data files missing?");
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSDateTimeFormat);
}
}
// g. If dateTimeFormat.[[Hour]] is not undefined, then
if (!has_hour_option) {
// h. Else, i. Set dateTimeFormat.[[HourCycle]] to undefined.
- hc = Intl::HourCycle::kUndefined;
+ hc = HourCycle::kUndefined;
}
}
@@ -1613,10 +1636,10 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
//
// See details in https://github.com/tc39/test262/pull/2035
if (maybe_get_hour12.FromJust() ||
- maybe_hour_cycle.FromJust() != Intl::HourCycle::kUndefined) {
+ maybe_hour_cycle.FromJust() != HourCycle::kUndefined) {
auto hc_extension_it = r.extensions.find("hc");
if (hc_extension_it != r.extensions.end()) {
- if (hc != Intl::ToHourCycle(hc_extension_it->second.c_str())) {
+ if (hc != ToHourCycle(hc_extension_it->second.c_str())) {
// Remove -hc- if it does not agree with what we used.
UErrorCode status = U_ZERO_ERROR;
resolved_locale.setUnicodeKeywordValue("hc", nullptr, status);
@@ -1790,15 +1813,15 @@ const std::set<std::string>& JSDateTimeFormat::GetAvailableLocales() {
Handle<String> JSDateTimeFormat::HourCycleAsString() const {
switch (hour_cycle()) {
- case Intl::HourCycle::kUndefined:
+ case HourCycle::kUndefined:
return GetReadOnlyRoots().undefined_string_handle();
- case Intl::HourCycle::kH11:
+ case HourCycle::kH11:
return GetReadOnlyRoots().h11_string_handle();
- case Intl::HourCycle::kH12:
+ case HourCycle::kH12:
return GetReadOnlyRoots().h12_string_handle();
- case Intl::HourCycle::kH23:
+ case HourCycle::kH23:
return GetReadOnlyRoots().h23_string_handle();
- case Intl::HourCycle::kH24:
+ case HourCycle::kH24:
return GetReadOnlyRoots().h24_string_handle();
default:
UNREACHABLE();
@@ -1929,6 +1952,9 @@ MaybeHandle<T> FormatRangeCommon(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
double y,
MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
+ // Track use of the newer features formatRange and formatRangeToParts.
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateTimeFormatRange);
+
// #sec-partitiondatetimerangepattern
// 1. Let x be TimeClip(x).
x = DateCache::TimeClip(x);
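Both CountUsage calls added in this file feed V8's use-counter machinery, which an embedder observes through v8::Isolate::SetUseCounterCallback. A minimal sketch of logging the two counters introduced by this patch (the callback registration is public V8 API; the logging policy here is made up, and the snippet assumes an embedder build with v8.h on the include path):

#include <cstdio>
#include "v8.h"

void OnUseCounter(v8::Isolate* isolate,
                  v8::Isolate::UseCounterFeature feature) {
  if (feature == v8::Isolate::UseCounterFeature::kDateTimeFormatRange ||
      feature ==
          v8::Isolate::UseCounterFeature::kDateTimeFormatDateTimeStyle) {
    std::fprintf(stderr, "Intl.DateTimeFormat new feature used: %d\n",
                 static_cast<int>(feature));
  }
}

// During isolate setup: isolate->SetUseCounterCallback(OnUseCounter);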
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index ae3783f246..ef50b71dc9 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -48,11 +48,6 @@ class JSDateTimeFormat : public JSObject {
static Maybe<std::string> OptionsToSkeleton(Isolate* isolate,
Handle<JSReceiver> options);
- // Return the time zone id which match ICU's expectation of title casing
- // return empty string when error.
- static std::string CanonicalizeTimeZoneID(Isolate* isolate,
- const std::string& input);
-
// ecma402/#sec-datetime-format-functions
// DateTime Format Functions
V8_WARN_UNUSED_RESULT static MaybeHandle<String> DateTimeFormat(
@@ -98,8 +93,11 @@ class JSDateTimeFormat : public JSObject {
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JS_DATE_TIME_FORMAT_FIELDS)
- inline void set_hour_cycle(Intl::HourCycle hour_cycle);
- inline Intl::HourCycle hour_cycle() const;
+ // Enum for the "hourCycle" option.
+ enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };
+
+ inline void set_hour_cycle(HourCycle hour_cycle);
+ inline HourCycle hour_cycle() const;
inline void set_date_style(DateTimeStyle date_style);
inline DateTimeStyle date_style() const;
@@ -107,20 +105,14 @@ class JSDateTimeFormat : public JSObject {
inline void set_time_style(DateTimeStyle time_style);
inline DateTimeStyle time_style() const;
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(HourCycleBits, Intl::HourCycle, 3, _) \
- V(DateStyleBits, DateTimeStyle, 3, _) \
- V(TimeStyleBits, DateTimeStyle, 3, _)
-
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_JS_DATE_TIME_FORMAT_FLAGS()
- STATIC_ASSERT(Intl::HourCycle::kUndefined <= HourCycleBits::kMax);
- STATIC_ASSERT(Intl::HourCycle::kH11 <= HourCycleBits::kMax);
- STATIC_ASSERT(Intl::HourCycle::kH12 <= HourCycleBits::kMax);
- STATIC_ASSERT(Intl::HourCycle::kH23 <= HourCycleBits::kMax);
- STATIC_ASSERT(Intl::HourCycle::kH24 <= HourCycleBits::kMax);
+ STATIC_ASSERT(HourCycle::kUndefined <= HourCycleBits::kMax);
+ STATIC_ASSERT(HourCycle::kH11 <= HourCycleBits::kMax);
+ STATIC_ASSERT(HourCycle::kH12 <= HourCycleBits::kMax);
+ STATIC_ASSERT(HourCycle::kH23 <= HourCycleBits::kMax);
+ STATIC_ASSERT(HourCycle::kH24 <= HourCycleBits::kMax);
STATIC_ASSERT(DateTimeStyle::kUndefined <= DateStyleBits::kMax);
STATIC_ASSERT(DateTimeStyle::kFull <= DateStyleBits::kMax);
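DEFINE_TORQUE_GENERATED_JS_DATE_TIME_FORMAT_FLAGS() replaces the hand-written FLAGS_BIT_FIELDS macro but yields the same shape: BitField helpers that pack the three 3-bit enums into the |flags| Smi, with a kMax constant for the STATIC_ASSERTs above. A hand-rolled sketch of that shape (V8's real helper is base::BitField):

#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint32_t kMax = (1u << kSize) - 1;
  static uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | (static_cast<uint32_t>(value) << kShift);
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> kShift);
  }
};

enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };
using HourCycleBits = BitField<HourCycle, 0, 3>;  // bits 0..2 of |flags|

static_assert(static_cast<uint32_t>(HourCycle::kH24) <= HourCycleBits::kMax,
              "enum must fit in the field");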
diff --git a/deps/v8/src/objects/js-display-names-inl.h b/deps/v8/src/objects/js-display-names-inl.h
index 08ad7c0838..103f3b388c 100644
--- a/deps/v8/src/objects/js-display-names-inl.h
+++ b/deps/v8/src/objects/js-display-names-inl.h
@@ -37,14 +37,14 @@ inline JSDisplayNames::Style JSDisplayNames::style() const {
}
inline void JSDisplayNames::set_fallback(Fallback fallback) {
- DCHECK_GE(FallbackBits::kMax, fallback);
+ DCHECK_GE(FallbackBit::kMax, fallback);
int hints = flags();
- hints = FallbackBits::update(hints, fallback);
+ hints = FallbackBit::update(hints, fallback);
set_flags(hints);
}
inline JSDisplayNames::Fallback JSDisplayNames::fallback() const {
- return FallbackBits::decode(flags());
+ return FallbackBit::decode(flags());
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-display-names.cc b/deps/v8/src/objects/js-display-names.cc
index 6350d5e98b..95f4641173 100644
--- a/deps/v8/src/objects/js-display-names.cc
+++ b/deps/v8/src/objects/js-display-names.cc
@@ -31,6 +31,7 @@
namespace v8 {
namespace internal {
+namespace {
// Type: identifying the types of the display names.
//
// ecma402/#sec-properties-of-intl-displaynames-instances
@@ -70,6 +71,8 @@ UDisplayContext ToUDisplayContext(JSDisplayNames::Style style) {
}
}
+} // anonymous namespace
+
// Abstract class for all different types.
class DisplayNamesInternal {
public:
@@ -82,6 +85,8 @@ class DisplayNamesInternal {
virtual const char* calendar() const { return nullptr; }
};
+namespace {
+
class LocaleDisplayNamesCommon : public DisplayNamesInternal {
public:
LocaleDisplayNamesCommon(const icu::Locale& locale,
@@ -474,6 +479,9 @@ DisplayNamesInternal* CreateInternal(const icu::Locale& locale,
UNREACHABLE();
}
}
+
+} // anonymous namespace
+
// ecma402 #sec-Intl.DisplayNames
MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
Handle<Map> map,
@@ -548,11 +556,16 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
std::set<std::string> relevant_extension_keys = {};
// 13. Let r be ResolveLocale(%DisplayNames%.[[AvailableLocales]],
// requestedLocales, opt, %DisplayNames%.[[RelevantExtensionKeys]]).
- Intl::ResolvedLocale r = Intl::ResolveLocale(
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale = Intl::ResolveLocale(
isolate, JSDisplayNames::GetAvailableLocales(), requested_locales,
matcher,
FLAG_harmony_intl_displaynames_date_types ? relevant_extension_keys_ca
: relevant_extension_keys);
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSDisplayNames);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
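A smaller theme across js-date-time-format.cc, js-display-names.cc, and js-list-format.cc: helpers that no longer need class or header visibility (CanonicalizeTimeZoneID, get_style, get_type, the display-names internals) are deleted or moved into anonymous namespaces, giving them internal linkage. The idiom in miniature:

// File-local helper: internal linkage, invisible to other translation units.
namespace {
int TwiceInternal(int x) { return 2 * x; }
}  // namespace

// Public entry point declared in the header.
int PublicApi(int x) { return TwiceInternal(x); }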
diff --git a/deps/v8/src/objects/js-display-names.h b/deps/v8/src/objects/js-display-names.h
index f3fc9c9282..c656a25779 100644
--- a/deps/v8/src/objects/js-display-names.h
+++ b/deps/v8/src/objects/js-display-names.h
@@ -67,18 +67,14 @@ class JSDisplayNames : public JSObject {
DECL_CAST(JSDisplayNames)
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(StyleBits, Style, 2, _) \
- V(FallbackBits, Fallback, 1, _)
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_JS_DISPLAY_NAMES_FLAGS()
STATIC_ASSERT(Style::kLong <= StyleBits::kMax);
STATIC_ASSERT(Style::kShort <= StyleBits::kMax);
STATIC_ASSERT(Style::kNarrow <= StyleBits::kMax);
- STATIC_ASSERT(Fallback::kCode <= FallbackBits::kMax);
- STATIC_ASSERT(Fallback::kNone <= FallbackBits::kMax);
+ STATIC_ASSERT(Fallback::kCode <= FallbackBit::kMax);
+ STATIC_ASSERT(Fallback::kNone <= FallbackBit::kMax);
// [flags] Bit field containing various flags about the function.
DECL_INT_ACCESSORS(flags)
diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h
index e3c57198c4..2d5e9fe03e 100644
--- a/deps/v8/src/objects/js-generator-inl.h
+++ b/deps/v8/src/objects/js-generator-inl.h
@@ -21,11 +21,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFunctionObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncGeneratorObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(AsyncGeneratorRequest)
-TQ_SMI_ACCESSORS(JSGeneratorObject, resume_mode)
-TQ_SMI_ACCESSORS(JSGeneratorObject, continuation)
-
-TQ_SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode)
-
bool JSGeneratorObject::is_suspended() const {
DCHECK_LT(kGeneratorExecuting, 0);
DCHECK_LT(kGeneratorClosed, 0);
@@ -40,8 +35,6 @@ bool JSGeneratorObject::is_executing() const {
return continuation() == kGeneratorExecuting;
}
-TQ_SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
index 12063c7335..bf35595fdd 100644
--- a/deps/v8/src/objects/js-generator.h
+++ b/deps/v8/src/objects/js-generator.h
@@ -20,17 +20,8 @@ class JSPromise;
class JSGeneratorObject
: public TorqueGeneratedJSGeneratorObject<JSGeneratorObject, JSObject> {
public:
- // [resume_mode]: The most recent resume mode.
enum ResumeMode { kNext, kReturn, kThrow };
- DECL_INT_ACCESSORS(resume_mode)
-
- // [continuation]
- //
- // A positive value indicates a suspended generator. The special
- // kGeneratorExecuting and kGeneratorClosed values indicate that a generator
- // cannot be resumed.
- inline int continuation() const;
- inline void set_continuation(int continuation);
+
inline bool is_closed() const;
inline bool is_executing() const;
inline bool is_suspended() const;
@@ -68,10 +59,6 @@ class JSAsyncGeneratorObject
DECL_VERIFIER(JSAsyncGeneratorObject)
DECL_PRINTER(JSAsyncGeneratorObject)
- // [is_awaiting]
- // Whether or not the generator is currently awaiting.
- DECL_INT_ACCESSORS(is_awaiting)
-
TQ_OBJECT_CONSTRUCTORS(JSAsyncGeneratorObject)
};
@@ -79,8 +66,6 @@ class AsyncGeneratorRequest
: public TorqueGeneratedAsyncGeneratorRequest<AsyncGeneratorRequest,
Struct> {
public:
- DECL_INT_ACCESSORS(resume_mode)
-
DECL_PRINTER(AsyncGeneratorRequest)
DECL_VERIFIER(AsyncGeneratorRequest)
diff --git a/deps/v8/src/objects/js-generator.tq b/deps/v8/src/objects/js-generator.tq
index bb8df21f0a..51725dc964 100644
--- a/deps/v8/src/objects/js-generator.tq
+++ b/deps/v8/src/objects/js-generator.tq
@@ -14,7 +14,12 @@ extern class JSGeneratorObject extends JSObject {
// suspended generator.
input_or_debug_pos: Object;
+ // The most recent resume mode.
resume_mode: Smi;
+
+ // A positive value indicates a suspended generator. The special
+ // kGeneratorExecuting and kGeneratorClosed values indicate that a generator
+ // cannot be resumed.
continuation: Smi;
// Saved interpreter register file.
@@ -31,6 +36,7 @@ extern class JSAsyncGeneratorObject extends JSGeneratorObject {
// Pointer to the head of a singly linked list of AsyncGeneratorRequest, or
// undefined.
queue: HeapObject;
+ // Whether or not the generator is currently awaiting.
is_awaiting: Smi;
}
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 90ab9ebc43..978f0ea38f 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -75,38 +75,6 @@ const char* GetIcuStyleString(JSListFormat::Style style,
} // namespace
-JSListFormat::Style get_style(const char* str) {
- switch (str[0]) {
- case 'n':
- if (strcmp(&str[1], "arrow") == 0) return JSListFormat::Style::NARROW;
- break;
- case 'l':
- if (strcmp(&str[1], "ong") == 0) return JSListFormat::Style::LONG;
- break;
- case 's':
- if (strcmp(&str[1], "hort") == 0) return JSListFormat::Style::SHORT;
- break;
- }
- UNREACHABLE();
-}
-
-JSListFormat::Type get_type(const char* str) {
- switch (str[0]) {
- case 'c':
- if (strcmp(&str[1], "onjunction") == 0)
- return JSListFormat::Type::CONJUNCTION;
- break;
- case 'd':
- if (strcmp(&str[1], "isjunction") == 0)
- return JSListFormat::Type::DISJUNCTION;
- break;
- case 'u':
- if (strcmp(&str[1], "nit") == 0) return JSListFormat::Type::UNIT;
- break;
- }
- UNREACHABLE();
-}
-
MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
Handle<Object> locales,
Handle<Object> input_options) {
@@ -144,9 +112,14 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
// 10. Let r be ResolveLocale(%ListFormat%.[[AvailableLocales]],
// requestedLocales, opt, undefined, localeData).
- Intl::ResolvedLocale r =
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale =
Intl::ResolveLocale(isolate, JSListFormat::GetAvailableLocales(),
requested_locales, matcher, {});
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSListFormat);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
@@ -171,11 +144,11 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
UErrorCode status = U_ZERO_ERROR;
icu::ListFormatter* formatter = icu::ListFormatter::createInstance(
icu_locale, GetIcuStyleString(style_enum, type_enum), status);
- if (U_FAILURE(status)) {
+ if (U_FAILURE(status) || formatter == nullptr) {
delete formatter;
- FATAL("Failed to create ICU list formatter, are ICU data files missing?");
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSListFormat);
}
- CHECK_NOT_NULL(formatter);
Handle<Managed<icu::ListFormatter>> managed_formatter =
Managed<icu::ListFormatter>::FromRawPtr(isolate, 0, formatter);
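The hunk above must delete the half-constructed formatter by hand before throwing. For comparison, a sketch of the same guard with std::unique_ptr, where stack unwinding frees the object implicitly; the patch keeps the raw pointer because it hands ownership to Managed<icu::ListFormatter> on the success path:

#include <memory>
#include <stdexcept>

struct ListFormatter {};  // stand-in for icu::ListFormatter

ListFormatter* CreateInstance(bool* failed) {
  *failed = true;  // simulate U_FAILURE(status)
  return new ListFormatter();
}

std::unique_ptr<ListFormatter> NewListFormatter() {
  bool failed = false;
  std::unique_ptr<ListFormatter> formatter(CreateInstance(&failed));
  if (failed || formatter == nullptr) {
    // unique_ptr releases the partial object during unwind; no manual delete.
    throw std::range_error("Internal ICU error");
  }
  return formatter;
}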
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index c2392969d3..0040bccb97 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -83,12 +83,8 @@ class JSListFormat : public JSObject {
inline void set_type(Type type);
inline Type type() const;
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(StyleBits, Style, 2, _) \
- V(TypeBits, Type, 2, _)
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_JS_LIST_FORMAT_FLAGS()
STATIC_ASSERT(Style::LONG <= StyleBits::kMax);
STATIC_ASSERT(Style::SHORT <= StyleBits::kMax);
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index f373013e61..9cd16c35ed 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -21,25 +21,11 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat, JSObject)
ACCESSORS(JSNumberFormat, locale, String, kLocaleOffset)
-ACCESSORS(JSNumberFormat, numberingSystem, String, kNumberingSystemOffset)
ACCESSORS(JSNumberFormat, icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>,
kIcuNumberFormatterOffset)
ACCESSORS(JSNumberFormat, bound_format, Object, kBoundFormatOffset)
-SMI_ACCESSORS(JSNumberFormat, flags, kFlagsOffset)
-
-inline void JSNumberFormat::set_style(Style style) {
- DCHECK_GE(StyleBits::kMax, style);
- int hints = flags();
- hints = StyleBits::update(hints, style);
- set_flags(hints);
-}
-
-inline JSNumberFormat::Style JSNumberFormat::style() const {
- return StyleBits::decode(flags());
-}
-
CAST_ACCESSOR(JSNumberFormat)
} // namespace internal
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index ced408aa17..7d50f947f7 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -21,6 +21,7 @@
#include "unicode/nounit.h"
#include "unicode/numberformatter.h"
#include "unicode/numfmt.h"
+#include "unicode/numsys.h"
#include "unicode/ucurr.h"
#include "unicode/uloc.h"
#include "unicode/unumberformatter.h"
@@ -31,6 +32,10 @@ namespace internal {
namespace {
+// [[Style]] is one of the values "decimal", "percent", "currency",
+// or "unit" identifying the style of the number format.
+enum class Style { DECIMAL, PERCENT, CURRENCY, UNIT };
+
// [[CurrencyDisplay]] is one of the values "code", "symbol", "name",
// or "narrowSymbol" identifying the display of the currency number format.
enum class CurrencyDisplay {
@@ -305,15 +310,15 @@ bool IsWellFormedCurrencyCode(const std::string& currency) {
}
// Return the style as a String.
-Handle<String> StyleAsString(Isolate* isolate, JSNumberFormat::Style style) {
+Handle<String> StyleAsString(Isolate* isolate, Style style) {
switch (style) {
- case JSNumberFormat::Style::PERCENT:
+ case Style::PERCENT:
return ReadOnlyRoots(isolate).percent_string_handle();
- case JSNumberFormat::Style::CURRENCY:
+ case Style::CURRENCY:
return ReadOnlyRoots(isolate).currency_string_handle();
- case JSNumberFormat::Style::UNIT:
+ case Style::UNIT:
return ReadOnlyRoots(isolate).unit_string_handle();
- case JSNumberFormat::Style::DECIMAL:
+ case Style::DECIMAL:
return ReadOnlyRoots(isolate).decimal_string_handle();
}
UNREACHABLE();
@@ -348,13 +353,25 @@ bool UseGroupingFromSkeleton(const icu::UnicodeString& skeleton) {
// Parse currency code from skeleton. For example, skeleton as
// "currency/TWD .00 rounding-mode-half-up unit-width-full-name;"
-std::string CurrencyFromSkeleton(const icu::UnicodeString& skeleton) {
- std::string str;
- str = skeleton.toUTF8String<std::string>(str);
- std::string search("currency/");
- size_t index = str.find(search);
- if (index == str.npos) return "";
- return str.substr(index + search.size(), 3);
+const icu::UnicodeString CurrencyFromSkeleton(
+ const icu::UnicodeString& skeleton) {
+ const char currency[] = "currency/";
+ int32_t index = skeleton.indexOf(currency);
+ if (index < 0) return "";
+ index += static_cast<int32_t>(std::strlen(currency));
+ return skeleton.tempSubString(index, 3);
+}
+
+const icu::UnicodeString NumberingSystemFromSkeleton(
+ const icu::UnicodeString& skeleton) {
+ const char numbering_system[] = "numbering-system/";
+ int32_t index = skeleton.indexOf(numbering_system);
+ if (index < 0) return "latn";
+ index += static_cast<int32_t>(std::strlen(numbering_system));
+ const icu::UnicodeString res = skeleton.tempSubString(index);
+ index = res.indexOf(" ");
+ if (index < 0) return res;
+ return res.tempSubString(0, index);
}
// Return CurrencySign as string based on skeleton.
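CurrencyFromSkeleton and NumberingSystemFromSkeleton above now stay in icu::UnicodeString (indexOf/tempSubString) instead of round-tripping through UTF-8. The token rules are simple prefix scans; an equivalent standalone sketch over std::string, with the same 3-letter currency slice and the "latn" default:

#include <string>

std::string CurrencyFromSkeleton(const std::string& skeleton) {
  const std::string key = "currency/";
  size_t i = skeleton.find(key);
  if (i == std::string::npos) return "";
  return skeleton.substr(i + key.size(), 3);  // ISO 4217 codes are 3 letters
}

std::string NumberingSystemFromSkeleton(const std::string& skeleton) {
  const std::string key = "numbering-system/";
  size_t i = skeleton.find(key);
  if (i == std::string::npos) return "latn";  // default numbering system
  i += key.size();
  size_t end = skeleton.find(' ', i);  // token runs to the next space
  return skeleton.substr(i, end == std::string::npos ? end : end - i);
}

// CurrencyFromSkeleton("currency/TWD .00 rounding-mode-half-up") == "TWD"
// NumberingSystemFromSkeleton("numbering-system/arab .00") == "arab"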
@@ -553,13 +570,13 @@ namespace {
std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
std::string str;
str = skeleton.toUTF8String<std::string>(str);
- // Special case for "percent" first.
- if (str.find("percent") != str.npos) {
- return "percent";
- }
std::string search("measure-unit/");
size_t begin = str.find(search);
if (begin == str.npos) {
+ // Special case for "percent".
+ if (str.find("percent") != str.npos) {
+ return "percent";
+ }
return "";
}
// Skip the type (ex: "length").
@@ -611,6 +628,19 @@ std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
return result + "-per-" + str.substr(begin, end - begin);
}
+Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
+ if (skeleton.indexOf("currency/") >= 0) {
+ return Style::CURRENCY;
+ }
+ if (skeleton.indexOf("measure-unit/") >= 0) {
+ return Style::UNIT;
+ }
+ if (skeleton.indexOf("percent ") >= 0) {
+ return Style::PERCENT;
+ }
+ return Style::DECIMAL;
+}
+
} // anonymous namespace
icu::number::LocalizedNumberFormatter
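StyleFromSkeleton recovers [[Style]] from the ICU skeleton, which is why the style flag storage can be deleted from the JSNumberFormat object below. The probe order is load-bearing: a unit skeleton such as "measure-unit/concentr-percent" also contains "percent", so currency and measure-unit are tested first; the relocated percent special case in UnitFromSkeleton above follows the same logic. Restated standalone:

#include <string>

enum class Style { DECIMAL, PERCENT, CURRENCY, UNIT };

Style StyleFromSkeleton(const std::string& skeleton) {
  // Order matters: "measure-unit/concentr-percent" must classify as UNIT,
  // so the bare "percent " probe runs only after the unit check fails.
  if (skeleton.find("currency/") != std::string::npos) return Style::CURRENCY;
  if (skeleton.find("measure-unit/") != std::string::npos) return Style::UNIT;
  if (skeleton.find("percent ") != std::string::npos) return Style::PERCENT;
  return Style::DECIMAL;
}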
@@ -656,8 +686,8 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
Handle<String> locale = Handle<String>(number_format->locale(), isolate);
- Handle<String> numberingSystem =
- Handle<String>(number_format->numberingSystem(), isolate);
+ const icu::UnicodeString numberingSystem_ustr =
+ NumberingSystemFromSkeleton(skeleton);
// 5. For each row of Table 4, except the header row, in table order, do
// Table 4: Resolved Options of NumberFormat Instances
// Internal Slot Property
@@ -676,21 +706,25 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
factory->locale_string(), locale,
Just(kDontThrow))
.FromJust());
+ Handle<String> numberingSystem_string;
+ CHECK(Intl::ToString(isolate, numberingSystem_ustr)
+ .ToHandle(&numberingSystem_string));
CHECK(JSReceiver::CreateDataProperty(isolate, options,
factory->numberingSystem_string(),
- numberingSystem, Just(kDontThrow))
+ numberingSystem_string, Just(kDontThrow))
.FromJust());
- JSNumberFormat::Style style = number_format->style();
+ Style style = StyleFromSkeleton(skeleton);
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->style_string(),
StyleAsString(isolate, style), Just(kDontThrow))
.FromJust());
- std::string currency = CurrencyFromSkeleton(skeleton);
- if (!currency.empty()) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->currency_string(),
- factory->NewStringFromAsciiChecked(currency.c_str()),
- Just(kDontThrow))
+ const icu::UnicodeString currency_ustr = CurrencyFromSkeleton(skeleton);
+ if (!currency_ustr.isEmpty()) {
+ Handle<String> currency_string;
+ CHECK(Intl::ToString(isolate, currency_ustr).ToHandle(&currency_string));
+ CHECK(JSReceiver::CreateDataProperty(isolate, options,
+ factory->currency_string(),
+ currency_string, Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
@@ -703,20 +737,20 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
.FromJust());
}
- if (style == JSNumberFormat::Style::UNIT) {
- std::string unit = UnitFromSkeleton(skeleton);
- if (!unit.empty()) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->unit_string(),
- isolate->factory()->NewStringFromAsciiChecked(unit.c_str()),
- Just(kDontThrow))
- .FromJust());
- }
+ if (style == Style::UNIT) {
+ std::string unit = UnitFromSkeleton(skeleton);
+ if (!unit.empty()) {
CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->unitDisplay_string(),
- UnitDisplayString(isolate, skeleton), Just(kDontThrow))
+ isolate, options, factory->unit_string(),
+ isolate->factory()->NewStringFromAsciiChecked(unit.c_str()),
+ Just(kDontThrow))
.FromJust());
}
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->unitDisplay_string(),
+ UnitDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
CHECK(
JSReceiver::CreateDataProperty(
@@ -857,9 +891,14 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// requestedLocales, opt, %NumberFormat%.[[RelevantExtensionKeys]],
// localeData).
std::set<std::string> relevant_extension_keys{"nu"};
- Intl::ResolvedLocale r =
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale =
Intl::ResolveLocale(isolate, JSNumberFormat::GetAvailableLocales(),
requested_locales, matcher, relevant_extension_keys);
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSNumberFormat);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
@@ -884,9 +923,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
CHECK(U_SUCCESS(status));
}
- Handle<String> numberingSystem_str =
- isolate->factory()->NewStringFromAsciiChecked(
- Intl::GetNumberingSystem(icu_locale).c_str());
+ std::string numbering_system = Intl::GetNumberingSystem(icu_locale);
// 11. Let dataLocale be r.[[dataLocale]].
@@ -894,22 +931,31 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
icu::number::NumberFormatter::withLocale(icu_locale)
.roundingMode(UNUM_ROUND_HALFUP);
- // 12. Let style be ? GetOption(options, "style", "string", « "decimal",
+ // For the 'latn' numbering system, skip the adoptSymbols call, which would
+ // cause a 10.1%-13.7% regression on JSTests/Intl-NewIntlNumberFormat, and
+ // rely on the default symbols instead. See crbug/1052751.
+ if (!numbering_system.empty() && numbering_system != "latn") {
+ icu_number_formatter = icu_number_formatter.adoptSymbols(
+ icu::NumberingSystem::createInstanceByName(numbering_system.c_str(),
+ status));
+ CHECK(U_SUCCESS(status));
+ }
+
+ // 3. Let style be ? GetOption(options, "style", "string", « "decimal",
// "percent", "currency", "unit" », "decimal").
- Maybe<JSNumberFormat::Style> maybe_style =
- Intl::GetStringOption<JSNumberFormat::Style>(
- isolate, options, "style", service,
- {"decimal", "percent", "currency", "unit"},
- {JSNumberFormat::Style::DECIMAL, JSNumberFormat::Style::PERCENT,
- JSNumberFormat::Style::CURRENCY, JSNumberFormat::Style::UNIT},
- JSNumberFormat::Style::DECIMAL);
+ Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ isolate, options, "style", service,
+ {"decimal", "percent", "currency", "unit"},
+ {Style::DECIMAL, Style::PERCENT, Style::CURRENCY, Style::UNIT},
+ Style::DECIMAL);
MAYBE_RETURN(maybe_style, MaybeHandle<JSNumberFormat>());
- JSNumberFormat::Style style = maybe_style.FromJust();
+ Style style = maybe_style.FromJust();
- // 13. Set numberFormat.[[Style]] to style.
+ // 4. Set intlObj.[[Style]] to style.
- // 14. Let currency be ? GetOption(options, "currency", "string", undefined,
+ // 5. Let currency be ? GetOption(options, "currency", "string", undefined,
// undefined).
std::unique_ptr<char[]> currency_cstr;
const std::vector<const char*> empty_values = {};
@@ -918,11 +964,11 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
MAYBE_RETURN(found_currency, MaybeHandle<JSNumberFormat>());
std::string currency;
- // 15. If currency is not undefined, then
+ // 6. If currency is not undefined, then
if (found_currency.FromJust()) {
DCHECK_NOT_NULL(currency_cstr.get());
currency = currency_cstr.get();
- // 15. a. If the result of IsWellFormedCurrencyCode(currency) is false,
+ // 6. a. If the result of IsWellFormedCurrencyCode(currency) is false,
// throw a RangeError exception.
if (!IsWellFormedCurrencyCode(currency)) {
THROW_NEW_ERROR(
@@ -934,25 +980,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
}
- // 16. If style is "currency" and currency is undefined, throw a TypeError
- // exception.
- if (style == JSNumberFormat::Style::CURRENCY && !found_currency.FromJust()) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCurrencyCode),
- JSNumberFormat);
- }
- // 17. If style is "currency", then
- int c_digits = 0;
- icu::UnicodeString currency_ustr;
- if (style == JSNumberFormat::Style::CURRENCY) {
- // a. Let currency be the result of converting currency to upper case as
- // specified in 6.1
- std::transform(currency.begin(), currency.end(), currency.begin(), toupper);
- // c. Let cDigits be CurrencyDigits(currency).
- currency_ustr = currency.c_str();
- c_digits = CurrencyDigits(currency_ustr);
- }
-
- // 18. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
+ // 7. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
// "string", « "code", "symbol", "name", "narrowSymbol" », "symbol").
Maybe<CurrencyDisplay> maybe_currency_display =
Intl::GetStringOption<CurrencyDisplay>(
@@ -965,7 +993,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
CurrencyDisplay currency_display = maybe_currency_display.FromJust();
CurrencySign currency_sign = CurrencySign::STANDARD;
- // Let currencySign be ? GetOption(options, "currencySign", "string", «
+ // 8. Let currencySign be ? GetOption(options, "currencySign", "string", «
// "standard", "accounting" », "standard").
Maybe<CurrencySign> maybe_currency_sign = Intl::GetStringOption<CurrencySign>(
isolate, options, "currencySign", service, {"standard", "accounting"},
@@ -974,7 +1002,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
MAYBE_RETURN(maybe_currency_sign, MaybeHandle<JSNumberFormat>());
currency_sign = maybe_currency_sign.FromJust();
- // Let unit be ? GetOption(options, "unit", "string", undefined, undefined).
+ // 9. Let unit be ? GetOption(options, "unit", "string", undefined,
+ // undefined).
std::unique_ptr<char[]> unit_cstr;
Maybe<bool> found_unit = Intl::GetStringOption(
isolate, options, "unit", empty_values, service, &unit_cstr);
@@ -985,8 +1014,21 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
DCHECK_NOT_NULL(unit_cstr.get());
unit = unit_cstr.get();
}
-
- // Let unitDisplay be ? GetOption(options, "unitDisplay", "string", «
+ // 10. If unit is not undefined, then
+ // 10.a If the result of IsWellFormedUnitIdentifier(unit) is false, throw a
+ // RangeError exception.
+ Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> maybe_wellformed_unit =
+ IsWellFormedUnitIdentifier(isolate, unit);
+ if (found_unit.FromJust() && maybe_wellformed_unit.IsNothing()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidUnit,
+ factory->NewStringFromAsciiChecked(service),
+ factory->NewStringFromAsciiChecked(unit.c_str())),
+ JSNumberFormat);
+ }
+
+ // 11. Let unitDisplay be ? GetOption(options, "unitDisplay", "string", «
// "short", "narrow", "long" », "short").
Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>(
isolate, options, "unitDisplay", service, {"short", "narrow", "long"},
@@ -995,9 +1037,46 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>());
UnitDisplay unit_display = maybe_unit_display.FromJust();
- // If style is "unit", then
- if (style == JSNumberFormat::Style::UNIT) {
- // If unit is undefined, throw a TypeError exception.
+ // 12. If style is "currency", then
+ icu::UnicodeString currency_ustr;
+ if (style == Style::CURRENCY) {
+ // 12.a. If currency is undefined, throw a TypeError exception.
+ if (!found_currency.FromJust()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCurrencyCode),
+ JSNumberFormat);
+ }
+ // 12.b. Let currency be the result of converting currency to upper case as
+ // specified in 6.1
+ std::transform(currency.begin(), currency.end(), currency.begin(), toupper);
+ currency_ustr = currency.c_str();
+
+ // 12.c. Set numberFormat.[[Currency]] to currency.
+ if (!currency_ustr.isEmpty()) {
+ Handle<String> currency_string;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, currency_string,
+ Intl::ToString(isolate, currency_ustr),
+ JSNumberFormat);
+
+ icu_number_formatter = icu_number_formatter.unit(
+ icu::CurrencyUnit(currency_ustr.getBuffer(), status));
+ CHECK(U_SUCCESS(status));
+ // 12.d Set intlObj.[[CurrencyDisplay]] to currencyDisplay.
+ // The default unitWidth in ICU is SHORT, which maps from
+ // CurrencyDisplay::SYMBOL, so we can skip the setting as an optimization.
+ if (currency_display != CurrencyDisplay::SYMBOL) {
+ icu_number_formatter = icu_number_formatter.unitWidth(
+ ToUNumberUnitWidth(currency_display));
+ }
+ CHECK(U_SUCCESS(status));
+ }
+ }
+
+ // 13. If style is "unit", then
+ if (style == Style::UNIT) {
+ // Track use of the newer style "unit".
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberFormatStyleUnit);
+
+ // 13.a If unit is undefined, throw a TypeError exception.
if (unit == "") {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kInvalidUnit,
@@ -1006,22 +1085,10 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
JSNumberFormat);
}
- // If the result of IsWellFormedUnitIdentifier(unit) is false, throw a
- // RangeError exception.
- Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> maybe_wellformed =
- IsWellFormedUnitIdentifier(isolate, unit);
- if (maybe_wellformed.IsNothing()) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kInvalidUnit,
- factory->NewStringFromAsciiChecked(service),
- factory->NewStringFromAsciiChecked(unit.c_str())),
- JSNumberFormat);
- }
std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair =
- maybe_wellformed.FromJust();
+ maybe_wellformed_unit.FromJust();
- // Set intlObj.[[Unit]] to unit.
+ // 13.b Set intlObj.[[Unit]] to unit.
if (unit_pair.first != icu::NoUnit::base()) {
icu_number_formatter = icu_number_formatter.unit(unit_pair.first);
}
@@ -1037,40 +1104,18 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
}
- if (style == JSNumberFormat::Style::PERCENT) {
+ if (style == Style::PERCENT) {
icu_number_formatter = icu_number_formatter.unit(icu::NoUnit::percent())
.scale(icu::number::Scale::powerOfTen(2));
}
- if (style == JSNumberFormat::Style::CURRENCY) {
- // 19. If style is "currency", set numberFormat.[[CurrencyDisplay]] to
- // currencyDisplay.
-
- // 17.b. Set numberFormat.[[Currency]] to currency.
- if (!currency_ustr.isEmpty()) {
- Handle<String> currency_string;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, currency_string,
- Intl::ToString(isolate, currency_ustr),
- JSNumberFormat);
-
- icu_number_formatter = icu_number_formatter.unit(
- icu::CurrencyUnit(currency_ustr.getBuffer(), status));
- CHECK(U_SUCCESS(status));
- // The default unitWidth is SHORT in ICU and that mapped from
- // Symbol so we can skip the setting for optimization.
- if (currency_display != CurrencyDisplay::SYMBOL) {
- icu_number_formatter = icu_number_formatter.unitWidth(
- ToUNumberUnitWidth(currency_display));
- }
- CHECK(U_SUCCESS(status));
- }
- }
-
// 23. If style is "currency", then
int mnfd_default, mxfd_default;
- if (style == JSNumberFormat::Style::CURRENCY) {
- // a. Let mnfdDefault be cDigits.
- // b. Let mxfdDefault be cDigits.
+ if (style == Style::CURRENCY) {
+ // b. Let cDigits be CurrencyDigits(currency).
+ int c_digits = CurrencyDigits(currency_ustr);
+ // c. Let mnfdDefault be cDigits.
+ // d. Let mxfdDefault be cDigits.
mnfd_default = c_digits;
mxfd_default = c_digits;
// 24. Else,
@@ -1078,7 +1123,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// a. Let mnfdDefault be 0.
mnfd_default = 0;
// b. If style is "percent", then
- if (style == JSNumberFormat::Style::PERCENT) {
+ if (style == Style::PERCENT) {
// i. Let mxfdDefault be 0.
mxfd_default = 0;
} else {
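Worked numbers for the percent branch above: Intl.NumberFormat treats 0.25 as 25%, so the formatter pairs NoUnit::percent() with Scale::powerOfTen(2), and this hunk picks an mxfdDefault of 0. With the half-up rounding mode set when the formatter was created, 0.256 becomes "26%". A toy sketch of that arithmetic (not ICU; just the scaling and rounding steps):

#include <cmath>
#include <string>

// Mimics NoUnit::percent() + Scale::powerOfTen(2) with mxfd = 0.
std::string FormatPercent(double value) {
  double scaled = value * std::pow(10.0, 2);  // 0.25 -> 25.0
  long rounded = std::lround(scaled);         // half-up: 25.6 -> 26
  return std::to_string(rounded) + "%";
}

// FormatPercent(0.25) == "25%", FormatPercent(0.256) == "26%"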
@@ -1184,10 +1229,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Handle<JSNumberFormat> number_format = Handle<JSNumberFormat>::cast(
isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
DisallowHeapAllocation no_gc;
- number_format->set_flags(0);
- number_format->set_style(style);
number_format->set_locale(*locale_str);
- number_format->set_numberingSystem(*numberingSystem_str);
number_format->set_icu_number_formatter(*managed_number_formatter);
number_format->set_bound_format(*factory->undefined_value());
@@ -1197,31 +1239,44 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
namespace {
-Maybe<bool> IcuFormatNumber(
+Maybe<icu::UnicodeString> IcuFormatNumber(
Isolate* isolate,
const icu::number::LocalizedNumberFormatter& number_format,
- Handle<Object> numeric_obj, icu::number::FormattedNumber* formatted) {
+ Handle<Object> numeric_obj, icu::FieldPositionIterator* fp_iter) {
// If it is BigInt, handle it differently.
UErrorCode status = U_ZERO_ERROR;
+ icu::number::FormattedNumber formatted;
if (numeric_obj->IsBigInt()) {
Handle<BigInt> big_int = Handle<BigInt>::cast(numeric_obj);
Handle<String> big_int_string;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, big_int_string,
BigInt::ToString(isolate, big_int),
- Nothing<bool>());
- *formatted = number_format.formatDecimal(
+ Nothing<icu::UnicodeString>());
+ formatted = number_format.formatDecimal(
{big_int_string->ToCString().get(), big_int_string->length()}, status);
} else {
- double number = numeric_obj->Number();
- *formatted = number_format.formatDouble(number, status);
+ double number = numeric_obj->IsNaN()
+ ? std::numeric_limits<double>::quiet_NaN()
+ : numeric_obj->Number();
+ formatted = number_format.formatDouble(number, status);
}
if (U_FAILURE(status)) {
// This happens because icu data trimming trimmed out "unit".
// See https://bugs.chromium.org/p/v8/issues/detail?id=8641
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewTypeError(MessageTemplate::kIcuError), Nothing<bool>());
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NewTypeError(MessageTemplate::kIcuError),
+ Nothing<icu::UnicodeString>());
+ }
+ if (fp_iter) {
+ formatted.getAllFieldPositions(*fp_iter, status);
+ }
+ icu::UnicodeString result = formatted.toString(status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NewTypeError(MessageTemplate::kIcuError),
+ Nothing<icu::UnicodeString>());
}
- return Just(true);
+ return Just(result);
}
} // namespace
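The IcuFormatNumber rewrite above swaps an out-parameter plus Maybe<bool> for a directly returned Maybe<icu::UnicodeString>, with the FieldPositionIterator now the only (and optional) out-parameter. The general refactor, sketched with std::optional and stand-in types:

#include <optional>
#include <string>

// Before: success flag plus out-parameter.
bool FormatOld(double value, std::string* out) {
  if (value < 0) return false;  // stand-in for U_FAILURE(status)
  *out = std::to_string(value);
  return true;
}

// After: the formatted text is the return value; nullopt is the error path,
// mirroring Maybe<icu::UnicodeString>.
std::optional<std::string> FormatNew(double value) {
  if (value < 0) return std::nullopt;
  return std::to_string(value);
}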
@@ -1232,16 +1287,10 @@ MaybeHandle<String> JSNumberFormat::FormatNumeric(
Handle<Object> numeric_obj) {
DCHECK(numeric_obj->IsNumeric());
- icu::number::FormattedNumber formatted;
- Maybe<bool> maybe_format =
- IcuFormatNumber(isolate, number_format, numeric_obj, &formatted);
+ Maybe<icu::UnicodeString> maybe_format =
+ IcuFormatNumber(isolate, number_format, numeric_obj, nullptr);
MAYBE_RETURN(maybe_format, Handle<String>());
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString result = formatted.toString(status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
- }
- return Intl::ToString(isolate, result);
+ return Intl::ToString(isolate, maybe_format.FromJust());
}
namespace {
@@ -1354,18 +1403,12 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
}
namespace {
-Maybe<int> ConstructParts(Isolate* isolate,
- icu::number::FormattedNumber* formatted,
+Maybe<int> ConstructParts(Isolate* isolate, const icu::UnicodeString& formatted,
+ icu::FieldPositionIterator* fp_iter,
Handle<JSArray> result, int start_index,
Handle<Object> numeric_obj, bool style_is_unit) {
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString formatted_text = formatted->toString(status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewTypeError(MessageTemplate::kIcuError), Nothing<int>());
- }
DCHECK(numeric_obj->IsNumeric());
- int32_t length = formatted_text.length();
+ int32_t length = formatted.length();
int index = start_index;
if (length == 0) return Just(index);
@@ -1374,14 +1417,13 @@ Maybe<int> ConstructParts(Isolate* isolate,
// other region covers some part of the formatted string. It's possible
// there's another field with exactly the same begin and end as this backdrop,
// in which case the backdrop's field_id of -1 will give it lower priority.
- regions.push_back(NumberFormatSpan(-1, 0, formatted_text.length()));
+ regions.push_back(NumberFormatSpan(-1, 0, formatted.length()));
{
- icu::ConstrainedFieldPosition cfp;
- cfp.constrainCategory(UFIELD_CATEGORY_NUMBER);
- while (formatted->nextPosition(cfp, status)) {
- regions.push_back(
- NumberFormatSpan(cfp.getField(), cfp.getStart(), cfp.getLimit()));
+ icu::FieldPosition fp;
+ while (fp_iter->next(fp)) {
+ regions.push_back(NumberFormatSpan(fp.getField(), fp.getBeginIndex(),
+ fp.getEndIndex()));
}
}
@@ -1403,7 +1445,7 @@ Maybe<int> ConstructParts(Isolate* isolate,
Handle<String> substring;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, substring,
- Intl::ToString(isolate, formatted_text, part.begin_pos, part.end_pos),
+ Intl::ToString(isolate, formatted, part.begin_pos, part.end_pos),
Nothing<int>());
Intl::AddElement(isolate, result, index, field_type_string, substring);
++index;
@@ -1423,15 +1465,20 @@ MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
number_format->icu_number_formatter().raw();
CHECK_NOT_NULL(fmt);
- icu::number::FormattedNumber formatted;
- Maybe<bool> maybe_format =
- IcuFormatNumber(isolate, *fmt, numeric_obj, &formatted);
+ icu::FieldPositionIterator fp_iter;
+ Maybe<icu::UnicodeString> maybe_format =
+ IcuFormatNumber(isolate, *fmt, numeric_obj, &fp_iter);
MAYBE_RETURN(maybe_format, Handle<JSArray>());
+ UErrorCode status = U_ZERO_ERROR;
+ bool style_is_unit =
+ Style::UNIT == StyleFromSkeleton(fmt->toSkeleton(status));
+ CHECK(U_SUCCESS(status));
+
Handle<JSArray> result = factory->NewJSArray(0);
- Maybe<int> maybe_format_to_parts = ConstructParts(
- isolate, &formatted, result, 0, numeric_obj,
- number_format->style() == JSNumberFormat::Style::UNIT);
+ Maybe<int> maybe_format_to_parts =
+ ConstructParts(isolate, maybe_format.FromJust(), &fp_iter, result, 0,
+ numeric_obj, style_is_unit);
MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
return result;
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index dd11564188..471398eafa 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -73,32 +73,11 @@ class JSNumberFormat : public JSObject {
DECL_PRINTER(JSNumberFormat)
DECL_VERIFIER(JSNumberFormat)
- // [[Style]] is one of the values "decimal", "percent", "currency",
- // or "unit" identifying the style of the number format.
- // Note: "unit" is added in proposal-unified-intl-numberformat
- enum class Style { DECIMAL, PERCENT, CURRENCY, UNIT };
-
- inline void set_style(Style style);
- inline Style style() const;
-
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JS_NUMBER_FORMAT_FIELDS)
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(StyleBits, Style, 2, _)
-
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
-
- STATIC_ASSERT(Style::DECIMAL <= StyleBits::kMax);
- STATIC_ASSERT(Style::PERCENT <= StyleBits::kMax);
- STATIC_ASSERT(Style::CURRENCY <= StyleBits::kMax);
- STATIC_ASSERT(Style::UNIT <= StyleBits::kMax);
-
DECL_ACCESSORS(locale, String)
- DECL_ACCESSORS(numberingSystem, String)
DECL_ACCESSORS(icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>)
DECL_ACCESSORS(bound_format, Object)
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 849aae4dc9..dd5ac09904 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -7,6 +7,7 @@
#include "src/objects/js-objects.h"
+#include "src/diagnostics/code-tracer.h"
#include "src/heap/heap-write-barrier.h"
#include "src/objects/elements.h"
#include "src/objects/embedder-data-slot-inl.h"
@@ -573,10 +574,12 @@ void JSFunction::set_shared(SharedFunctionInfo value, WriteBarrierMode mode) {
void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
if (has_feedback_vector() && feedback_vector().has_optimized_code()) {
if (FLAG_trace_opt) {
- PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
+ CodeTracer::Scope scope(GetIsolate()->GetCodeTracer());
+ PrintF(scope.file(),
+ "[evicting entry from optimizing code feedback slot (%s) for ",
reason);
- ShortPrint();
- PrintF("]\n");
+ ShortPrint(scope.file());
+ PrintF(scope.file(), "]\n");
}
feedback_vector().ClearOptimizedCode();
}
@@ -1023,8 +1026,6 @@ inline int JSGlobalProxy::SizeWithEmbedderFields(int embedder_field_count) {
ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
-TQ_SMI_ACCESSORS(JSStringIterator, index)
-
// If the fast-case backing storage takes up much more memory than a dictionary
// backing storage would, the object should have slow elements.
// static
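The ClearOptimizedCodeSlot change routes --trace-opt output through the isolate's CodeTracer, so the messages honor trace-redirection flags (e.g. --redirect-code-traces) instead of always hitting stdout. A rough sketch of the scope-plus-sink shape (a hypothetical miniature, not V8's actual CodeTracer):

#include <cstdio>

class CodeTracer {
 public:
  explicit CodeTracer(std::FILE* sink) : sink_(sink) {}
  // Scope pins the tracer's sink for a burst of related PrintF calls.
  class Scope {
   public:
    explicit Scope(CodeTracer* tracer) : tracer_(tracer) {}
    std::FILE* file() const { return tracer_->sink_; }
   private:
    CodeTracer* tracer_;
  };
 private:
  std::FILE* sink_;
};

void TraceEviction(CodeTracer* tracer, const char* reason) {
  CodeTracer::Scope scope(tracer);
  std::fprintf(scope.file(), "[evicting entry (%s)]\n", reason);
}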
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index e3ea252693..13741c4f62 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -12,6 +12,7 @@
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/handles/maybe-handles.h"
+#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
#include "src/ic/ic.h"
#include "src/init/bootstrapper.h"
@@ -2137,10 +2138,10 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSMapIterator::kHeaderSize;
case JS_WEAK_REF_TYPE:
return JSWeakRef::kHeaderSize;
- case JS_FINALIZATION_GROUP_TYPE:
- return JSFinalizationGroup::kHeaderSize;
- case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
- return JSFinalizationGroupCleanupIterator::kHeaderSize;
+ case JS_FINALIZATION_REGISTRY_TYPE:
+ return JSFinalizationRegistry::kHeaderSize;
+ case JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE:
+ return JSFinalizationRegistryCleanupIterator::kHeaderSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kHeaderSize;
case JS_WEAK_SET_TYPE:
@@ -2376,7 +2377,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
int enumeration_index = original_details.dictionary_index();
DCHECK_GT(enumeration_index, 0);
details = details.set_index(enumeration_index);
- dictionary->SetEntry(isolate, entry, *name, *value, details);
+ dictionary->SetEntry(entry, *name, *value, details);
}
}
}
@@ -3375,7 +3376,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Check that it really works.
DCHECK(object->HasFastProperties());
if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
+ LOG(isolate, MapEvent("SlowToFast", old_map, new_map, reason));
}
return;
}
@@ -3465,7 +3466,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("SlowToFast", *old_map, *new_map, reason));
+ LOG(isolate, MapEvent("SlowToFast", old_map, new_map, reason));
}
// Transform the object.
object->synchronized_set_map(*new_map);
@@ -3796,7 +3797,7 @@ void JSObject::ApplyAttributesToDictionary(
if (v.IsAccessorPair()) attrs &= ~READ_ONLY;
}
details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
- dictionary->DetailsAtPut(isolate, i, details);
+ dictionary->DetailsAtPut(i, details);
}
}
@@ -4563,14 +4564,10 @@ void JSObject::SetImmutableProto(Handle<JSObject> object) {
}
void JSObject::EnsureCanContainElements(Handle<JSObject> object,
- Arguments* args, uint32_t first_arg,
+ JavaScriptArguments* args,
uint32_t arg_count,
EnsureElementsMode mode) {
- // Elements in |Arguments| are ordered backwards (because they're on the
- // stack), but the method that's called here iterates over them in forward
- // direction.
- return EnsureCanContainElements(
- object, args->slot_at(first_arg + arg_count - 1), arg_count, mode);
+ return EnsureCanContainElements(object, args->first_slot(), arg_count, mode);
}
void JSObject::ValidateElements(JSObject object) {
@@ -5038,6 +5035,7 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
DCHECK(function->raw_feedback_cell() !=
isolate->heap()->many_closures_cell());
function->raw_feedback_cell().set_value(*feedback_vector);
+ function->raw_feedback_cell().SetInterruptBudget();
}
// static
@@ -5105,7 +5103,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
// Deoptimize all code that embeds the previous initial map.
initial_map->dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kInitialMapChangedGroup);
+ DependentCode::kInitialMapChangedGroup);
} else {
// Put the value in the initial map field until an initial map is
// needed. At that point, a new initial map is created and the
@@ -5168,8 +5166,9 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
if (FLAG_trace_maps) {
- LOG(function->GetIsolate(), MapEvent("InitialMap", Map(), *map, "",
- function->shared().DebugName()));
+ LOG(function->GetIsolate(), MapEvent("InitialMap", Handle<Map>(), map, "",
+ handle(function->shared().DebugName(),
+ function->GetIsolate())));
}
}
@@ -5250,7 +5249,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
- case JS_FINALIZATION_GROUP_TYPE:
+ case JS_FINALIZATION_REGISTRY_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
case JS_PROMISE_TYPE:
case JS_REG_EXP_TYPE:
@@ -5347,8 +5346,15 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
int in_object_properties;
int embedder_fields =
JSObject::GetEmbedderFieldCount(*constructor_initial_map);
+ // The constructor expects a certain number of in-object properties to be
+ // in the object. However, CalculateExpectedNofProperties() may return a
+ // smaller value if 1) the constructor is not in the prototype chain of
+ // new_target, 2) the prototype chain is modified during iteration, or
+ // 3) a compilation failure occurs during prototype chain iteration.
+ // So we take the maximum of the two values.
int expected_nof_properties =
- JSFunction::CalculateExpectedNofProperties(isolate, new_target);
+ Max(static_cast<int>(constructor->shared().expected_nof_properties()),
+ JSFunction::CalculateExpectedNofProperties(isolate, new_target));
JSFunction::CalculateInstanceSizeHelper(
instance_type, true, embedder_fields, expected_nof_properties,
&instance_size, &in_object_properties);
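
Taking the maximum guards against undercounting in-object slots. With hypothetical numbers:

  int from_shared =
      constructor->shared().expected_nof_properties();  // say 4
  int from_walk = JSFunction::CalculateExpectedNofProperties(
      isolate, new_target);  // say 0, e.g. after a compilation failure
  int expected_nof_properties = Max(from_shared, from_walk);  // 4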
@@ -5596,9 +5602,10 @@ int JSFunction::CalculateExpectedNofProperties(Isolate* isolate,
return JSObject::kMaxInObjectProperties;
}
} else {
- // In case there was a compilation error for the constructor we will
- // throw an error during instantiation.
- break;
+ // If there was a compilation error, keep iterating in case there is a
+ // builtin function in the prototype chain that requires a certain number
+ // of in-object properties.
+ continue;
}
}
// Inobject slack tracking will reclaim redundant inobject space
@@ -5890,7 +5897,7 @@ void JSMessageObject::EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<JSMessageObject> message) {
if (!message->DidEnsureSourcePositionsAvailable()) {
DCHECK_EQ(message->start_position(), -1);
- DCHECK_GE(message->bytecode_offset().value(), 0);
+ DCHECK_GE(message->bytecode_offset().value(), kFunctionEntryBytecodeOffset);
Handle<SharedFunctionInfo> shared_info(
SharedFunctionInfo::cast(message->shared_info()), isolate);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index e59179cb67..418c12ac50 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -515,7 +515,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
uint32_t length,
EnsureElementsMode mode);
static void EnsureCanContainElements(Handle<JSObject> object,
- Arguments* arguments, uint32_t first_arg,
+ JavaScriptArguments* arguments,
uint32_t arg_count,
EnsureElementsMode mode);
@@ -1443,10 +1443,6 @@ class JSStringIterator
DECL_PRINTER(JSStringIterator)
DECL_VERIFIER(JSStringIterator)
- // [index]: The [[StringIteratorNextIndex]] slot.
- inline int index() const;
- inline void set_index(int value);
-
TQ_OBJECT_CONSTRUCTORS(JSStringIterator)
};
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index b8fe7f50f0..40aae56b20 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -30,14 +30,14 @@ ACCESSORS(JSPluralRules, icu_number_formatter,
kIcuNumberFormatterOffset)
inline void JSPluralRules::set_type(Type type) {
- DCHECK_LE(type, TypeBits::kMax);
+ DCHECK_LE(type, TypeBit::kMax);
int hints = flags();
- hints = TypeBits::update(hints, type);
+ hints = TypeBit::update(hints, type);
set_flags(hints);
}
inline JSPluralRules::Type JSPluralRules::type() const {
- return TypeBits::decode(flags());
+ return TypeBit::decode(flags());
}
CAST_ACCESSOR(JSPluralRules)
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index bf928416f4..ffbd53034e 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -111,9 +111,14 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
// 11. Let r be ResolveLocale(%PluralRules%.[[AvailableLocales]],
// requestedLocales, opt, %PluralRules%.[[RelevantExtensionKeys]],
// localeData).
- Intl::ResolvedLocale r =
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale =
Intl::ResolveLocale(isolate, JSPluralRules::GetAvailableLocales(),
requested_locales, matcher, {});
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSPluralRules);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
@@ -124,7 +129,7 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
std::unique_ptr<icu::PluralRules> icu_plural_rules;
bool success =
CreateICUPluralRules(isolate, r.icu_locale, type, &icu_plural_rules);
- if (!success) {
+ if (!success || icu_plural_rules.get() == nullptr) {
// Remove extensions and try again.
icu::Locale no_extension_locale(r.icu_locale.getBaseName());
success = CreateICUPluralRules(isolate, no_extension_locale, type,
@@ -133,13 +138,12 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
icu::number::NumberFormatter::withLocale(no_extension_locale)
.roundingMode(UNUM_ROUND_HALFUP);
- if (!success) {
- FATAL("Failed to create ICU PluralRules, are ICU data files missing?");
+ if (!success || icu_plural_rules.get() == nullptr) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSPluralRules);
}
}
- CHECK_NOT_NULL(icu_plural_rules.get());
-
// 9. Perform ? SetNumberFormatDigitOptions(pluralRules, options, 0, 3).
Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
Intl::SetNumberFormatDigitOptions(isolate, options, 0, 3, false);
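
Since New() reports ICU failures through its MaybeHandle result instead of aborting with FATAL, a caller unwraps it along these lines (a sketch, not a specific call site in this patch):

  Handle<JSPluralRules> rules;
  if (!JSPluralRules::New(isolate, map, locales, options).ToHandle(&rules)) {
    // The RangeError (kIcuError) is already pending on the isolate.
    return MaybeHandle<JSObject>();
  }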
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index 0b2b91ef2b..c5df77b57b 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -58,16 +58,13 @@ class JSPluralRules : public JSObject {
DECL_PRINTER(JSPluralRules)
DECL_VERIFIER(JSPluralRules)
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) V(TypeBits, Type, 1, _)
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_JS_PLURAL_RULES_FLAGS()
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
+ STATIC_ASSERT(Type::CARDINAL <= TypeBit::kMax);
+ STATIC_ASSERT(Type::ORDINAL <= TypeBit::kMax);
- STATIC_ASSERT(Type::CARDINAL <= TypeBits::kMax);
- STATIC_ASSERT(Type::ORDINAL <= TypeBits::kMax);
-
-// Layout description.
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JS_PLURAL_RULES_FIELDS)
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index 8b7a11a151..cffd10b9f1 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -18,7 +18,6 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPromise)
-TQ_SMI_ACCESSORS(JSPromise, flags)
BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index 66413bb651..efb13dc015 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -32,8 +32,6 @@ class JSPromise : public TorqueGeneratedJSPromise<JSPromise, JSObject> {
// [reactions]: Checks that the promise is pending and returns the reactions.
inline Object reactions() const;
- DECL_INT_ACCESSORS(flags)
-
// [has_handler]: Whether this promise has a reject handler or not.
DECL_BOOLEAN_ACCESSORS(has_handler)
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index b204619058..ea44aaae27 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -17,7 +17,6 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator)
-TQ_SMI_ACCESSORS(JSRegExpStringIterator, flags)
BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, kDoneBit)
BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, kGlobalBit)
BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, kUnicodeBit)
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.h b/deps/v8/src/objects/js-regexp-string-iterator.h
index ffd38fe211..e54aedbc2b 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator.h
@@ -17,8 +17,6 @@ class JSRegExpStringIterator
: public TorqueGeneratedJSRegExpStringIterator<JSRegExpStringIterator,
JSObject> {
public:
- DECL_INT_ACCESSORS(flags)
-
// [boolean]: The [[Done]] internal property.
DECL_BOOLEAN_ACCESSORS(done)
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 1ac0e4733e..74da187246 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -27,26 +27,15 @@ ACCESSORS(JSRelativeTimeFormat, icu_formatter,
Managed<icu::RelativeDateTimeFormatter>, kIcuFormatterOffset)
SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
-inline void JSRelativeTimeFormat::set_style(Style style) {
- DCHECK_GE(StyleBits::kMax, style);
- int hints = flags();
- hints = StyleBits::update(hints, style);
- set_flags(hints);
-}
-
-inline JSRelativeTimeFormat::Style JSRelativeTimeFormat::style() const {
- return StyleBits::decode(flags());
-}
-
inline void JSRelativeTimeFormat::set_numeric(Numeric numeric) {
- DCHECK_GE(NumericBits::kMax, numeric);
+ DCHECK_GE(NumericBit::kMax, numeric);
int hints = flags();
- hints = NumericBits::update(hints, numeric);
+ hints = NumericBit::update(hints, numeric);
set_flags(hints);
}
inline JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::numeric() const {
- return NumericBits::decode(flags());
+ return NumericBit::decode(flags());
}
CAST_ACCESSOR(JSRelativeTimeFormat)
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 8768560c39..0cb6b117df 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -25,33 +25,42 @@ namespace v8 {
namespace internal {
namespace {
-UDateRelativeDateTimeFormatterStyle getIcuStyle(
- JSRelativeTimeFormat::Style style) {
+// Style: identifying the relative time format style used.
+//
+// ecma402/#sec-properties-of-intl-relativetimeformat-instances
+
+enum class Style {
+ LONG, // Everything spelled out.
+ SHORT, // Abbreviations used when possible.
+ NARROW // Use the shortest possible form.
+};
+
+UDateRelativeDateTimeFormatterStyle toIcuStyle(Style style) {
switch (style) {
- case JSRelativeTimeFormat::Style::LONG:
+ case Style::LONG:
return UDAT_STYLE_LONG;
- case JSRelativeTimeFormat::Style::SHORT:
+ case Style::SHORT:
return UDAT_STYLE_SHORT;
- case JSRelativeTimeFormat::Style::NARROW:
+ case Style::NARROW:
return UDAT_STYLE_NARROW;
}
UNREACHABLE();
}
-} // namespace
-
-JSRelativeTimeFormat::Style JSRelativeTimeFormat::getStyle(const char* str) {
- if (strcmp(str, "long") == 0) return JSRelativeTimeFormat::Style::LONG;
- if (strcmp(str, "short") == 0) return JSRelativeTimeFormat::Style::SHORT;
- if (strcmp(str, "narrow") == 0) return JSRelativeTimeFormat::Style::NARROW;
- UNREACHABLE();
-}
-JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::getNumeric(
- const char* str) {
- if (strcmp(str, "auto") == 0) return JSRelativeTimeFormat::Numeric::AUTO;
- if (strcmp(str, "always") == 0) return JSRelativeTimeFormat::Numeric::ALWAYS;
+Style fromIcuStyle(UDateRelativeDateTimeFormatterStyle icu_style) {
+ switch (icu_style) {
+ case UDAT_STYLE_LONG:
+ return Style::LONG;
+ case UDAT_STYLE_SHORT:
+ return Style::SHORT;
+ case UDAT_STYLE_NARROW:
+ return Style::NARROW;
+ case UDAT_STYLE_COUNT:
+ UNREACHABLE();
+ }
UNREACHABLE();
}
+} // namespace
MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
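
With the Style enum moved into this file's anonymous namespace, resolvedOptions() re-derives the style from the live ICU formatter instead of reading stored flag bits (see the ResolvedOptions hunk below). The round trip, as a sketch:

  icu::RelativeDateTimeFormatter* formatter =
      format_holder->icu_formatter().raw();
  // getFormatStyle() returns the UDAT_STYLE_* the formatter was built with.
  Style style = fromIcuStyle(formatter->getFormatStyle());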
@@ -103,9 +112,14 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// ResolveLocale(%RelativeTimeFormat%.[[AvailableLocales]],
// requestedLocales, opt,
// %RelativeTimeFormat%.[[RelevantExtensionKeys]], localeData).
- Intl::ResolvedLocale r =
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale =
Intl::ResolveLocale(isolate, JSRelativeTimeFormat::GetAvailableLocales(),
requested_locales, matcher, {"nu"});
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSRelativeTimeFormat);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
UErrorCode status = U_ZERO_ERROR;
@@ -132,10 +146,6 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status);
CHECK(U_SUCCESS(status));
}
- Handle<String> numbering_system_string =
- isolate->factory()->NewStringFromAsciiChecked(
- Intl::GetNumberingSystem(icu_locale).c_str());
-
// 15. Let dataLocale be r.[[DataLocale]].
// 16. Let s be ? GetOption(options, "style", "string",
@@ -164,25 +174,41 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
icu::NumberFormat* number_format =
icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status);
if (U_FAILURE(status)) {
- delete number_format;
- FATAL("Failed to create ICU number format, are ICU data files missing?");
+ // The data build filter files exclude the "rbnf_tree" data because ECMA402
+ // does not support "algorithmic" numbering systems, so we may get
+ // U_MISSING_RESOURCE_ERROR here. Fall back to the locale without the
+ // numbering system and create the number format again.
+ if (status == U_MISSING_RESOURCE_ERROR) {
+ delete number_format;
+ status = U_ZERO_ERROR;
+ icu_locale.setUnicodeKeywordValue("nu", nullptr, status);
+ CHECK(U_SUCCESS(status));
+ number_format =
+ icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status);
+ }
+ if (U_FAILURE(status) || number_format == nullptr) {
+ delete number_format;
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSRelativeTimeFormat);
+ }
}
- CHECK_NOT_NULL(number_format);
// Change UDISPCTX_CAPITALIZATION_NONE to other values if
// ECMA402 later includes an option to change capitalization.
// Ref: https://github.com/tc39/proposal-intl-relative-time/issues/11
icu::RelativeDateTimeFormatter* icu_formatter =
new icu::RelativeDateTimeFormatter(icu_locale, number_format,
- getIcuStyle(style_enum),
+ toIcuStyle(style_enum),
UDISPCTX_CAPITALIZATION_NONE, status);
- if (U_FAILURE(status)) {
+ if (U_FAILURE(status) || icu_formatter == nullptr) {
delete icu_formatter;
- FATAL(
- "Failed to create ICU relative date time formatter, are ICU data files "
- "missing?");
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSRelativeTimeFormat);
}
- CHECK_NOT_NULL(icu_formatter);
+
+ Handle<String> numbering_system_string =
+ isolate->factory()->NewStringFromAsciiChecked(
+ Intl::GetNumberingSystem(icu_locale).c_str());
Handle<Managed<icu::RelativeDateTimeFormatter>> managed_formatter =
Managed<icu::RelativeDateTimeFormatter>::FromRawPtr(isolate, 0,
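
The retry works by erasing the "nu" Unicode keyword: ICU's setUnicodeKeywordValue removes a keyword when the value is nullptr, which the fallback above depends on. A standalone sketch (the locale tag is hypothetical):

  UErrorCode status = U_ZERO_ERROR;
  icu::Locale loc = icu::Locale::forLanguageTag("en-US-u-nu-arab", status);
  CHECK(U_SUCCESS(status));
  loc.setUnicodeKeywordValue("nu", nullptr, status);  // drops the keyword
  CHECK(U_SUCCESS(status));
  // loc now carries no numbering-system request; default digits are used.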
@@ -191,11 +217,11 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
Handle<JSRelativeTimeFormat> relative_time_format_holder =
Handle<JSRelativeTimeFormat>::cast(
isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+
DisallowHeapAllocation no_gc;
relative_time_format_holder->set_flags(0);
relative_time_format_holder->set_locale(*locale_str);
relative_time_format_holder->set_numberingSystem(*numbering_system_string);
- relative_time_format_holder->set_style(style_enum);
relative_time_format_holder->set_numeric(numeric_enum);
relative_time_format_holder->set_icu_formatter(*managed_formatter);
@@ -203,16 +229,36 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
return relative_time_format_holder;
}
+namespace {
+
+Handle<String> StyleAsString(Isolate* isolate, Style style) {
+ switch (style) {
+ case Style::LONG:
+ return ReadOnlyRoots(isolate).long_string_handle();
+ case Style::SHORT:
+ return ReadOnlyRoots(isolate).short_string_handle();
+ case Style::NARROW:
+ return ReadOnlyRoots(isolate).narrow_string_handle();
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
Handle<JSObject> JSRelativeTimeFormat::ResolvedOptions(
Isolate* isolate, Handle<JSRelativeTimeFormat> format_holder) {
Factory* factory = isolate->factory();
+ icu::RelativeDateTimeFormatter* formatter =
+ format_holder->icu_formatter().raw();
+ CHECK_NOT_NULL(formatter);
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
Handle<String> locale(format_holder->locale(), isolate);
Handle<String> numberingSystem(format_holder->numberingSystem(), isolate);
JSObject::AddProperty(isolate, result, factory->locale_string(), locale,
NONE);
- JSObject::AddProperty(isolate, result, factory->style_string(),
- format_holder->StyleAsString(), NONE);
+ JSObject::AddProperty(
+ isolate, result, factory->style_string(),
+ StyleAsString(isolate, fromIcuStyle(formatter->getFormatStyle())), NONE);
JSObject::AddProperty(isolate, result, factory->numeric_string(),
format_holder->NumericAsString(), NONE);
JSObject::AddProperty(isolate, result, factory->numberingSystem_string(),
@@ -220,18 +266,6 @@ Handle<JSObject> JSRelativeTimeFormat::ResolvedOptions(
return result;
}
-Handle<String> JSRelativeTimeFormat::StyleAsString() const {
- switch (style()) {
- case Style::LONG:
- return GetReadOnlyRoots().long_string_handle();
- case Style::SHORT:
- return GetReadOnlyRoots().short_string_handle();
- case Style::NARROW:
- return GetReadOnlyRoots().narrow_string_handle();
- }
- UNREACHABLE();
-}
-
Handle<String> JSRelativeTimeFormat::NumericAsString() const {
switch (numeric()) {
case Numeric::ALWAYS:
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 99d09e5c3c..87aac9f060 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -40,7 +40,6 @@ class JSRelativeTimeFormat : public JSObject {
V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSRelativeTimeFormat> format_holder);
- Handle<String> StyleAsString() const;
Handle<String> NumericAsString() const;
// ecma402/#sec-Intl.RelativeTimeFormat.prototype.format
@@ -63,18 +62,6 @@ class JSRelativeTimeFormat : public JSObject {
DECL_ACCESSORS(icu_formatter, Managed<icu::RelativeDateTimeFormatter>)
- // Style: identifying the relative time format style used.
- //
- // ecma402/#sec-properties-of-intl-relativetimeformat-instances
-
- enum class Style {
- LONG, // Everything spelled out.
- SHORT, // Abbreviations used when possible.
- NARROW // Use the shortest possible form.
- };
- inline void set_style(Style style);
- inline Style style() const;
-
// Numeric: identifying whether numerical descriptions are always used, or
// used only when no more specific version is available (e.g., "1 day ago" vs
// "yesterday").
@@ -88,18 +75,11 @@ class JSRelativeTimeFormat : public JSObject {
inline void set_numeric(Numeric numeric);
inline Numeric numeric() const;
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(StyleBits, Style, 2, _) \
- V(NumericBits, Numeric, 1, _)
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_JS_RELATIVE_TIME_FORMAT_FLAGS()
- STATIC_ASSERT(Style::LONG <= StyleBits::kMax);
- STATIC_ASSERT(Style::SHORT <= StyleBits::kMax);
- STATIC_ASSERT(Style::NARROW <= StyleBits::kMax);
- STATIC_ASSERT(Numeric::AUTO <= NumericBits::kMax);
- STATIC_ASSERT(Numeric::ALWAYS <= NumericBits::kMax);
+ STATIC_ASSERT(Numeric::AUTO <= NumericBit::kMax);
+ STATIC_ASSERT(Numeric::ALWAYS <= NumericBit::kMax);
// [flags] Bit field containing various flags about the function.
DECL_INT_ACCESSORS(flags)
@@ -112,9 +92,6 @@ class JSRelativeTimeFormat : public JSObject {
TORQUE_GENERATED_JS_RELATIVE_TIME_FORMAT_FIELDS)
private:
- static Style getStyle(const char* str);
- static Numeric getNumeric(const char* str);
-
OBJECT_CONSTRUCTORS(JSRelativeTimeFormat, JSObject);
};
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index b2d745179a..f312f7c91a 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -27,7 +27,7 @@ ACCESSORS(JSSegmentIterator, unicode_string, Managed<icu::UnicodeString>,
kUnicodeStringOffset)
BIT_FIELD_ACCESSORS(JSSegmentIterator, flags, is_break_type_set,
- JSSegmentIterator::BreakTypeSetBits)
+ JSSegmentIterator::BreakTypeSetBit)
SMI_ACCESSORS(JSSegmentIterator, flags, kFlagsOffset)
diff --git a/deps/v8/src/objects/js-segment-iterator.h b/deps/v8/src/objects/js-segment-iterator.h
index 01fc268298..81ebc4dbd2 100644
--- a/deps/v8/src/objects/js-segment-iterator.h
+++ b/deps/v8/src/objects/js-segment-iterator.h
@@ -76,12 +76,8 @@ class JSSegmentIterator : public JSObject {
inline void set_granularity(JSSegmenter::Granularity granularity);
inline JSSegmenter::Granularity granularity() const;
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(GranularityBits, JSSegmenter::Granularity, 2, _) \
- V(BreakTypeSetBits, bool, 1, _)
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_JS_SEGMENT_ITERATOR_FLAGS()
STATIC_ASSERT(JSSegmenter::Granularity::GRAPHEME <= GranularityBits::kMax);
STATIC_ASSERT(JSSegmenter::Granularity::WORD <= GranularityBits::kMax);
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index 7985cf1c99..39cf15f628 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -23,13 +23,6 @@
namespace v8 {
namespace internal {
-JSSegmenter::Granularity JSSegmenter::GetGranularity(const char* str) {
- if (strcmp(str, "grapheme") == 0) return JSSegmenter::Granularity::GRAPHEME;
- if (strcmp(str, "word") == 0) return JSSegmenter::Granularity::WORD;
- if (strcmp(str, "sentence") == 0) return JSSegmenter::Granularity::SENTENCE;
- UNREACHABLE();
-}
-
MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
Handle<Object> locales,
Handle<Object> input_options) {
@@ -64,9 +57,15 @@ MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
// 9. Let r be ResolveLocale(%Segmenter%.[[AvailableLocales]],
// requestedLocales, opt, %Segmenter%.[[RelevantExtensionKeys]]).
- Intl::ResolvedLocale r =
+ Maybe<Intl::ResolvedLocale> maybe_resolve_locale =
Intl::ResolveLocale(isolate, JSSegmenter::GetAvailableLocales(),
requested_locales, matcher, {});
+ if (maybe_resolve_locale.IsNothing()) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSSegmenter);
+ }
+ Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
+
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index b683e95bf1..b2cd1cac1b 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -62,10 +62,8 @@ class JSSegmenter : public JSObject {
inline void set_granularity(Granularity granularity);
inline Granularity granularity() const;
-// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) V(GranularityBits, Granularity, 2, _)
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
+ // Bit positions in |flags|.
+ DEFINE_TORQUE_GENERATED_JS_SEGMENTER_FLAGS()
STATIC_ASSERT(Granularity::GRAPHEME <= GranularityBits::kMax);
STATIC_ASSERT(Granularity::WORD <= GranularityBits::kMax);
@@ -82,8 +80,6 @@ class JSSegmenter : public JSObject {
TORQUE_GENERATED_JS_SEGMENTER_FIELDS)
private:
- static Granularity GetGranularity(const char* str);
-
OBJECT_CONSTRUCTORS(JSSegmenter, JSObject);
};
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 477b88cdd1..939964b47e 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -19,25 +19,26 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef)
-OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroup, JSObject)
-TQ_OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroupCleanupIterator)
+OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistryCleanupIterator)
-ACCESSORS(JSFinalizationGroup, native_context, NativeContext,
+ACCESSORS(JSFinalizationRegistry, native_context, NativeContext,
kNativeContextOffset)
-ACCESSORS(JSFinalizationGroup, cleanup, Object, kCleanupOffset)
-ACCESSORS(JSFinalizationGroup, active_cells, HeapObject, kActiveCellsOffset)
-ACCESSORS(JSFinalizationGroup, cleared_cells, HeapObject, kClearedCellsOffset)
-ACCESSORS(JSFinalizationGroup, key_map, Object, kKeyMapOffset)
-SMI_ACCESSORS(JSFinalizationGroup, flags, kFlagsOffset)
-ACCESSORS(JSFinalizationGroup, next, Object, kNextOffset)
-CAST_ACCESSOR(JSFinalizationGroup)
-
-void JSFinalizationGroup::Register(
- Handle<JSFinalizationGroup> finalization_group, Handle<JSReceiver> target,
- Handle<Object> holdings, Handle<Object> unregister_token,
- Isolate* isolate) {
+ACCESSORS(JSFinalizationRegistry, cleanup, Object, kCleanupOffset)
+ACCESSORS(JSFinalizationRegistry, active_cells, HeapObject, kActiveCellsOffset)
+ACCESSORS(JSFinalizationRegistry, cleared_cells, HeapObject,
+ kClearedCellsOffset)
+ACCESSORS(JSFinalizationRegistry, key_map, Object, kKeyMapOffset)
+SMI_ACCESSORS(JSFinalizationRegistry, flags, kFlagsOffset)
+ACCESSORS(JSFinalizationRegistry, next_dirty, Object, kNextDirtyOffset)
+CAST_ACCESSOR(JSFinalizationRegistry)
+
+void JSFinalizationRegistry::Register(
+ Handle<JSFinalizationRegistry> finalization_registry,
+ Handle<JSReceiver> target, Handle<Object> holdings,
+ Handle<Object> unregister_token, Isolate* isolate) {
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell();
- weak_cell->set_finalization_group(*finalization_group);
+ weak_cell->set_finalization_registry(*finalization_registry);
weak_cell->set_target(*target);
weak_cell->set_holdings(*holdings);
weak_cell->set_prev(ReadOnlyRoots(isolate).undefined_value());
@@ -47,19 +48,20 @@ void JSFinalizationGroup::Register(
weak_cell->set_key_list_next(ReadOnlyRoots(isolate).undefined_value());
// Add to active_cells.
- weak_cell->set_next(finalization_group->active_cells());
- if (finalization_group->active_cells().IsWeakCell()) {
- WeakCell::cast(finalization_group->active_cells()).set_prev(*weak_cell);
+ weak_cell->set_next(finalization_registry->active_cells());
+ if (finalization_registry->active_cells().IsWeakCell()) {
+ WeakCell::cast(finalization_registry->active_cells()).set_prev(*weak_cell);
}
- finalization_group->set_active_cells(*weak_cell);
+ finalization_registry->set_active_cells(*weak_cell);
if (!unregister_token->IsUndefined(isolate)) {
Handle<SimpleNumberDictionary> key_map;
- if (finalization_group->key_map().IsUndefined(isolate)) {
+ if (finalization_registry->key_map().IsUndefined(isolate)) {
key_map = SimpleNumberDictionary::New(isolate, 1);
} else {
- key_map = handle(
- SimpleNumberDictionary::cast(finalization_group->key_map()), isolate);
+ key_map =
+ handle(SimpleNumberDictionary::cast(finalization_registry->key_map()),
+ isolate);
}
// Unregister tokens are held weakly as objects are often their own
@@ -74,29 +76,29 @@ void JSFinalizationGroup::Register(
weak_cell->set_key_list_next(existing_weak_cell);
}
key_map = SimpleNumberDictionary::Set(isolate, key_map, key, weak_cell);
- finalization_group->set_key_map(*key_map);
+ finalization_registry->set_key_map(*key_map);
}
}
-bool JSFinalizationGroup::Unregister(
- Handle<JSFinalizationGroup> finalization_group,
+bool JSFinalizationRegistry::Unregister(
+ Handle<JSFinalizationRegistry> finalization_registry,
Handle<JSReceiver> unregister_token, Isolate* isolate) {
// Iterate through the doubly linked list of WeakCells associated with the
// key. Each WeakCell will be in the "active_cells" or "cleared_cells" list of
- // its FinalizationGroup; remove it from there.
- return finalization_group->RemoveUnregisterToken(
+ // its FinalizationRegistry; remove it from there.
+ return finalization_registry->RemoveUnregisterToken(
*unregister_token, isolate,
[isolate](WeakCell matched_cell) {
- matched_cell.RemoveFromFinalizationGroupCells(isolate);
+ matched_cell.RemoveFromFinalizationRegistryCells(isolate);
},
[](HeapObject, ObjectSlot, Object) {});
}
template <typename MatchCallback, typename GCNotifyUpdatedSlotCallback>
-bool JSFinalizationGroup::RemoveUnregisterToken(
+bool JSFinalizationRegistry::RemoveUnregisterToken(
JSReceiver unregister_token, Isolate* isolate, MatchCallback match_callback,
GCNotifyUpdatedSlotCallback gc_notify_updated_slot) {
- // This method is called from both FinalizationGroup#unregister and for
+ // This method is called from both FinalizationRegistry#unregister and for
// removing weakly-held dead unregister tokens. The latter is during GC so
// this function cannot GC.
DisallowHeapAllocation no_gc;
@@ -159,7 +161,7 @@ bool JSFinalizationGroup::RemoveUnregisterToken(
}
if (new_key_list_head.IsUndefined(isolate)) {
DCHECK(was_present);
- key_map.ClearEntry(isolate, entry);
+ key_map.ClearEntry(entry);
key_map.ElementRemoved();
} else {
key_map.ValueAtPut(entry, new_key_list_head);
@@ -169,41 +171,42 @@ bool JSFinalizationGroup::RemoveUnregisterToken(
return was_present;
}
-bool JSFinalizationGroup::NeedsCleanup() const {
+bool JSFinalizationRegistry::NeedsCleanup() const {
return cleared_cells().IsWeakCell();
}
-bool JSFinalizationGroup::scheduled_for_cleanup() const {
+bool JSFinalizationRegistry::scheduled_for_cleanup() const {
return ScheduledForCleanupField::decode(flags());
}
-void JSFinalizationGroup::set_scheduled_for_cleanup(
+void JSFinalizationRegistry::set_scheduled_for_cleanup(
bool scheduled_for_cleanup) {
set_flags(ScheduledForCleanupField::update(flags(), scheduled_for_cleanup));
}
-Object JSFinalizationGroup::PopClearedCellHoldings(
- Handle<JSFinalizationGroup> finalization_group, Isolate* isolate) {
+Object JSFinalizationRegistry::PopClearedCellHoldings(
+ Handle<JSFinalizationRegistry> finalization_registry, Isolate* isolate) {
Handle<WeakCell> weak_cell =
- handle(WeakCell::cast(finalization_group->cleared_cells()), isolate);
+ handle(WeakCell::cast(finalization_registry->cleared_cells()), isolate);
DCHECK(weak_cell->prev().IsUndefined(isolate));
- finalization_group->set_cleared_cells(weak_cell->next());
+ finalization_registry->set_cleared_cells(weak_cell->next());
weak_cell->set_next(ReadOnlyRoots(isolate).undefined_value());
- if (finalization_group->cleared_cells().IsWeakCell()) {
+ if (finalization_registry->cleared_cells().IsWeakCell()) {
WeakCell cleared_cells_head =
- WeakCell::cast(finalization_group->cleared_cells());
+ WeakCell::cast(finalization_registry->cleared_cells());
DCHECK_EQ(cleared_cells_head.prev(), *weak_cell);
cleared_cells_head.set_prev(ReadOnlyRoots(isolate).undefined_value());
} else {
- DCHECK(finalization_group->cleared_cells().IsUndefined(isolate));
+ DCHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
}
// Also remove the WeakCell from the key_map (if it's there).
if (!weak_cell->unregister_token().IsUndefined(isolate)) {
if (weak_cell->key_list_prev().IsUndefined(isolate)) {
- Handle<SimpleNumberDictionary> key_map = handle(
- SimpleNumberDictionary::cast(finalization_group->key_map()), isolate);
+ Handle<SimpleNumberDictionary> key_map =
+ handle(SimpleNumberDictionary::cast(finalization_registry->key_map()),
+ isolate);
Handle<Object> unregister_token =
handle(weak_cell->unregister_token(), isolate);
uint32_t key = Smi::ToInt(unregister_token->GetHash());
@@ -214,7 +217,7 @@ Object JSFinalizationGroup::PopClearedCellHoldings(
// from the hash table.
DCHECK(entry.is_found());
key_map = SimpleNumberDictionary::DeleteEntry(isolate, key_map, entry);
- finalization_group->set_key_map(*key_map);
+ finalization_registry->set_key_map(*key_map);
} else {
// weak_cell is the list head for its key; we need to change the value
// of the key in the hash table.
@@ -224,7 +227,7 @@ Object JSFinalizationGroup::PopClearedCellHoldings(
next->set_key_list_prev(ReadOnlyRoots(isolate).undefined_value());
weak_cell->set_key_list_next(ReadOnlyRoots(isolate).undefined_value());
key_map = SimpleNumberDictionary::Set(isolate, key_map, key, next);
- finalization_group->set_key_map(*key_map);
+ finalization_registry->set_key_map(*key_map);
}
} else {
// weak_cell is somewhere in the middle of its key list.
@@ -244,25 +247,26 @@ template <typename GCNotifyUpdatedSlotCallback>
void WeakCell::Nullify(Isolate* isolate,
GCNotifyUpdatedSlotCallback gc_notify_updated_slot) {
// Remove the WeakCell from the "active_cells" list of its
- // JSFinalizationGroup and insert it into the "cleared_cells" list. This is
+ // JSFinalizationRegistry and insert it into the "cleared_cells" list. This is
// only called for WeakCells which haven't been unregistered yet, so they will
// be in the active_cells list. (The caller must guard against calling this
// for unregistered WeakCells by checking that the target is not undefined.)
DCHECK(target().IsJSReceiver());
set_target(ReadOnlyRoots(isolate).undefined_value());
- JSFinalizationGroup fg = JSFinalizationGroup::cast(finalization_group());
+ JSFinalizationRegistry fr =
+ JSFinalizationRegistry::cast(finalization_registry());
if (prev().IsWeakCell()) {
- DCHECK_NE(fg.active_cells(), *this);
+ DCHECK_NE(fr.active_cells(), *this);
WeakCell prev_cell = WeakCell::cast(prev());
prev_cell.set_next(next());
gc_notify_updated_slot(prev_cell, prev_cell.RawField(WeakCell::kNextOffset),
next());
} else {
- DCHECK_EQ(fg.active_cells(), *this);
- fg.set_active_cells(next());
+ DCHECK_EQ(fr.active_cells(), *this);
+ fr.set_active_cells(next());
gc_notify_updated_slot(
- fg, fg.RawField(JSFinalizationGroup::kActiveCellsOffset), next());
+ fr, fr.RawField(JSFinalizationRegistry::kActiveCellsOffset), next());
}
if (next().IsWeakCell()) {
WeakCell next_cell = WeakCell::cast(next());
@@ -272,7 +276,7 @@ void WeakCell::Nullify(Isolate* isolate,
}
set_prev(ReadOnlyRoots(isolate).undefined_value());
- Object cleared_head = fg.cleared_cells();
+ Object cleared_head = fr.cleared_cells();
if (cleared_head.IsWeakCell()) {
WeakCell cleared_head_cell = WeakCell::cast(cleared_head);
cleared_head_cell.set_prev(*this);
@@ -280,29 +284,30 @@ void WeakCell::Nullify(Isolate* isolate,
cleared_head_cell.RawField(WeakCell::kPrevOffset),
*this);
}
- set_next(fg.cleared_cells());
+ set_next(fr.cleared_cells());
gc_notify_updated_slot(*this, RawField(WeakCell::kNextOffset), next());
- fg.set_cleared_cells(*this);
+ fr.set_cleared_cells(*this);
gc_notify_updated_slot(
- fg, fg.RawField(JSFinalizationGroup::kClearedCellsOffset), *this);
+ fr, fr.RawField(JSFinalizationRegistry::kClearedCellsOffset), *this);
}
-void WeakCell::RemoveFromFinalizationGroupCells(Isolate* isolate) {
+void WeakCell::RemoveFromFinalizationRegistryCells(Isolate* isolate) {
// Remove the WeakCell from the list it's in (either "active_cells" or
- // "cleared_cells" of its JSFinalizationGroup).
+ // "cleared_cells" of its JSFinalizationRegistry).
// It's important to set_target to undefined here. This guarantees that we
// won't call Nullify (which assumes that the WeakCell is in active_cells).
DCHECK(target().IsUndefined() || target().IsJSReceiver());
set_target(ReadOnlyRoots(isolate).undefined_value());
- JSFinalizationGroup fg = JSFinalizationGroup::cast(finalization_group());
- if (fg.active_cells() == *this) {
+ JSFinalizationRegistry fr =
+ JSFinalizationRegistry::cast(finalization_registry());
+ if (fr.active_cells() == *this) {
DCHECK(prev().IsUndefined(isolate));
- fg.set_active_cells(next());
- } else if (fg.cleared_cells() == *this) {
+ fr.set_active_cells(next());
+ } else if (fr.cleared_cells() == *this) {
DCHECK(!prev().IsWeakCell());
- fg.set_cleared_cells(next());
+ fr.set_cleared_cells(next());
} else {
DCHECK(prev().IsWeakCell());
WeakCell prev_cell = WeakCell::cast(prev());
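
key_map is a SimpleNumberDictionary keyed by the unregister token's identity hash, and each value heads a per-token list of WeakCells linked through key_list_next. A lookup therefore looks roughly like this (signatures abbreviated):

  uint32_t key = Smi::ToInt(unregister_token->GetHash());
  InternalIndex entry = key_map->FindEntry(isolate, key);
  if (entry.is_found()) {
    WeakCell head = WeakCell::cast(key_map->ValueAt(entry));
    // Walk head.key_list_next() to visit every cell registered
    // with this token.
  }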
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index 0b18240e70..ff5cad7ee3 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_JS_WEAK_REFS_H_
#include "src/objects/js-objects.h"
-#include "src/objects/microtask.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,13 +16,13 @@ namespace internal {
class NativeContext;
class WeakCell;
-// FinalizationGroup object from the JS Weak Refs spec proposal:
+// FinalizationRegistry object from the JS Weak Refs spec proposal:
// https://github.com/tc39/proposal-weakrefs
-class JSFinalizationGroup : public JSObject {
+class JSFinalizationRegistry : public JSObject {
public:
- DECL_PRINTER(JSFinalizationGroup)
- EXPORT_DECL_VERIFIER(JSFinalizationGroup)
- DECL_CAST(JSFinalizationGroup)
+ DECL_PRINTER(JSFinalizationRegistry)
+ EXPORT_DECL_VERIFIER(JSFinalizationRegistry)
+ DECL_CAST(JSFinalizationRegistry)
DECL_ACCESSORS(native_context, NativeContext)
DECL_ACCESSORS(cleanup, Object)
@@ -32,18 +31,19 @@ class JSFinalizationGroup : public JSObject {
DECL_ACCESSORS(cleared_cells, HeapObject)
DECL_ACCESSORS(key_map, Object)
- // For storing a list of JSFinalizationGroup objects in NativeContext.
- DECL_ACCESSORS(next, Object)
+ DECL_ACCESSORS(next_dirty, Object)
DECL_INT_ACCESSORS(flags)
- inline static void Register(Handle<JSFinalizationGroup> finalization_group,
- Handle<JSReceiver> target,
- Handle<Object> holdings, Handle<Object> key,
- Isolate* isolate);
- inline static bool Unregister(Handle<JSFinalizationGroup> finalization_group,
- Handle<JSReceiver> unregister_token,
- Isolate* isolate);
+ class BodyDescriptor;
+
+ inline static void Register(
+ Handle<JSFinalizationRegistry> finalization_registry,
+ Handle<JSReceiver> target, Handle<Object> holdings, Handle<Object> key,
+ Isolate* isolate);
+ inline static bool Unregister(
+ Handle<JSFinalizationRegistry> finalization_registry,
+ Handle<JSReceiver> unregister_token, Isolate* isolate);
// RemoveUnregisterToken is called from both Unregister and during GC. Since
// it modifies slots in key_map and WeakCells and the normal write barrier is
@@ -64,27 +64,27 @@ class JSFinalizationGroup : public JSObject {
// Remove the first cleared WeakCell from the cleared_cells
// list (assumes there is one) and return its holdings.
inline static Object PopClearedCellHoldings(
- Handle<JSFinalizationGroup> finalization_group, Isolate* isolate);
+ Handle<JSFinalizationRegistry> finalization_registry, Isolate* isolate);
// Constructs an iterator for the WeakCells in the cleared_cells list and
// calls the user's cleanup function.
//
// Returns Nothing<bool> if an exception occurs, otherwise returns Just(true).
static V8_WARN_UNUSED_RESULT Maybe<bool> Cleanup(
- Isolate* isolate, Handle<JSFinalizationGroup> finalization_group,
+ Isolate* isolate, Handle<JSFinalizationRegistry> finalization_registry,
Handle<Object> callback);
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_FINALIZATION_GROUP_FIELDS)
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ JSObject::kHeaderSize, TORQUE_GENERATED_JS_FINALIZATION_REGISTRY_FIELDS)
// Bitfields in flags.
using ScheduledForCleanupField = base::BitField<bool, 0, 1>;
- OBJECT_CONSTRUCTORS(JSFinalizationGroup, JSObject);
+ OBJECT_CONSTRUCTORS(JSFinalizationRegistry, JSObject);
};
-// Internal object for storing weak references in JSFinalizationGroup.
+// Internal object for storing weak references in JSFinalizationRegistry.
class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> {
public:
DECL_PRINTER(WeakCell)
@@ -93,14 +93,14 @@ class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> {
class BodyDescriptor;
// Nullify is called during GC and it modifies the pointers in WeakCell and
- // JSFinalizationGroup. Thus we need to tell the GC about the modified slots
- // via the gc_notify_updated_slot function. The normal write barrier is not
- // enough, since it's disabled before GC.
+ // JSFinalizationRegistry. Thus we need to tell the GC about the modified
+ // slots via the gc_notify_updated_slot function. The normal write barrier is
+ // not enough, since it's disabled before GC.
template <typename GCNotifyUpdatedSlotCallback>
inline void Nullify(Isolate* isolate,
GCNotifyUpdatedSlotCallback gc_notify_updated_slot);
- inline void RemoveFromFinalizationGroupCells(Isolate* isolate);
+ inline void RemoveFromFinalizationRegistryCells(Isolate* isolate);
TQ_OBJECT_CONSTRUCTORS(WeakCell)
};
@@ -115,14 +115,14 @@ class JSWeakRef : public TorqueGeneratedJSWeakRef<JSWeakRef, JSObject> {
TQ_OBJECT_CONSTRUCTORS(JSWeakRef)
};
-class JSFinalizationGroupCleanupIterator
- : public TorqueGeneratedJSFinalizationGroupCleanupIterator<
- JSFinalizationGroupCleanupIterator, JSObject> {
+class JSFinalizationRegistryCleanupIterator
+ : public TorqueGeneratedJSFinalizationRegistryCleanupIterator<
+ JSFinalizationRegistryCleanupIterator, JSObject> {
public:
- DECL_PRINTER(JSFinalizationGroupCleanupIterator)
- DECL_VERIFIER(JSFinalizationGroupCleanupIterator)
+ DECL_PRINTER(JSFinalizationRegistryCleanupIterator)
+ DECL_VERIFIER(JSFinalizationRegistryCleanupIterator)
- TQ_OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator)
+ TQ_OBJECT_CONSTRUCTORS(JSFinalizationRegistryCleanupIterator)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-weak-refs.tq b/deps/v8/src/objects/js-weak-refs.tq
index 879ee6024f..7adcb93d70 100644
--- a/deps/v8/src/objects/js-weak-refs.tq
+++ b/deps/v8/src/objects/js-weak-refs.tq
@@ -2,37 +2,39 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-extern class JSFinalizationGroup extends JSObject {
+extern class JSFinalizationRegistry extends JSObject {
native_context: NativeContext;
cleanup: Object;
active_cells: Undefined|WeakCell;
cleared_cells: Undefined|WeakCell;
key_map: Object;
- next: Undefined|JSFinalizationGroup;
+ // For the linked list of FinalizationRegistries that need cleanup. This
+ // link is weak.
+ next_dirty: Undefined|JSFinalizationRegistry;
flags: Smi;
}
@generateCppClass
-extern class JSFinalizationGroupCleanupIterator extends JSObject {
- finalization_group: JSFinalizationGroup;
+extern class JSFinalizationRegistryCleanupIterator extends JSObject {
+ finalization_registry: JSFinalizationRegistry;
}
@generateCppClass
extern class WeakCell extends HeapObject {
- finalization_group: Undefined|JSFinalizationGroup;
+ finalization_registry: Undefined|JSFinalizationRegistry;
target: Undefined|JSReceiver;
unregister_token: Object;
holdings: Object;
- // For storing doubly linked lists of WeakCells in JSFinalizationGroup's
+ // For storing doubly linked lists of WeakCells in JSFinalizationRegistry's
// "active_cells" and "cleared_cells" lists.
prev: Undefined|WeakCell;
next: Undefined|WeakCell;
// For storing doubly linked lists of WeakCells per key in
- // JSFinalizationGroup's key-based hashmap. The key is the identity hash of
- // unregister_token. WeakCell also needs to know its token, so that we can
- // remove its corresponding key from the key_map when we remove the last
+ // JSFinalizationRegistry's key-based hashmap. The key is the identity hash
+ // of unregister_token. WeakCell also needs to know its token, so that we
+ // can remove its corresponding key from the key_map when we remove the last
// WeakCell associated with it or when the unregister_token dies. The
// unregister token is stored above, after target, as both are weak.
key_list_prev: Undefined|WeakCell;
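
next_dirty turns the registries that need cleanup into a weak singly linked list, replacing the old generic next field. Conceptually the consumer walks it like this (dirty_list_head is a placeholder, not the actual V8 accessor):

  Object current = dirty_list_head;
  while (!current.IsUndefined(isolate)) {
    JSFinalizationRegistry registry = JSFinalizationRegistry::cast(current);
    // ... schedule the registry's cleanup callback ...
    current = registry.next_dirty();
  }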
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index 99258e695c..16008e39d8 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -57,13 +57,14 @@ static int AddKey(Object key, Handle<FixedArray> combined_keys,
static Handle<FixedArray> CombineKeys(Isolate* isolate,
Handle<FixedArray> own_keys,
Handle<FixedArray> prototype_chain_keys,
- Handle<JSReceiver> receiver) {
+ Handle<JSReceiver> receiver,
+ bool may_have_elements) {
int prototype_chain_keys_length = prototype_chain_keys->length();
if (prototype_chain_keys_length == 0) return own_keys;
Map map = receiver->map();
int nof_descriptors = map.NumberOfOwnDescriptors();
- if (nof_descriptors == 0) return prototype_chain_keys;
+ if (nof_descriptors == 0 && !may_have_elements) return prototype_chain_keys;
Handle<DescriptorArray> descs(map.instance_descriptors(), isolate);
int own_keys_length = own_keys.is_null() ? 0 : own_keys->length();
@@ -323,13 +324,18 @@ void FastKeyAccumulator::Prepare() {
// Fully walk the prototype chain and find the last prototype with keys.
is_receiver_simple_enum_ = false;
has_empty_prototype_ = true;
+ only_own_has_simple_elements_ =
+ !receiver_->map().IsCustomElementsReceiverMap();
JSReceiver last_prototype;
may_have_elements_ = MayHaveElements(*receiver_);
for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
iter.Advance()) {
JSReceiver current = iter.GetCurrent<JSReceiver>();
- if (!may_have_elements_) {
- may_have_elements_ = MayHaveElements(current);
+ if (!may_have_elements_ || only_own_has_simple_elements_) {
+ if (MayHaveElements(current)) {
+ may_have_elements_ = true;
+ only_own_has_simple_elements_ = false;
+ }
}
bool has_no_properties = CheckAndInitalizeEmptyEnumCache(current);
if (has_no_properties) continue;
@@ -564,8 +570,23 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysWithPrototypeInfoCache(
GetKeysConversion keys_conversion) {
- Handle<FixedArray> own_keys = KeyAccumulator::GetOwnEnumPropertyKeys(
- isolate_, Handle<JSObject>::cast(receiver_));
+ Handle<FixedArray> own_keys;
+ if (may_have_elements_) {
+ MaybeHandle<FixedArray> maybe_own_keys;
+ if (receiver_->map().is_dictionary_map()) {
+ maybe_own_keys = GetOwnKeysWithElements<false>(
+ isolate_, Handle<JSObject>::cast(receiver_), keys_conversion,
+ skip_indices_);
+ } else {
+ maybe_own_keys = GetOwnKeysWithElements<true>(
+ isolate_, Handle<JSObject>::cast(receiver_), keys_conversion,
+ skip_indices_);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, own_keys, maybe_own_keys, FixedArray);
+ } else {
+ own_keys = KeyAccumulator::GetOwnEnumPropertyKeys(
+ isolate_, Handle<JSObject>::cast(receiver_));
+ }
Handle<FixedArray> prototype_chain_keys;
if (has_prototype_info_cache_) {
prototype_chain_keys =
@@ -587,7 +608,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysWithPrototypeInfoCache(
prototype_chain_keys = accumulator.GetKeys(keys_conversion);
}
Handle<FixedArray> result = CombineKeys(
- isolate_, own_keys, prototype_chain_keys, receiver_);
+ isolate_, own_keys, prototype_chain_keys, receiver_, may_have_elements_);
if (is_for_in_ && own_keys.is_identical_to(result)) {
// Don't leak the enumeration cache without the receiver since it might get
// trimmed otherwise.
@@ -605,7 +626,7 @@ bool FastKeyAccumulator::MayHaveElements(JSReceiver receiver) {
}
bool FastKeyAccumulator::TryPrototypeInfoCache(Handle<JSReceiver> receiver) {
- if (may_have_elements_) return false;
+ if (may_have_elements_ && !only_own_has_simple_elements_) return false;
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
if (!object->HasFastProperties()) return false;
if (object->HasNamedInterceptor()) return false;
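
only_own_has_simple_elements_ lets the prototype-info cache kick in even when the receiver itself has elements, provided nothing on the prototype chain does; CombineKeys then needs may_have_elements so that a receiver with elements but no own descriptors keeps its own keys. A worked example of the guard (illustrative values):

  // Receiver {0: 'a'} with prototype {x: 1}:
  //   own_keys = ["0"], nof_descriptors = 0, may_have_elements = true
  // Without the extra check, CombineKeys would return just ["x"];
  // with it, the result merges to ["0", "x"].
  if (nof_descriptors == 0 && !may_have_elements) return prototype_chain_keys;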
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
index fd8b3937c8..d0c27b2a4d 100644
--- a/deps/v8/src/objects/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -192,6 +192,7 @@ class FastKeyAccumulator {
bool may_have_elements_ = true;
bool has_prototype_info_cache_ = false;
bool try_prototype_info_cache_ = false;
+ bool only_own_has_simple_elements_ = false;
DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
};
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 7f0fb6d335..df04c827fe 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -128,8 +128,6 @@ ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
TQ_OBJECT_CONSTRUCTORS_IMPL(ArrayBoilerplateDescription)
-TQ_SMI_ACCESSORS(ArrayBoilerplateDescription, flags)
-
ElementsKind ArrayBoilerplateDescription::elements_kind() const {
return static_cast<ElementsKind>(flags());
}
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index c441f05a52..623fbcbaf1 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -5,9 +5,13 @@
#include "src/objects/literal-objects.h"
#include "src/ast/ast.h"
+#include "src/base/logging.h"
#include "src/builtins/accessors.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
+#include "src/heap/off-thread-factory-inl.h"
+#include "src/objects/dictionary.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
@@ -27,8 +31,9 @@ inline int EncodeComputedEntry(ClassBoilerplate::ValueKind value_kind,
return flags;
}
+template <typename LocalIsolate>
void AddToDescriptorArrayTemplate(
- Isolate* isolate, Handle<DescriptorArray> descriptor_array_template,
+ LocalIsolate* isolate, Handle<DescriptorArray> descriptor_array_template,
Handle<Name> name, ClassBoilerplate::ValueKind value_kind,
Handle<Object> value) {
InternalIndex entry = descriptor_array_template->Search(
@@ -79,17 +84,19 @@ void AddToDescriptorArrayTemplate(
}
}
+template <typename LocalIsolate>
Handle<NameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
- Isolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
+ LocalIsolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr) {
return NameDictionary::AddNoUpdateNextEnumerationIndex(
isolate, dictionary, name, value, details, entry_out);
}
+template <typename LocalIsolate>
Handle<NumberDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
- Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t element,
- Handle<Object> value, PropertyDetails details,
+ LocalIsolate* isolate, Handle<NumberDictionary> dictionary,
+ uint32_t element, Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr) {
// NumberDictionary does not maintain the enumeration order, so it's
// a normal Add().
@@ -120,12 +127,13 @@ inline int GetExistingValueIndex(Object value) {
return value.IsSmi() ? Smi::ToInt(value) : -1;
}
-template <typename Dictionary, typename Key>
-void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
- Key key, int key_index,
+template <typename LocalIsolate, typename Dictionary, typename Key>
+void AddToDictionaryTemplate(LocalIsolate* isolate,
+ Handle<Dictionary> dictionary, Key key,
+ int key_index,
ClassBoilerplate::ValueKind value_kind,
Smi value) {
- InternalIndex entry = dictionary->FindEntry(isolate, key);
+ InternalIndex entry = dictionary->FindEntry(ReadOnlyRoots(isolate), key);
if (entry.is_not_found()) {
// Entry not found, add new one.
@@ -183,7 +191,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
// was not defined yet, so overwrite property to kData.
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
enum_order);
- dictionary->DetailsAtPut(isolate, entry, details);
+ dictionary->DetailsAtPut(entry, details);
dictionary->ValueAtPut(entry, value);
} else {
@@ -217,7 +225,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
if (!existing_value.IsSmi() || Smi::ToInt(existing_value) < key_index) {
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
enum_order);
- dictionary->DetailsAtPut(isolate, entry, details);
+ dictionary->DetailsAtPut(entry, details);
dictionary->ValueAtPut(entry, value);
}
}
@@ -241,7 +249,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
pair->set(component, value);
PropertyDetails details(kAccessor, DONT_ENUM, PropertyCellType::kNoCell,
enum_order);
- dictionary->DetailsAtPut(isolate, entry, details);
+ dictionary->DetailsAtPut(entry, details);
dictionary->ValueAtPut(entry, *pair);
}
}
@@ -252,6 +260,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
// Helper class that eases building of the properties, elements and computed
// properties templates.
+template <typename LocalIsolate>
class ObjectDescriptor {
public:
void IncComputedCount() { ++computed_count_; }
@@ -280,35 +289,38 @@ class ObjectDescriptor {
return computed_properties_;
}
- void CreateTemplates(Isolate* isolate) {
- Factory* factory = isolate->factory();
+ void CreateTemplates(LocalIsolate* isolate) {
+ auto* factory = isolate->factory();
descriptor_array_template_ = factory->empty_descriptor_array();
- properties_dictionary_template_ = factory->empty_property_dictionary();
+ properties_dictionary_template_ =
+ Handle<NameDictionary>::cast(factory->empty_property_dictionary());
if (property_count_ || computed_count_ || property_slack_) {
if (HasDictionaryProperties()) {
properties_dictionary_template_ = NameDictionary::New(
- isolate, property_count_ + computed_count_ + property_slack_);
+ isolate, property_count_ + computed_count_ + property_slack_,
+ AllocationType::kOld);
} else {
descriptor_array_template_ = DescriptorArray::Allocate(
- isolate, 0, property_count_ + property_slack_);
+ isolate, 0, property_count_ + property_slack_,
+ AllocationType::kOld);
}
}
elements_dictionary_template_ =
element_count_ || computed_count_
- ? NumberDictionary::New(isolate, element_count_ + computed_count_)
+ ? NumberDictionary::New(isolate, element_count_ + computed_count_,
+ AllocationType::kOld)
: factory->empty_slow_element_dictionary();
computed_properties_ =
computed_count_
- ? factory->NewFixedArray(computed_count_ *
- ClassBoilerplate::kFullComputedEntrySize)
+ ? factory->NewFixedArray(computed_count_, AllocationType::kOld)
: factory->empty_fixed_array();
temp_handle_ = handle(Smi::zero(), isolate);
}
- void AddConstant(Isolate* isolate, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attribs) {
+ void AddConstant(LocalIsolate* isolate, Handle<Name> name,
+ Handle<Object> value, PropertyAttributes attribs) {
bool is_accessor = value->IsAccessorInfo();
DCHECK(!value->IsAccessorPair());
if (HasDictionaryProperties()) {
@@ -326,7 +338,7 @@ class ObjectDescriptor {
}
}
- void AddNamedProperty(Isolate* isolate, Handle<Name> name,
+ void AddNamedProperty(LocalIsolate* isolate, Handle<Name> name,
ClassBoilerplate::ValueKind value_kind,
int value_index) {
Smi value = Smi::FromInt(value_index);
@@ -341,7 +353,7 @@ class ObjectDescriptor {
}
}
- void AddIndexedProperty(Isolate* isolate, uint32_t element,
+ void AddIndexedProperty(LocalIsolate* isolate, uint32_t element,
ClassBoilerplate::ValueKind value_kind,
int value_index) {
Smi value = Smi::FromInt(value_index);
@@ -363,12 +375,11 @@ class ObjectDescriptor {
next_enumeration_index_ = next_index;
}
- void Finalize(Isolate* isolate) {
+ void Finalize(LocalIsolate* isolate) {
if (HasDictionaryProperties()) {
+ DCHECK_EQ(current_computed_index_, computed_properties_->length());
properties_dictionary_template_->set_next_enumeration_index(
next_enumeration_index_);
- computed_properties_ = FixedArray::ShrinkOrEmpty(
- isolate, computed_properties_, current_computed_index_);
} else {
DCHECK(descriptor_array_template_->IsSortedNoDuplicates());
}
@@ -390,36 +401,56 @@ class ObjectDescriptor {
Handle<Object> temp_handle_;
};
+template <typename LocalIsolate>
void ClassBoilerplate::AddToPropertiesTemplate(
- Isolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
+ LocalIsolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
int key_index, ClassBoilerplate::ValueKind value_kind, Smi value) {
AddToDictionaryTemplate(isolate, dictionary, name, key_index, value_kind,
value);
}
+template void ClassBoilerplate::AddToPropertiesTemplate(
+ Isolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
+ int key_index, ClassBoilerplate::ValueKind value_kind, Smi value);
+template void ClassBoilerplate::AddToPropertiesTemplate(
+ OffThreadIsolate* isolate, Handle<NameDictionary> dictionary,
+ Handle<Name> name, int key_index, ClassBoilerplate::ValueKind value_kind,
+ Smi value);
+template <typename LocalIsolate>
void ClassBoilerplate::AddToElementsTemplate(
- Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
+ LocalIsolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
int key_index, ClassBoilerplate::ValueKind value_kind, Smi value) {
AddToDictionaryTemplate(isolate, dictionary, key, key_index, value_kind,
value);
}
+template void ClassBoilerplate::AddToElementsTemplate(
+ Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
+ int key_index, ClassBoilerplate::ValueKind value_kind, Smi value);
+template void ClassBoilerplate::AddToElementsTemplate(
+ OffThreadIsolate* isolate, Handle<NumberDictionary> dictionary,
+ uint32_t key, int key_index, ClassBoilerplate::ValueKind value_kind,
+ Smi value);
+template <typename LocalIsolate>
Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
- Isolate* isolate, ClassLiteral* expr) {
+ LocalIsolate* isolate, ClassLiteral* expr) {
// Create a non-caching handle scope to ensure that the temporary handle used
// by ObjectDescriptor for passing Smis around does not corrupt the handle
// cache in CanonicalHandleScope.
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
- ObjectDescriptor static_desc(kMinimumClassPropertiesCount);
- ObjectDescriptor instance_desc(kMinimumPrototypePropertiesCount);
+ typename LocalIsolate::HandleScopeType scope(isolate);
+ auto* factory = isolate->factory();
+ ObjectDescriptor<LocalIsolate> static_desc(kMinimumClassPropertiesCount);
+ ObjectDescriptor<LocalIsolate> instance_desc(
+ kMinimumPrototypePropertiesCount);
for (int i = 0; i < expr->public_members()->length(); i++) {
ClassLiteral::Property* property = expr->public_members()->at(i);
- ObjectDescriptor& desc =
+ ObjectDescriptor<LocalIsolate>& desc =
property->is_static() ? static_desc : instance_desc;
if (property->is_computed_name()) {
- desc.IncComputedCount();
+ if (property->kind() != ClassLiteral::Property::FIELD) {
+ desc.IncComputedCount();
+ }
} else {
if (property->key()->AsLiteral()->IsPropertyName()) {
desc.IncPropertiesCount();
@@ -500,7 +531,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
continue;
}
- ObjectDescriptor& desc =
+ ObjectDescriptor<LocalIsolate>& desc =
property->is_static() ? static_desc : instance_desc;
if (property->is_computed_name()) {
int computed_name_index = dynamic_argument_index;
@@ -516,8 +547,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
desc.AddIndexedProperty(isolate, index, value_kind, value_index);
} else {
- Handle<String> name =
- key_literal->AsRawPropertyName()->string().get<Factory>();
+ Handle<String> name = key_literal->AsRawPropertyName()->string();
DCHECK(name->IsInternalizedString());
desc.AddNamedProperty(isolate, name, value_kind, value_index);
}
@@ -543,8 +573,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
static_desc.Finalize(isolate);
instance_desc.Finalize(isolate);
- Handle<ClassBoilerplate> class_boilerplate =
- Handle<ClassBoilerplate>::cast(factory->NewFixedArray(kBoileplateLength));
+ Handle<ClassBoilerplate> class_boilerplate = Handle<ClassBoilerplate>::cast(
+ factory->NewFixedArray(kBoileplateLength, AllocationType::kOld));
class_boilerplate->set_flags(0);
class_boilerplate->set_install_class_name_accessor(
@@ -568,5 +598,10 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
return scope.CloseAndEscape(class_boilerplate);
}
+template Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
+ Isolate* isolate, ClassLiteral* expr);
+template Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
+ OffThreadIsolate* isolate, ClassLiteral* expr);
+
} // namespace internal
} // namespace v8
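The shape that recurs throughout this patch — one template <typename LocalIsolate> definition plus explicit instantiations for Isolate and OffThreadIsolate — is what lets the same object-setup code run both on the main thread and during off-thread compilation without being duplicated. A minimal, self-contained sketch of the idiom (MainIsolate and BackgroundIsolate are hypothetical stand-ins, not V8's classes):

#include <iostream>
#include <string>

// Two isolate-like types with the same surface area (hypothetical stand-ins
// for Isolate and OffThreadIsolate).
struct MainIsolate {
  std::string factory_name() const { return "main-thread factory"; }
};
struct BackgroundIsolate {
  std::string factory_name() const { return "off-thread factory"; }
};

// One definition, parameterized over the isolate type, as in
// BuildClassBoilerplate above.
template <typename LocalIsolate>
void BuildTemplates(LocalIsolate* isolate) {
  std::cout << "allocating via " << isolate->factory_name() << "\n";
}

// Explicit instantiations keep the definition out of the header while still
// emitting code for exactly the two supported isolate types.
template void BuildTemplates(MainIsolate* isolate);
template void BuildTemplates(BackgroundIsolate* isolate);

int main() {
  MainIsolate main_isolate;
  BackgroundIsolate background_isolate;
  BuildTemplates(&main_isolate);
  BuildTemplates(&background_isolate);
}

Keeping the definition in the .cc file and relying on the two explicit instantiations also bounds the set of isolate types the code has to support.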
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index c3b5c914e9..79ac4d2f6a 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -70,7 +70,6 @@ class ArrayBoilerplateDescription
void BriefPrintDetails(std::ostream& os);
private:
- DECL_INT_ACCESSORS(flags)
TQ_OBJECT_CONSTRUCTORS(ArrayBoilerplateDescription)
};
@@ -117,17 +116,20 @@ class ClassBoilerplate : public FixedArray {
DECL_ACCESSORS(instance_elements_template, Object)
DECL_ACCESSORS(instance_computed_properties, FixedArray)
- static void AddToPropertiesTemplate(Isolate* isolate,
+ template <typename LocalIsolate>
+ static void AddToPropertiesTemplate(LocalIsolate* isolate,
Handle<NameDictionary> dictionary,
Handle<Name> name, int key_index,
ValueKind value_kind, Smi value);
- static void AddToElementsTemplate(Isolate* isolate,
+ template <typename LocalIsolate>
+ static void AddToElementsTemplate(LocalIsolate* isolate,
Handle<NumberDictionary> dictionary,
uint32_t key, int key_index,
ValueKind value_kind, Smi value);
- static Handle<ClassBoilerplate> BuildClassBoilerplate(Isolate* isolate,
+ template <typename LocalIsolate>
+ static Handle<ClassBoilerplate> BuildClassBoilerplate(LocalIsolate* isolate,
ClassLiteral* expr);
enum {
@@ -141,8 +143,6 @@ class ClassBoilerplate : public FixedArray {
kBoileplateLength // last element
};
- static const int kFullComputedEntrySize = 2;
-
private:
DECL_INT_ACCESSORS(flags)
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 5fe8889779..f0f3a21baf 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -537,8 +537,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
int enumeration_index = original_details.dictionary_index();
DCHECK_GT(enumeration_index, 0);
details = details.set_index(enumeration_index);
- dictionary->SetEntry(isolate(), dictionary_entry(), *name(), *value,
- details);
+ dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
property_details_ = details;
}
state_ = DATA;
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index ba37b98fbe..4317be20c6 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -574,7 +574,7 @@ void Map::NotifyLeafMapLayoutChange(Isolate* isolate) {
if (is_stable()) {
mark_unstable();
dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kPrototypeCheckGroup);
+ DependentCode::kPrototypeCheckGroup);
}
}
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index d2adfd1060..2dc288628c 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -21,8 +21,10 @@
#include "src/objects/oddball.h"
#include "src/objects/property.h"
#include "src/objects/transitions-inl.h"
+#include "src/roots/roots.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
+#include "torque-generated/field-offsets-tq.h"
namespace v8 {
namespace internal {
@@ -73,15 +75,21 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file,
os << "]\n";
}
-Map Map::GetStructMap(Isolate* isolate, InstanceType type) {
+Map Map::GetInstanceTypeMap(ReadOnlyRoots roots, InstanceType type) {
Map map;
switch (type) {
-#define MAKE_CASE(TYPE, Name, name) \
- case TYPE: \
- map = ReadOnlyRoots(isolate).name##_map(); \
+#define MAKE_CASE(TYPE, Name, name) \
+ case TYPE: \
+ map = roots.name##_map(); \
break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
+#define MAKE_CASE(_, TYPE, Name, name) \
+ case TYPE: \
+ map = roots.name##_map(); \
+ break;
+ TORQUE_INTERNAL_CLASS_LIST_GENERATOR(MAKE_CASE, _)
+#undef MAKE_CASE
default:
UNREACHABLE();
}
@@ -259,6 +267,9 @@ VisitorId Map::GetVisitorId(Map map) {
case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
return kVisitUncompiledDataWithPreparseData;
+ case COVERAGE_INFO_TYPE:
+ return kVisitCoverageInfo;
+
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
@@ -284,8 +295,8 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_PROMISE_TYPE:
case JS_REG_EXP_TYPE:
case JS_REG_EXP_STRING_ITERATOR_TYPE:
- case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
- case JS_FINALIZATION_GROUP_TYPE:
+ case JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE:
+ case JS_FINALIZATION_REGISTRY_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_V8_BREAK_ITERATOR_TYPE:
case JS_COLLATOR_TYPE:
@@ -356,6 +367,12 @@ VisitorId Map::GetVisitorId(Map map) {
case SYNTHETIC_MODULE_TYPE:
return kVisitSyntheticModule;
+#define MAKE_TQ_CASE(TYPE, Name) \
+ case TYPE: \
+ return kVisit##Name;
+ TORQUE_BODY_DESCRIPTOR_LIST(MAKE_TQ_CASE)
+#undef MAKE_TQ_CASE
+
default:
UNREACHABLE();
}
@@ -594,10 +611,10 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
DCHECK(!constructor_or_backpointer().IsFunctionTemplateInfo());
set_is_deprecated(true);
if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Deprecate", *this, Map()));
+ LOG(isolate, MapEvent("Deprecate", handle(*this, isolate), Handle<Map>()));
}
dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kTransitionGroup);
+ DependentCode::kTransitionGroup);
NotifyLeafMapLayoutChange(isolate);
}
@@ -778,7 +795,7 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
new_representation, wrapped_type);
field_owner->dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kFieldOwnerGroup);
+ DependentCode::kFieldOwnerGroup);
if (FLAG_trace_generalization) {
map->PrintGeneralization(
@@ -1511,7 +1528,7 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
}
}
if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
+ LOG(isolate, MapEvent("Normalize", fast_map, new_map, reason));
}
fast_map->NotifyLeafMapLayoutChange(isolate);
return new_map;
@@ -1697,12 +1714,12 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
if (parent->is_prototype_map()) {
DCHECK(child->is_prototype_map());
if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Transition", *parent, *child, "prototype", *name));
+ LOG(isolate, MapEvent("Transition", parent, child, "prototype", name));
}
} else {
TransitionsAccessor(isolate, parent).Insert(name, child, flag);
if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Transition", *parent, *child, "", *name));
+ LOG(isolate, MapEvent("Transition", parent, child, "", name));
}
}
}
@@ -1748,8 +1765,8 @@ Handle<Map> Map::CopyReplaceDescriptors(
(map->is_prototype_map() ||
!(flag == INSERT_TRANSITION &&
TransitionsAccessor(isolate, map).CanHaveMoreTransitions()))) {
- LOG(isolate, MapEvent("ReplaceDescriptors", *map, *result, reason,
- maybe_name.is_null() ? Name() : *name));
+ LOG(isolate, MapEvent("ReplaceDescriptors", map, result, reason,
+ maybe_name.is_null() ? Handle<HeapObject>() : name));
}
return result;
}
@@ -2193,7 +2210,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
// Deoptimize all code that embeds the previous initial map.
initial_map->dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kInitialMapChangedGroup);
+ DependentCode::kInitialMapChangedGroup);
if (!result->EquivalentToForNormalization(*map,
CLEAR_INOBJECT_PROPERTIES)) {
result =
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 60f1fb6e9c..0b817e3bab 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -25,6 +25,7 @@ enum InstanceType : uint16_t;
#define DATA_ONLY_VISITOR_ID_LIST(V) \
V(BigInt) \
V(ByteArray) \
+ V(CoverageInfo) \
V(DataObject) \
V(FeedbackMetadata) \
V(FixedDoubleArray) \
@@ -83,6 +84,13 @@ enum InstanceType : uint16_t;
V(WeakArray) \
V(WeakCell)
+#define TORQUE_OBJECT_BODY_TO_VISITOR_ID_LIST_ADAPTER(V, TYPE, TypeName) \
+ V(TypeName)
+
+#define TORQUE_VISITOR_ID_LIST(V) \
+ TORQUE_BODY_DESCRIPTOR_LIST_GENERATOR( \
+ TORQUE_OBJECT_BODY_TO_VISITOR_ID_LIST_ADAPTER, V)
+
// Objects with the same visitor id are processed in the same way by
// the heap visitors. The visitor ids for data only objects must precede
// other visitor ids. We rely on kDataOnlyVisitorIdCount for quick check
@@ -91,8 +99,9 @@ enum VisitorId {
#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
DATA_ONLY_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL) kDataOnlyVisitorIdCount,
POINTER_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+ TORQUE_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
#undef VISITOR_ID_ENUM_DECL
- kVisitorIdCount
+ kVisitorIdCount
};
enum class ObjectFields {
@@ -253,7 +262,9 @@ class Map : public HeapObject {
DECL_PRIMITIVE_ACCESSORS(relaxed_bit_field, byte)
// Bit positions for |bit_field|.
- using Bits1 = TorqueGeneratedMapBitFields1Fields;
+ struct Bits1 {
+ DEFINE_TORQUE_GENERATED_MAP_BIT_FIELDS1()
+ };
//
// Bit field 2.
@@ -261,7 +272,9 @@ class Map : public HeapObject {
DECL_PRIMITIVE_ACCESSORS(bit_field2, byte)
// Bit positions for |bit_field2|.
- using Bits2 = TorqueGeneratedMapBitFields2Fields;
+ struct Bits2 {
+ DEFINE_TORQUE_GENERATED_MAP_BIT_FIELDS2()
+ };
//
// Bit field 3.
@@ -273,7 +286,9 @@ class Map : public HeapObject {
V8_INLINE void clear_padding();
// Bit positions for |bit_field3|.
- using Bits3 = TorqueGeneratedMapBitFields3Fields;
+ struct Bits3 {
+ DEFINE_TORQUE_GENERATED_MAP_BIT_FIELDS3()
+ };
// Ensure that Torque-defined bit widths for |bit_field3| are as expected.
STATIC_ASSERT(Bits3::EnumLengthBits::kSize == kDescriptorIndexBitCount);
@@ -787,7 +802,7 @@ class Map : public HeapObject {
inline bool CanTransition() const;
- static Map GetStructMap(Isolate* isolate, InstanceType type);
+ static Map GetInstanceTypeMap(ReadOnlyRoots roots, InstanceType type);
#define DECL_TESTER(Type, ...) inline bool Is##Type##Map() const;
INSTANCE_TYPE_CHECKERS(DECL_TESTER)
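The DEFINE_TORQUE_GENERATED_MAP_BIT_FIELDS*() macros above expand to a family of BitField-style aliases that pack several small fields into one 32-bit word. A simplified sketch of the mechanism, under the assumption that the generated code follows V8's usual BitField pattern (the field names here are illustrative):

#include <cstdint>
#include <iostream>

// Simplified version of the BitField helper the generated macros expand to.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t packed, T value) {
    return (packed & ~kMask) | encode(value);
  }
};

// What a DEFINE_TORQUE_GENERATED_*_BIT_FIELDS() expansion looks like in
// spirit: adjacent fields laid out in one 32-bit word.
struct Bits1 {
  using HasNonInstancePrototypeBit = BitField<bool, 0, 1>;
  using IsCallableBit = BitField<bool, 1, 1>;
  using ElementsKindBits = BitField<uint8_t, 2, 5>;
};

int main() {
  uint32_t bit_field = 0;
  bit_field = Bits1::IsCallableBit::update(bit_field, true);
  bit_field = Bits1::ElementsKindBits::update(bit_field, 7);
  std::cout << Bits1::IsCallableBit::decode(bit_field) << " "
            << int(Bits1::ElementsKindBits::decode(bit_field)) << "\n";
}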
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index aaf790cc8a..ce03c39500 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -36,12 +36,8 @@ ACCESSORS(Module, exception, Object, kExceptionOffset)
SMI_ACCESSORS(Module, status, kStatusOffset)
SMI_ACCESSORS(Module, hash, kHashOffset)
-TQ_SMI_ACCESSORS(SourceTextModule, dfs_index)
-TQ_SMI_ACCESSORS(SourceTextModule, dfs_ancestor_index)
-TQ_SMI_ACCESSORS(SourceTextModule, flags)
BOOL_ACCESSORS(SourceTextModule, flags, async, kAsyncBit)
BOOL_ACCESSORS(SourceTextModule, flags, async_evaluating, kAsyncEvaluatingBit)
-TQ_SMI_ACCESSORS(SourceTextModule, pending_async_dependencies)
ACCESSORS(SourceTextModule, async_parent_modules, ArrayList,
kAsyncParentModulesOffset)
ACCESSORS(SourceTextModule, top_level_capability, HeapObject,
@@ -53,11 +49,6 @@ SourceTextModuleInfo SourceTextModule::info() const {
: GetSharedFunctionInfo().scope_info().ModuleDescriptorInfo();
}
-TQ_SMI_ACCESSORS(SourceTextModuleInfoEntry, module_request)
-TQ_SMI_ACCESSORS(SourceTextModuleInfoEntry, cell_index)
-TQ_SMI_ACCESSORS(SourceTextModuleInfoEntry, beg_pos)
-TQ_SMI_ACCESSORS(SourceTextModuleInfoEntry, end_pos)
-
OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfo, FixedArray)
CAST_ACCESSOR(SourceTextModuleInfo)
@@ -121,10 +112,13 @@ class UnorderedModuleSet
};
void SourceTextModule::AddAsyncParentModule(Isolate* isolate,
- Handle<SourceTextModule> module) {
+ Handle<SourceTextModule> module,
+ Handle<SourceTextModule> parent) {
+ Handle<ArrayList> async_parent_modules(module->async_parent_modules(),
+ isolate);
Handle<ArrayList> new_array_list =
- ArrayList::Add(isolate, handle(async_parent_modules(), isolate), module);
- set_async_parent_modules(*new_array_list);
+ ArrayList::Add(isolate, async_parent_modules, parent);
+ module->set_async_parent_modules(*new_array_list);
}
Handle<SourceTextModule> SourceTextModule::GetAsyncParentModule(
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index 783beb12a5..533d8b000e 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -69,14 +69,13 @@ class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
int NameShortPrint(Vector<char> str);
// Mask constant for checking if a name has a computed hash code
- // and if it is a string that is an array index. The least significant bit
+ // and if it is a string that is an integer index. The least significant bit
// indicates whether a hash code has been computed. If the hash code has
// been computed the 2nd bit tells whether the string can be used as an
- // array index.
+ // integer index (up to MAX_SAFE_INTEGER).
static const int kHashNotComputedMask = 1;
- static const int kIsNotArrayIndexMask = 1 << 1;
- static const int kIsNotIntegerIndexMask = 1 << 2;
- static const int kNofHashBitFields = 3;
+ static const int kIsNotIntegerIndexMask = 1 << 1;
+ static const int kNofHashBitFields = 2;
// Shift constant retrieving hash code from hash field.
static const int kHashShift = kNofHashBitFields;
@@ -126,11 +125,11 @@ class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
static const unsigned int kDoesNotContainCachedArrayIndexMask =
(~static_cast<unsigned>(kMaxCachedArrayIndexLength)
<< ArrayIndexLengthBits::kShift) |
- kIsNotArrayIndexMask;
+ kIsNotIntegerIndexMask;
// Value of empty hash field indicating that the hash is not computed.
static const int kEmptyHashField =
- kIsNotIntegerIndexMask | kIsNotArrayIndexMask | kHashNotComputedMask;
+ kIsNotIntegerIndexMask | kHashNotComputedMask;
protected:
static inline bool IsHashFieldComputed(uint32_t field);
@@ -139,9 +138,10 @@ class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
};
// ES6 symbols.
-class Symbol : public TorqueGeneratedSymbol<Symbol, Name>,
- public TorqueGeneratedSymbolFlagsFields {
+class Symbol : public TorqueGeneratedSymbol<Symbol, Name> {
public:
+ DEFINE_TORQUE_GENERATED_SYMBOL_FLAGS()
+
// [is_private]: Whether this is a private symbol. Private symbols can only
// be used to designate own properties of objects.
DECL_BOOLEAN_ACCESSORS(is_private)
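After this change the low bits of a name's hash field carry two flags instead of three: bit 0 records whether the hash has been computed, bit 1 whether the string fails to be an integer index, and the hash value itself sits above them. A small standalone sketch of that layout (the constants mirror the header above; the helper names are illustrative):

#include <cassert>
#include <cstdint>

// Post-change layout of the low bits of Name's hash field.
constexpr int kHashNotComputedMask = 1;
constexpr int kIsNotIntegerIndexMask = 1 << 1;
constexpr int kNofHashBitFields = 2;
constexpr int kHashShift = kNofHashBitFields;
constexpr int kEmptyHashField = kIsNotIntegerIndexMask | kHashNotComputedMask;

constexpr bool IsHashComputed(uint32_t field) {
  return (field & kHashNotComputedMask) == 0;
}
constexpr bool IsIntegerIndex(uint32_t field) {
  return (field & kIsNotIntegerIndexMask) == 0;
}

int main() {
  static_assert(kEmptyHashField == 0b11, "both flags set when hash is empty");
  // A computed hash for a non-index string: bit 0 cleared, bit 1 set,
  // hash value stored above the flag bits.
  uint32_t field = (0xABCu << kHashShift) | kIsNotIntegerIndexMask;
  assert(IsHashComputed(field));
  assert(!IsIntegerIndex(field));
}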
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 2110ad772b..11b5c034c9 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -5,6 +5,10 @@
#ifndef V8_OBJECTS_OBJECT_LIST_MACROS_H_
#define V8_OBJECTS_OBJECT_LIST_MACROS_H_
+#include "torque-generated/instance-types-tq.h"
+
+#define TORQUE_INTERNAL_CLASS_NAMES_ADAPTER(V, NAME, Name, name) V(Name)
+
namespace v8 {
namespace internal {
@@ -140,8 +144,8 @@ class ZoneForwardList;
V(JSDataView) \
V(JSDate) \
V(JSError) \
- V(JSFinalizationGroup) \
- V(JSFinalizationGroupCleanupIterator) \
+ V(JSFinalizationRegistry) \
+ V(JSFinalizationRegistryCleanupIterator) \
V(JSFunction) \
V(JSFunctionOrBoundFunction) \
V(JSGeneratorObject) \
@@ -233,7 +237,8 @@ class ZoneForwardList;
V(WasmTableObject) \
V(WeakFixedArray) \
V(WeakArrayList) \
- V(WeakCell)
+ V(WeakCell) \
+ TORQUE_INTERNAL_CLASS_LIST_GENERATOR(TORQUE_INTERNAL_CLASS_NAMES_ADAPTER, V)
#ifdef V8_INTL_SUPPORT
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 518f59507b..2c913c9bc5 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -229,14 +229,6 @@
TaggedField<Smi, offset>::Relaxed_Store(*this, Smi::FromInt(value)); \
}
-#define TQ_SMI_ACCESSORS(holder, name) \
- int holder::name() const { \
- return TorqueGenerated##holder<holder, Super>::name().value(); \
- } \
- void holder::set_##name(int value) { \
- TorqueGenerated##holder<holder, Super>::set_##name(Smi::FromInt(value)); \
- }
-
#define BOOL_GETTER(holder, field, name, offset) \
bool holder::name() const { return BooleanBit::get(field(), offset); }
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 872acdec22..60c508e336 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -248,6 +248,27 @@ class JSWeakRef::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class JSFinalizationRegistry::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, JSObject::BodyDescriptor::kStartOffset,
+ kNextDirtyOffset, v);
+ IterateCustomWeakPointer(obj, kNextDirtyOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kNextDirtyOffset + kTaggedSize,
+ object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
class SharedFunctionInfo::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -402,7 +423,7 @@ class V8_EXPORT_PRIVATE SmallOrderedHashTable<Derived>::BodyDescriptor final
static inline int SizeOf(Map map, HeapObject obj) {
Derived table = Derived::cast(obj);
- return table.SizeFor(table.Capacity());
+ return Derived::SizeFor(table.Capacity());
}
};
@@ -600,6 +621,20 @@ class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
+class CoverageInfo::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ CoverageInfo info = CoverageInfo::cast(object);
+ return CoverageInfo::SizeFor(info.slot_count());
+ }
+};
+
class Code::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(kRelocationInfoOffset + kTaggedSize ==
@@ -656,7 +691,7 @@ class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
SeqOneByteString string = SeqOneByteString::cast(obj);
- return string.SizeFor(string.synchronized_length());
+ return SeqOneByteString::SizeFor(string.synchronized_length());
}
};
@@ -670,7 +705,7 @@ class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
SeqTwoByteString string = SeqTwoByteString::cast(obj);
- return string.SizeFor(string.synchronized_length());
+ return SeqTwoByteString::SizeFor(string.synchronized_length());
}
};
@@ -912,6 +947,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<FeedbackCell::BodyDescriptor>(p1, p2, p3, p4);
case FEEDBACK_VECTOR_TYPE:
return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3, p4);
+ case COVERAGE_INFO_TYPE:
+ return Op::template apply<CoverageInfo::BodyDescriptor>(p1, p2, p3, p4);
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
@@ -942,8 +979,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
- case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
- case JS_FINALIZATION_GROUP_TYPE:
+ case JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE:
+ case JS_FINALIZATION_REGISTRY_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_V8_BREAK_ITERATOR_TYPE:
case JS_COLLATOR_TYPE:
@@ -1063,6 +1100,12 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case SYNTHETIC_MODULE_TYPE:
return Op::template apply<SyntheticModule::BodyDescriptor>(p1, p2, p3,
p4);
+#define MAKE_TORQUE_BODY_DESCRIPTOR_APPLY(TYPE, TypeName) \
+ case TYPE: \
+ return Op::template apply<TypeName::BodyDescriptor>(p1, p2, p3, p4);
+ TORQUE_BODY_DESCRIPTOR_LIST(MAKE_TORQUE_BODY_DESCRIPTOR_APPLY)
+#undef MAKE_TORQUE_BODY_DESCRIPTOR_APPLY
+
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
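Body descriptors like JSFinalizationRegistry::BodyDescriptor and CoverageInfo::BodyDescriptor above are how the GC learns, per instance type, which slots of an object hold tagged pointers and how large the object is. A toy, self-contained sketch of the concept (ToyObject, SlotVisitor, and the descriptor are invented for illustration):

#include <iostream>
#include <vector>

// Toy heap object: two pointer fields separated by one raw data field.
struct ToyObject {
  void* first;
  int raw_data;  // not a pointer; the GC must skip it
  void* second;
};

// Visitor interface the descriptor drives (ObjectVisitor in spirit).
struct SlotVisitor {
  std::vector<void**> visited;
  void VisitPointer(void** slot) { visited.push_back(slot); }
};

// The descriptor encodes, per type, which offsets hold pointers and how
// the object's size is derived.
struct ToyObjectBodyDescriptor {
  template <typename Visitor>
  static void IterateBody(ToyObject* obj, Visitor* v) {
    v->VisitPointer(&obj->first);  // raw_data is deliberately skipped
    v->VisitPointer(&obj->second);
  }
  static int SizeOf(const ToyObject*) { return sizeof(ToyObject); }
};

int main() {
  ToyObject obj{nullptr, 42, nullptr};
  SlotVisitor visitor;
  ToyObjectBodyDescriptor::IterateBody(&obj, &visitor);
  std::cout << visitor.visited.size() << " pointer slots, "
            << ToyObjectBodyDescriptor::SizeOf(&obj) << " bytes\n";
}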
diff --git a/deps/v8/src/objects/objects-body-descriptors.h b/deps/v8/src/objects/objects-body-descriptors.h
index 728708f436..13adf4c3db 100644
--- a/deps/v8/src/objects/objects-body-descriptors.h
+++ b/deps/v8/src/objects/objects-body-descriptors.h
@@ -180,6 +180,10 @@ class SubclassBodyDescriptor final : public BodyDescriptorBase {
}
};
+#define TORQUE_BODY_DESCRIPTOR_LIST_ADAPTER(V, TYPE, TypeName) V(TYPE, TypeName)
+#define TORQUE_BODY_DESCRIPTOR_LIST(V) \
+ TORQUE_BODY_DESCRIPTOR_LIST_GENERATOR(TORQUE_BODY_DESCRIPTOR_LIST_ADAPTER, V)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 24176ac65c..a830e13ed1 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -159,9 +159,7 @@ namespace internal {
wasm_indirect_function_table) \
V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data)
-#define STRUCT_LIST_GENERATOR(V, _) \
- STRUCT_LIST_GENERATOR_BASE(V, _) \
- TORQUE_STRUCT_LIST_GENERATOR(V, _)
+#define STRUCT_LIST_GENERATOR(V, _) STRUCT_LIST_GENERATOR_BASE(V, _)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
@@ -176,6 +174,15 @@ namespace internal {
// Produces (Map, struct_name_map, StructNameMap) entries
#define STRUCT_MAPS_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
+// Adapts one TORQUE_INTERNAL_CLASS_LIST_GENERATOR entry to the maps list entry
+#define TORQUE_INTERNAL_CLASS_LIST_MAPS_ADAPTER(V, NAME, Name, name) \
+ V(Map, name##_map, Name##Map)
+
+// Produces (Map, name_map, NameMap) entries.
+#define TORQUE_INTERNAL_CLASS_MAPS_LIST(V) \
+ TORQUE_INTERNAL_CLASS_LIST_GENERATOR( \
+ TORQUE_INTERNAL_CLASS_LIST_MAPS_ADAPTER, V)
+
//
// The following macros define the list of allocation site objects and the
// list of their maps.
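The adapter macros in this file are the usual X-macro technique: a single authoritative list macro takes a per-entry callback V, and small adapters reshape each entry for different consumers (enum definitions, map getters, switch cases). A compact standalone illustration of the same trick (SHAPE_LIST and its adapters are invented):

#include <iostream>
#include <string>

// One authoritative list; V is the per-entry adapter.
#define SHAPE_LIST(V)    \
  V(CIRCLE_TYPE, Circle) \
  V(SQUARE_TYPE, Square)

// Adapter 1: generate an enum from the list.
#define MAKE_ENUM(TYPE, Name) TYPE,
enum ShapeType { SHAPE_LIST(MAKE_ENUM) kShapeTypeCount };
#undef MAKE_ENUM

// Adapter 2: generate a switch mapping the enum back to a name — the same
// shape as the MAKE_CASE expansions in map.cc above.
std::string ShapeName(ShapeType type) {
  switch (type) {
#define MAKE_CASE(TYPE, Name) \
  case TYPE:                  \
    return #Name;
    SHAPE_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      return "unknown";
  }
}

int main() {
  std::cout << ShapeName(CIRCLE_TYPE) << " " << ShapeName(SQUARE_TYPE) << "\n";
}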
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index fdfb251823..9877b67c19 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -40,6 +40,7 @@
#include "src/objects/smi-inl.h"
#include "src/objects/tagged-field-inl.h"
#include "src/objects/tagged-impl-inl.h"
+#include "src/objects/tagged-index.h"
#include "src/objects/templates.h"
#include "src/sanitizer/tsan.h"
#include "torque-generated/class-definitions-tq-inl.h"
@@ -74,6 +75,10 @@ DEF_GETTER(HeapObject, IsClassBoilerplate, bool) {
return IsFixedArrayExact(isolate);
}
+bool Object::IsTaggedIndex() const {
+ return IsSmi() && TaggedIndex::IsValid(TaggedIndex(ptr()).value());
+}
+
#define IS_TYPE_FUNCTION_DEF(type_) \
bool Object::Is##type_() const { \
return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
@@ -90,6 +95,9 @@ IS_TYPE_FUNCTION_DEF(SmallOrderedHashTable)
bool Object::Is##Type(Isolate* isolate) const { \
return Is##Type(ReadOnlyRoots(isolate)); \
} \
+ bool Object::Is##Type(OffThreadIsolate* isolate) const { \
+ return Is##Type(ReadOnlyRoots(isolate)); \
+ } \
bool Object::Is##Type(ReadOnlyRoots roots) const { \
return *this == roots.Value(); \
} \
@@ -99,6 +107,9 @@ IS_TYPE_FUNCTION_DEF(SmallOrderedHashTable)
bool HeapObject::Is##Type(Isolate* isolate) const { \
return Object::Is##Type(isolate); \
} \
+ bool HeapObject::Is##Type(OffThreadIsolate* isolate) const { \
+ return Object::Is##Type(isolate); \
+ } \
bool HeapObject::Is##Type(ReadOnlyRoots roots) const { \
return Object::Is##Type(roots); \
} \
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 9b53019297..c455629651 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -28,13 +28,17 @@
#include "src/common/message-template.h"
#include "src/date/date.h"
#include "src/debug/debug.h"
+#include "src/diagnostics/code-tracer.h"
#include "src/execution/arguments.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/microtask-queue.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/execution/protectors-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/ic/ic.h"
#include "src/init/bootstrapper.h"
@@ -122,6 +126,7 @@
#include "torque-generated/class-definitions-tq-inl.h"
#include "torque-generated/internal-class-definitions-tq-inl.h"
+#include "torque-generated/objects-body-descriptors-tq-inl.h"
namespace v8 {
namespace internal {
@@ -581,7 +586,7 @@ MaybeHandle<Object> Object::ConvertToIndex(Isolate* isolate,
if (input->IsUndefined(isolate)) return handle(Smi::zero(), isolate);
ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(isolate, input), Object);
if (input->IsSmi() && Smi::ToInt(*input) >= 0) return input;
- double len = DoubleToInteger(input->Number()) + 0.0;
+ double len = DoubleToInteger(input->Number());
auto js_len = isolate->factory()->NewNumber(len);
if (len < 0.0 || len > kMaxSafeInteger) {
THROW_NEW_ERROR(isolate, NewRangeError(error_index, js_len), Object);
@@ -1317,6 +1322,8 @@ FunctionTemplateRareData FunctionTemplateInfo::AllocateFunctionTemplateRareData(
FUNCTION_TEMPLATE_RARE_DATA_TYPE, AllocationType::kOld);
Handle<FunctionTemplateRareData> rare_data =
i::Handle<FunctionTemplateRareData>::cast(struct_obj);
+ rare_data->set_c_function(Smi(0));
+ rare_data->set_c_signature(Smi(0));
function_template_info->set_rare_data(*rare_data);
return *rare_data;
}
@@ -2152,7 +2159,8 @@ void ClassPositions::BriefPrintDetails(std::ostream& os) {
}
void ArrayBoilerplateDescription::BriefPrintDetails(std::ostream& os) {
- os << " " << elements_kind() << ", " << Brief(constant_elements());
+ os << " " << ElementsKindToString(elements_kind()) << ", "
+ << Brief(constant_elements());
}
void CallableTask::BriefPrintDetails(std::ostream& os) {
@@ -2270,9 +2278,21 @@ int HeapObject::SizeFromMap(Map map) const {
PreparseData data = PreparseData::unchecked_cast(*this);
return PreparseData::SizeFor(data.data_length(), data.children_length());
}
+#define MAKE_TORQUE_SIZE_FOR(TYPE, TypeName) \
+ if (instance_type == TYPE) { \
+ TypeName instance = TypeName::unchecked_cast(*this); \
+ return TypeName::SizeFor(instance); \
+ }
+ TORQUE_BODY_DESCRIPTOR_LIST(MAKE_TORQUE_SIZE_FOR)
+#undef MAKE_TORQUE_SIZE_FOR
+
if (instance_type == CODE_TYPE) {
return Code::unchecked_cast(*this).CodeSize();
}
+ if (instance_type == COVERAGE_INFO_TYPE) {
+ return CoverageInfo::SizeFor(
+ CoverageInfo::unchecked_cast(*this).slot_count());
+ }
DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
return EmbedderDataArray::SizeFor(
EmbedderDataArray::unchecked_cast(*this).length());
@@ -4262,13 +4282,22 @@ Handle<FrameArray> FrameArray::EnsureSpace(Isolate* isolate,
EnsureSpaceInFixedArray(isolate, array, length));
}
-Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
+template <typename LocalIsolate>
+Handle<DescriptorArray> DescriptorArray::Allocate(LocalIsolate* isolate,
int nof_descriptors,
- int slack) {
+ int slack,
+ AllocationType allocation) {
return nof_descriptors + slack == 0
? isolate->factory()->empty_descriptor_array()
- : isolate->factory()->NewDescriptorArray(nof_descriptors, slack);
+ : isolate->factory()->NewDescriptorArray(nof_descriptors, slack,
+ allocation);
}
+template Handle<DescriptorArray> DescriptorArray::Allocate(
+ Isolate* isolate, int nof_descriptors, int slack,
+ AllocationType allocation);
+template Handle<DescriptorArray> DescriptorArray::Allocate(
+ OffThreadIsolate* isolate, int nof_descriptors, int slack,
+ AllocationType allocation);
void DescriptorArray::Initialize(EnumCache enum_cache,
HeapObject undefined_value,
@@ -4622,7 +4651,6 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
value <<= String::ArrayIndexValueBits::kShift;
value |= length << String::ArrayIndexLengthBits::kShift;
- DCHECK_EQ(value & String::kIsNotArrayIndexMask, 0);
DCHECK_EQ(value & String::kIsNotIntegerIndexMask, 0);
DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength,
Name::ContainsCachedArrayIndex(value));
@@ -4700,8 +4728,9 @@ int Script::GetEvalPosition(Isolate* isolate, Handle<Script> script) {
return position;
}
-void Script::InitLineEnds(Handle<Script> script) {
- Isolate* isolate = script->GetIsolate();
+template <typename LocalIsolate>
+// static
+void Script::InitLineEnds(LocalIsolate* isolate, Handle<Script> script) {
if (!script->line_ends().IsUndefined(isolate)) return;
DCHECK(script->type() != Script::TYPE_WASM ||
script->source_mapping_url().IsString());
@@ -4720,11 +4749,17 @@ void Script::InitLineEnds(Handle<Script> script) {
DCHECK(script->line_ends().IsFixedArray());
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void Script::InitLineEnds(
+ Isolate* isolate, Handle<Script> script);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void Script::InitLineEnds(
+ OffThreadIsolate* isolate, Handle<Script> script);
+
bool Script::GetPositionInfo(Handle<Script> script, int position,
PositionInfo* info, OffsetFlag offset_flag) {
// For wasm, we do not create an artificial line_ends array, but do the
// translation directly.
- if (script->type() != Script::TYPE_WASM) InitLineEnds(script);
+ if (script->type() != Script::TYPE_WASM)
+ InitLineEnds(script->GetIsolate(), script);
return script->GetPositionInfo(position, info, offset_flag);
}
@@ -4882,8 +4917,9 @@ Object Script::GetNameOrSourceURL() {
return name();
}
+template <typename LocalIsolate>
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- Isolate* isolate, const FunctionLiteral* fun) {
+ LocalIsolate* isolate, const FunctionLiteral* fun) {
CHECK_NE(fun->function_literal_id(), kFunctionLiteralIdInvalid);
// If this check fails, the problem is most probably the function id
// renumbering done by AstFunctionLiteralIdReindexer; in particular, that
@@ -4898,6 +4934,10 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
}
return handle(SharedFunctionInfo::cast(heap_object), isolate);
}
+template MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
+ Isolate* isolate, const FunctionLiteral* fun);
+template MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
+ OffThreadIsolate* isolate, const FunctionLiteral* fun);
Script::Iterator::Iterator(Isolate* isolate)
: iterator_(isolate->heap()->script_list()) {}
@@ -4951,6 +4991,7 @@ void SharedFunctionInfo::Init(ReadOnlyRoots ro_roots, int unique_id) {
// All flags default to false or 0, except ConstructAsBuiltinBit just because
// we're using the kIllegal builtin.
set_flags(ConstructAsBuiltinBit::encode(true));
+ set_flags2(0);
UpdateFunctionMapIndex();
@@ -5316,15 +5357,17 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
CodeDisableOptEvent(handle(abstract_code(), GetIsolate()),
handle(*this, GetIsolate())));
if (FLAG_trace_opt) {
- PrintF("[disabled optimization for ");
- ShortPrint();
- PrintF(", reason: %s]\n", GetBailoutReason(reason));
+ CodeTracer::Scope scope(GetIsolate()->GetCodeTracer());
+ PrintF(scope.file(), "[disabled optimization for ");
+ ShortPrint(scope.file());
+ PrintF(scope.file(), ", reason: %s]\n", GetBailoutReason(reason));
}
}
// static
+template <typename LocalIsolate>
void SharedFunctionInfo::InitFromFunctionLiteral(
- Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ LocalIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
FunctionLiteral* lit, bool is_toplevel) {
DCHECK(!shared_info->name_or_scope_info().IsScopeInfo());
@@ -5344,6 +5387,14 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
IsClassConstructor(lit->kind()));
shared_info->set_requires_instance_members_initializer(
lit->requires_instance_members_initializer());
+ DCHECK_IMPLIES(lit->class_scope_has_private_brand(),
+ IsClassConstructor(lit->kind()));
+ shared_info->set_class_scope_has_private_brand(
+ lit->class_scope_has_private_brand());
+ DCHECK_IMPLIES(lit->has_static_private_methods_or_accessors(),
+ IsClassConstructor(lit->kind()));
+ shared_info->set_has_static_private_methods_or_accessors(
+ lit->has_static_private_methods_or_accessors());
shared_info->set_is_toplevel(is_toplevel);
DCHECK(shared_info->outer_scope_info().IsTheHole());
@@ -5381,20 +5432,29 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
ProducedPreparseData* scope_data = lit->produced_preparse_data();
if (scope_data != nullptr) {
- Handle<PreparseData> preparse_data =
- scope_data->Serialize(shared_info->GetIsolate());
+ Handle<PreparseData> preparse_data = scope_data->Serialize(isolate);
data = isolate->factory()->NewUncompiledDataWithPreparseData(
- lit->inferred_name(), lit->start_position(), lit->end_position(),
- preparse_data);
+ lit->GetInferredName(isolate), lit->start_position(),
+ lit->end_position(), preparse_data);
} else {
data = isolate->factory()->NewUncompiledDataWithoutPreparseData(
- lit->inferred_name(), lit->start_position(), lit->end_position());
+ lit->GetInferredName(isolate), lit->start_position(),
+ lit->end_position());
}
shared_info->set_uncompiled_data(*data);
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void SharedFunctionInfo::
+ InitFromFunctionLiteral<Isolate>(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info,
+ FunctionLiteral* lit, bool is_toplevel);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void SharedFunctionInfo::
+ InitFromFunctionLiteral<OffThreadIsolate>(
+ OffThreadIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ FunctionLiteral* lit, bool is_toplevel);
+
uint16_t SharedFunctionInfo::get_property_estimate_from_literal(
FunctionLiteral* literal) {
int estimate = literal->expected_property_count();
@@ -5408,7 +5468,11 @@ uint16_t SharedFunctionInfo::get_property_estimate_from_literal(
void SharedFunctionInfo::UpdateExpectedNofPropertiesFromEstimate(
FunctionLiteral* literal) {
- set_expected_nof_properties(get_property_estimate_from_literal(literal));
+ // Limit the actual estimate to fit in an 8-bit field; we will never
+ // allocate more than this in any case.
+ STATIC_ASSERT(JSObject::kMaxInObjectProperties <= kMaxUInt8);
+ int estimate = get_property_estimate_from_literal(literal);
+ set_expected_nof_properties(std::min(estimate, kMaxUInt8));
}
void SharedFunctionInfo::UpdateAndFinalizeExpectedNofPropertiesFromEstimate(
@@ -5983,10 +6047,20 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
// 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
// «promise, resolution, thenAction»).
+
+ // According to HTML, we use the context of the then function (|thenAction|)
+ // as the context of the microtask. See step 3 of HTML's EnqueueJob:
+ // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
+ Handle<NativeContext> then_context;
+ if (!JSReceiver::GetContextForMicrotask(Handle<JSReceiver>::cast(then_action))
+ .ToHandle(&then_context)) {
+ then_context = isolate->native_context();
+ }
+
Handle<PromiseResolveThenableJobTask> task =
isolate->factory()->NewPromiseResolveThenableJobTask(
promise, Handle<JSReceiver>::cast(then_action),
- Handle<JSReceiver>::cast(resolution), isolate->native_context());
+ Handle<JSReceiver>::cast(resolution), then_context);
if (isolate->debug()->is_active() && resolution->IsJSPromise()) {
// Mark the dependency of the new {promise} on the {resolution}.
Object::SetProperty(isolate, resolution,
@@ -5994,8 +6068,7 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
promise)
.Check();
}
- MicrotaskQueue* microtask_queue =
- isolate->native_context()->microtask_queue();
+ MicrotaskQueue* microtask_queue = then_context->microtask_queue();
if (microtask_queue) microtask_queue->EnqueueMicrotask(*task);
// 13. Return undefined.
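The change above makes the PromiseResolveThenableJob run in the then-function's creation context when one can be determined, falling back to the current native context otherwise, per step 3 of HTML's EnqueueJob. A stripped-down sketch of just that fallback rule (Context and Handler are stand-in types, not V8's API):

#include <iostream>
#include <optional>
#include <string>

using Context = std::string;
struct Handler {
  std::optional<Context> creation_context;  // may be empty (e.g. a revoked proxy)
};

// Mirrors the fallback above: prefer the handler's creation context,
// otherwise use the isolate's current native context.
Context ContextForMicrotask(const Handler& then_action,
                            const Context& current_native_context) {
  if (then_action.creation_context) return *then_action.creation_context;
  return current_native_context;
}

int main() {
  Handler with_context{Context("iframe-context")};
  Handler without_context{std::nullopt};
  std::cout << ContextForMicrotask(with_context, "main-context") << "\n";
  std::cout << ContextForMicrotask(without_context, "main-context") << "\n";
}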
@@ -6031,6 +6104,9 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
Handle<PromiseReaction> reaction = Handle<PromiseReaction>::cast(task);
reactions = handle(reaction->next(), isolate);
+ // According to HTML, we use the context of the appropriate handler as the
+ // context of the microtask. See step 3 of HTML's EnqueueJob:
+ // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
Handle<NativeContext> handler_context;
Handle<HeapObject> primary_handler;
@@ -6043,17 +6119,18 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
secondary_handler = handle(reaction->fulfill_handler(), isolate);
}
+ bool has_handler_context = false;
if (primary_handler->IsJSReceiver()) {
- JSReceiver::GetContextForMicrotask(
- Handle<JSReceiver>::cast(primary_handler))
- .ToHandle(&handler_context);
+ has_handler_context = JSReceiver::GetContextForMicrotask(
+ Handle<JSReceiver>::cast(primary_handler))
+ .ToHandle(&handler_context);
}
- if (handler_context.is_null() && secondary_handler->IsJSReceiver()) {
- JSReceiver::GetContextForMicrotask(
- Handle<JSReceiver>::cast(secondary_handler))
- .ToHandle(&handler_context);
+ if (!has_handler_context && secondary_handler->IsJSReceiver()) {
+ has_handler_context = JSReceiver::GetContextForMicrotask(
+ Handle<JSReceiver>::cast(secondary_handler))
+ .ToHandle(&handler_context);
}
- if (handler_context.is_null()) handler_context = isolate->native_context();
+ if (!has_handler_context) handler_context = isolate->native_context();
STATIC_ASSERT(
static_cast<int>(PromiseReaction::kSize) ==
@@ -6073,6 +6150,11 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
static_cast<int>(
PromiseFulfillReactionJobTask::kPromiseOrCapabilityOffset));
+ STATIC_ASSERT(
+ static_cast<int>(
+ PromiseReaction::kContinuationPreservedEmbedderDataOffset) ==
+ static_cast<int>(PromiseFulfillReactionJobTask::
+ kContinuationPreservedEmbedderDataOffset));
} else {
DisallowHeapAllocation no_gc;
task->synchronized_set_map(
@@ -6086,6 +6168,11 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
static_cast<int>(PromiseReaction::kPromiseOrCapabilityOffset) ==
static_cast<int>(
PromiseRejectReactionJobTask::kPromiseOrCapabilityOffset));
+ STATIC_ASSERT(
+ static_cast<int>(
+ PromiseReaction::kContinuationPreservedEmbedderDataOffset) ==
+ static_cast<int>(PromiseRejectReactionJobTask::
+ kContinuationPreservedEmbedderDataOffset));
}
MicrotaskQueue* microtask_queue = handler_context->microtask_queue();
@@ -6213,37 +6300,59 @@ void JSRegExp::MarkTierUpForNextExec() {
namespace {
+bool IsLineTerminator(int c) {
+ // Expected to return true for '\n', '\r', 0x2028, and 0x2029.
+ return unibrow::IsLineTerminator(static_cast<unibrow::uchar>(c));
+}
+
+// TODO(jgruber): Consider merging CountAdditionalEscapeChars and
+// WriteEscapedRegExpSource into a single function to deduplicate dispatch logic
+// and move related code closer to each other.
template <typename Char>
-int CountRequiredEscapes(Handle<String> source) {
+int CountAdditionalEscapeChars(Handle<String> source, bool* needs_escapes_out) {
DisallowHeapAllocation no_gc;
int escapes = 0;
+ bool needs_escapes = false;
bool in_char_class = false;
Vector<const Char> src = source->GetCharVector<Char>(no_gc);
for (int i = 0; i < src.length(); i++) {
const Char c = src[i];
if (c == '\\') {
- // Escape. Skip next character;
- i++;
+ if (i + 1 < src.length() && IsLineTerminator(src[i + 1])) {
+ // This '\' is ignored since the next character itself will be escaped.
+ escapes--;
+ } else {
+ // Escape. Skip the next character, which will be copied verbatim.
+ i++;
+ }
} else if (c == '/' && !in_char_class) {
// An unescaped forward slash needs escaping.
+ needs_escapes = true;
escapes++;
} else if (c == '[') {
in_char_class = true;
} else if (c == ']') {
in_char_class = false;
} else if (c == '\n') {
+ needs_escapes = true;
escapes++;
} else if (c == '\r') {
+ needs_escapes = true;
escapes++;
} else if (static_cast<int>(c) == 0x2028) {
+ needs_escapes = true;
escapes += std::strlen("\\u2028") - 1;
} else if (static_cast<int>(c) == 0x2029) {
+ needs_escapes = true;
escapes += std::strlen("\\u2029") - 1;
} else {
- DCHECK(!unibrow::IsLineTerminator(static_cast<unibrow::uchar>(c)));
+ DCHECK(!IsLineTerminator(c));
}
}
DCHECK(!in_char_class);
+ DCHECK_GE(escapes, 0);
+ DCHECK_IMPLIES(escapes != 0, needs_escapes);
+ *needs_escapes_out = needs_escapes;
return escapes;
}
@@ -6263,33 +6372,42 @@ Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
int d = 0;
bool in_char_class = false;
while (s < src.length()) {
- if (src[s] == '\\') {
- // Escape. Copy this and next character.
- dst[d++] = src[s++];
+ const Char c = src[s];
+ if (c == '\\') {
+ if (s + 1 < src.length() && IsLineTerminator(src[s + 1])) {
+ // This '\' is ignored since the next character itself will be escaped.
+ s++;
+ continue;
+ } else {
+ // Escape. Copy this and next character.
+ dst[d++] = src[s++];
+ }
if (s == src.length()) break;
- } else if (src[s] == '/' && !in_char_class) {
+ } else if (c == '/' && !in_char_class) {
// An unescaped forward slash needs escaping.
dst[d++] = '\\';
- } else if (src[s] == '[') {
+ } else if (c == '[') {
in_char_class = true;
- } else if (src[s] == ']') {
+ } else if (c == ']') {
in_char_class = false;
- } else if (src[s] == '\n') {
+ } else if (c == '\n') {
WriteStringToCharVector(dst, &d, "\\n");
s++;
continue;
- } else if (src[s] == '\r') {
+ } else if (c == '\r') {
WriteStringToCharVector(dst, &d, "\\r");
s++;
continue;
- } else if (static_cast<int>(src[s]) == 0x2028) {
+ } else if (static_cast<int>(c) == 0x2028) {
WriteStringToCharVector(dst, &d, "\\u2028");
s++;
continue;
- } else if (static_cast<int>(src[s]) == 0x2029) {
+ } else if (static_cast<int>(c) == 0x2029) {
WriteStringToCharVector(dst, &d, "\\u2029");
s++;
continue;
+ } else {
+ DCHECK(!IsLineTerminator(c));
}
dst[d++] = src[s++];
}
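CountAdditionalEscapeChars and WriteEscapedRegExpSource implement a count-then-write scheme: a first pass computes how many extra characters escaping will add, so the destination string can be allocated exactly once, and a second pass writes the escaped source into it. A self-contained sketch of the same scheme covering only the simple cases ('/', '\n', '\r'; character classes and U+2028/U+2029 omitted), using std::string in place of V8 strings:

#include <cassert>
#include <iostream>
#include <string>

// Pass 1: count the extra characters escaping will add.
int CountAdditionalEscapeChars(const std::string& src, bool* needs_escapes) {
  int escapes = 0;
  for (size_t i = 0; i < src.size(); i++) {
    char c = src[i];
    if (c == '\\') {
      i++;  // already escaped; skip the next character
    } else if (c == '/' || c == '\n' || c == '\r') {
      *needs_escapes = true;
      escapes++;  // '/' gains a '\', '\n' becomes "\n", '\r' becomes "\r"
    }
  }
  return escapes;
}

// Pass 2: write into a buffer sized exactly by pass 1.
std::string WriteEscapedSource(const std::string& src, int extra) {
  std::string dst;
  dst.reserve(src.size() + extra);
  for (size_t i = 0; i < src.size(); i++) {
    char c = src[i];
    if (c == '\\') {
      dst += c;  // copy the escape and the escaped character verbatim
      if (i + 1 < src.size()) dst += src[++i];
    } else if (c == '/') {
      dst += "\\/";
    } else if (c == '\n') {
      dst += "\\n";
    } else if (c == '\r') {
      dst += "\\r";
    } else {
      dst += c;
    }
  }
  assert(dst.size() == src.size() + static_cast<size_t>(extra));
  return dst;
}

int main() {
  bool needs_escapes = false;
  std::string src = "a/b\nc";
  int extra = CountAdditionalEscapeChars(src, &needs_escapes);
  std::cout << (needs_escapes ? WriteEscapedSource(src, extra) : src) << "\n";
}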
@@ -6303,10 +6421,12 @@ MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
DCHECK(source->IsFlat());
if (source->length() == 0) return isolate->factory()->query_colon_string();
bool one_byte = String::IsOneByteRepresentationUnderneath(*source);
- int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
- : CountRequiredEscapes<uc16>(source);
- if (escapes == 0) return source;
- int length = source->length() + escapes;
+ bool needs_escapes = false;
+ int additional_escape_chars =
+ one_byte ? CountAdditionalEscapeChars<uint8_t>(source, &needs_escapes)
+ : CountAdditionalEscapeChars<uc16>(source, &needs_escapes);
+ if (!needs_escapes) return source;
+ int length = source->length() + additional_escape_chars;
if (one_byte) {
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
@@ -6461,8 +6581,9 @@ void HashTable<Derived, Shape>::IterateElements(ObjectVisitor* v) {
}
template <typename Derived, typename Shape>
+template <typename LocalIsolate>
Handle<Derived> HashTable<Derived, Shape>::New(
- Isolate* isolate, int at_least_space_for, AllocationType allocation,
+ LocalIsolate* isolate, int at_least_space_for, AllocationType allocation,
MinimumCapacity capacity_option) {
DCHECK_LE(0, at_least_space_for);
DCHECK_IMPLIES(capacity_option == USE_CUSTOM_MINIMUM_CAPACITY,
@@ -6472,19 +6593,19 @@ Handle<Derived> HashTable<Derived, Shape>::New(
? at_least_space_for
: ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
- isolate->heap()->FatalProcessOutOfMemory("invalid table size");
+ isolate->FatalProcessOutOfHeapMemory("invalid table size");
}
return NewInternal(isolate, capacity, allocation);
}
template <typename Derived, typename Shape>
+template <typename LocalIsolate>
Handle<Derived> HashTable<Derived, Shape>::NewInternal(
- Isolate* isolate, int capacity, AllocationType allocation) {
- Factory* factory = isolate->factory();
+ LocalIsolate* isolate, int capacity, AllocationType allocation) {
+ auto* factory = isolate->factory();
int length = EntryToIndex(InternalIndex(capacity));
- RootIndex map_root_index = Shape::GetMapRootIndex();
- Handle<FixedArray> array =
- factory->NewFixedArrayWithMap(map_root_index, length, allocation);
+ Handle<FixedArray> array = factory->NewFixedArrayWithMap(
+ Shape::GetMap(ReadOnlyRoots(isolate)), length, allocation);
Handle<Derived> table = Handle<Derived>::cast(array);
table->SetNumberOfElements(0);
@@ -6606,8 +6727,10 @@ void HashTable<Derived, Shape>::Rehash(ReadOnlyRoots roots) {
}
template <typename Derived, typename Shape>
+template <typename LocalIsolate>
Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
- Isolate* isolate, Handle<Derived> table, int n, AllocationType allocation) {
+ LocalIsolate* isolate, Handle<Derived> table, int n,
+ AllocationType allocation) {
if (table->HasSufficientCapacityToAdd(n)) return table;
int capacity = table->Capacity();
@@ -6625,9 +6748,6 @@ Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
return new_table;
}
-template bool
-HashTable<NameDictionary, NameDictionaryShape>::HasSufficientCapacityToAdd(int);
-
template <typename Derived, typename Shape>
bool HashTable<Derived, Shape>::HasSufficientCapacityToAdd(
int number_of_additional_elements) {
@@ -6824,8 +6944,8 @@ Address LookupString(Isolate* isolate, String string, String source,
return Smi::FromInt(String::ArrayIndexValueBits::decode(hash_field)).ptr();
}
- if ((hash_field & Name::kIsNotArrayIndexMask) == 0) {
- // It is an indexed, but it's not cached.
+ if ((hash_field & Name::kIsNotIntegerIndexMask) == 0) {
+ // It is an index, but it's not cached.
return Smi::FromInt(ResultSentinel::kUnsupported).ptr();
}
@@ -6888,7 +7008,7 @@ Handle<StringSet> StringSet::Add(Isolate* isolate, Handle<StringSet> stringset,
Handle<String> name) {
if (!stringset->Has(isolate, name)) {
stringset = EnsureCapacity(isolate, stringset);
- uint32_t hash = ShapeT::Hash(isolate, *name);
+ uint32_t hash = ShapeT::Hash(ReadOnlyRoots(isolate), *name);
InternalIndex entry = stringset->FindInsertionEntry(hash);
stringset->set(EntryToIndex(entry), *name);
stringset->ElementAdded();
@@ -7202,8 +7322,9 @@ void CompilationCacheTable::Remove(Object value) {
}
template <typename Derived, typename Shape>
+template <typename LocalIsolate>
Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
- Isolate* isolate, int at_least_space_for, AllocationType allocation,
+ LocalIsolate* isolate, int at_least_space_for, AllocationType allocation,
MinimumCapacity capacity_option) {
DCHECK_LE(0, at_least_space_for);
Handle<Derived> dict = Dictionary<Derived, Shape>::New(
@@ -7235,7 +7356,7 @@ int BaseNameDictionary<Derived, Shape>::NextEnumerationIndex(
PropertyDetails details = dictionary->DetailsAt(index);
PropertyDetails new_details = details.set_index(enum_index);
- dictionary->DetailsAtPut(isolate, index, new_details);
+ dictionary->DetailsAtPut(index, new_details);
}
index = PropertyDetails::kInitialIndex + length;
@@ -7251,7 +7372,7 @@ Handle<Derived> Dictionary<Derived, Shape>::DeleteEntry(
Isolate* isolate, Handle<Derived> dictionary, InternalIndex entry) {
DCHECK(Shape::kEntrySize != 3 ||
dictionary->DetailsAt(entry).IsConfigurable());
- dictionary->ClearEntry(isolate, entry);
+ dictionary->ClearEntry(entry);
dictionary->ElementRemoved();
return Shrink(isolate, dictionary);
}
@@ -7270,15 +7391,16 @@ Handle<Derived> Dictionary<Derived, Shape>::AtPut(Isolate* isolate,
// We don't need to copy over the enumeration index.
dictionary->ValueAtPut(entry, *value);
- if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(isolate, entry, details);
+ if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(entry, details);
return dictionary;
}
template <typename Derived, typename Shape>
+template <typename LocalIsolate>
Handle<Derived>
BaseNameDictionary<Derived, Shape>::AddNoUpdateNextEnumerationIndex(
- Isolate* isolate, Handle<Derived> dictionary, Key key, Handle<Object> value,
- PropertyDetails details, InternalIndex* entry_out) {
+ LocalIsolate* isolate, Handle<Derived> dictionary, Key key,
+ Handle<Object> value, PropertyDetails details, InternalIndex* entry_out) {
// Insert element at empty or deleted entry.
return Dictionary<Derived, Shape>::Add(isolate, dictionary, key, value,
details, entry_out);
@@ -7303,14 +7425,16 @@ Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
}
template <typename Derived, typename Shape>
-Handle<Derived> Dictionary<Derived, Shape>::Add(Isolate* isolate,
+template <typename LocalIsolate>
+Handle<Derived> Dictionary<Derived, Shape>::Add(LocalIsolate* isolate,
Handle<Derived> dictionary,
Key key, Handle<Object> value,
PropertyDetails details,
InternalIndex* entry_out) {
- uint32_t hash = Shape::Hash(isolate, key);
+ uint32_t hash = Shape::Hash(ReadOnlyRoots(isolate), key);
// Validate that the key is absent.
- SLOW_DCHECK(dictionary->FindEntry(isolate, key).is_not_found());
+ SLOW_DCHECK(
+ dictionary->FindEntry(ReadOnlyRoots(isolate), key).is_not_found());
// Check whether the dictionary should be extended.
dictionary = Derived::EnsureCapacity(isolate, dictionary);
@@ -7318,7 +7442,7 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(Isolate* isolate,
Handle<Object> k = Shape::AsHandle(isolate, key);
InternalIndex entry = dictionary->FindInsertionEntry(hash);
- dictionary->SetEntry(isolate, entry, *k, *value, details);
+ dictionary->SetEntry(entry, *k, *value, details);
DCHECK(dictionary->KeyAt(entry).IsNumber() ||
Shape::Unwrap(dictionary->KeyAt(entry)).IsUniqueName());
dictionary->ElementAdded();
@@ -7853,7 +7977,7 @@ Handle<PropertyCell> PropertyCell::InvalidateEntry(
details = details.set_cell_type(PropertyCellType::kInvalidated);
cell->set_property_details(details);
cell->dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kPropertyCellChangedGroup);
+ DependentCode::kPropertyCellChangedGroup);
return new_cell;
}
@@ -7954,7 +8078,7 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
if (!invalidate && (old_type != new_type ||
original_details.IsReadOnly() != details.IsReadOnly())) {
cell->dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kPropertyCellChangedGroup);
+ DependentCode::kPropertyCellChangedGroup);
}
return cell;
}
@@ -7967,7 +8091,7 @@ void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
if (cell->value() != *new_value) {
cell->set_value(*new_value);
cell->dependent_code().DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kPropertyCellChangedGroup);
+ DependentCode::kPropertyCellChangedGroup);
}
}
@@ -8109,92 +8233,112 @@ Address Smi::LexicographicCompare(Isolate* isolate, Smi x, Smi y) {
// Please note this list is compiler dependent.
// Keep this at the end of this file
-template class HashTable<StringTable, StringTableShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) HashTable<CompilationCacheTable, CompilationCacheShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) HashTable<ObjectHashTable, ObjectHashTableShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) HashTable<ObjectHashSet, ObjectHashSetShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- ObjectHashTableBase<ObjectHashTable, ObjectHashTableShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) HashTable<EphemeronHashTable, EphemeronHashTableShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- ObjectHashTableBase<EphemeronHashTable, EphemeronHashTableShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- BaseNameDictionary<NameDictionary, NameDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Dictionary<NameDictionary, NameDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Dictionary<GlobalDictionary, GlobalDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(
- V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Dictionary<NumberDictionary, NumberDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
-
-template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
-
-template Handle<NameDictionary>
-HashTable<NameDictionary, NameDictionaryShape>::New(Isolate*, int,
- AllocationType,
- MinimumCapacity);
-
-template V8_EXPORT_PRIVATE Handle<NameDictionary>
-HashTable<NameDictionary, NameDictionaryShape>::Shrink(Isolate* isolate,
- Handle<NameDictionary>,
- int additionalCapacity);
-
-template void HashTable<GlobalDictionary, GlobalDictionaryShape>::Rehash(
- ReadOnlyRoots roots);
-
-Maybe<bool> JSFinalizationGroup::Cleanup(
- Isolate* isolate, Handle<JSFinalizationGroup> finalization_group,
+#define EXTERN_DEFINE_HASH_TABLE(DERIVED, SHAPE) \
+ template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) \
+ HashTable<DERIVED, SHAPE>; \
+ \
+ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle<DERIVED> \
+ HashTable<DERIVED, SHAPE>::New(Isolate*, int, AllocationType, \
+ MinimumCapacity); \
+ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle<DERIVED> \
+ HashTable<DERIVED, SHAPE>::New(OffThreadIsolate*, int, AllocationType, \
+ MinimumCapacity); \
+ \
+ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle<DERIVED> \
+ HashTable<DERIVED, SHAPE>::EnsureCapacity(Isolate*, Handle<DERIVED>, int, \
+ AllocationType); \
+ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle<DERIVED> \
+ HashTable<DERIVED, SHAPE>::EnsureCapacity( \
+ OffThreadIsolate*, Handle<DERIVED>, int, AllocationType);
+
+#define EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE(DERIVED, SHAPE) \
+ EXTERN_DEFINE_HASH_TABLE(DERIVED, SHAPE) \
+ template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) \
+ ObjectHashTableBase<DERIVED, SHAPE>;
+
+#define EXTERN_DEFINE_DICTIONARY(DERIVED, SHAPE) \
+ EXTERN_DEFINE_HASH_TABLE(DERIVED, SHAPE) \
+ template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) \
+ Dictionary<DERIVED, SHAPE>; \
+ \
+ template V8_EXPORT_PRIVATE Handle<DERIVED> Dictionary<DERIVED, SHAPE>::Add( \
+ Isolate* isolate, Handle<DERIVED>, Key, Handle<Object>, PropertyDetails, \
+ InternalIndex*); \
+ template V8_EXPORT_PRIVATE Handle<DERIVED> Dictionary<DERIVED, SHAPE>::Add( \
+ OffThreadIsolate* isolate, Handle<DERIVED>, Key, Handle<Object>, \
+ PropertyDetails, InternalIndex*);
+
+#define EXTERN_DEFINE_BASE_NAME_DICTIONARY(DERIVED, SHAPE) \
+ EXTERN_DEFINE_DICTIONARY(DERIVED, SHAPE) \
+ template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) \
+ BaseNameDictionary<DERIVED, SHAPE>; \
+ \
+ template V8_EXPORT_PRIVATE Handle<DERIVED> \
+ BaseNameDictionary<DERIVED, SHAPE>::New(Isolate*, int, AllocationType, \
+ MinimumCapacity); \
+ template V8_EXPORT_PRIVATE Handle<DERIVED> \
+ BaseNameDictionary<DERIVED, SHAPE>::New(OffThreadIsolate*, int, \
+ AllocationType, MinimumCapacity); \
+ \
+ template Handle<DERIVED> \
+ BaseNameDictionary<DERIVED, SHAPE>::AddNoUpdateNextEnumerationIndex( \
+ Isolate* isolate, Handle<DERIVED>, Key, Handle<Object>, PropertyDetails, \
+ InternalIndex*); \
+ template Handle<DERIVED> \
+ BaseNameDictionary<DERIVED, SHAPE>::AddNoUpdateNextEnumerationIndex( \
+ OffThreadIsolate* isolate, Handle<DERIVED>, Key, Handle<Object>, \
+ PropertyDetails, InternalIndex*);
+
+EXTERN_DEFINE_HASH_TABLE(StringTable, StringTableShape)
+EXTERN_DEFINE_HASH_TABLE(StringSet, StringSetShape)
+EXTERN_DEFINE_HASH_TABLE(CompilationCacheTable, CompilationCacheShape)
+EXTERN_DEFINE_HASH_TABLE(ObjectHashSet, ObjectHashSetShape)
+
+EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE(ObjectHashTable, ObjectHashTableShape)
+EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE(EphemeronHashTable,
+ EphemeronHashTableShape)
+
+EXTERN_DEFINE_DICTIONARY(SimpleNumberDictionary, SimpleNumberDictionaryShape)
+EXTERN_DEFINE_DICTIONARY(NumberDictionary, NumberDictionaryShape)
+
+EXTERN_DEFINE_BASE_NAME_DICTIONARY(NameDictionary, NameDictionaryShape)
+EXTERN_DEFINE_BASE_NAME_DICTIONARY(GlobalDictionary, GlobalDictionaryShape)
+
+#undef EXTERN_DEFINE_HASH_TABLE
+#undef EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE
+#undef EXTERN_DEFINE_DICTIONARY
+#undef EXTERN_DEFINE_BASE_NAME_DICTIONARY
+
+Maybe<bool> JSFinalizationRegistry::Cleanup(
+ Isolate* isolate, Handle<JSFinalizationRegistry> finalization_registry,
Handle<Object> cleanup) {
DCHECK(cleanup->IsCallable());
// Attempt to shrink key_map now, as unregister tokens are held weakly and the
// map is not shrinkable when sweeping dead tokens during GC itself.
- if (!finalization_group->key_map().IsUndefined(isolate)) {
- Handle<SimpleNumberDictionary> key_map = handle(
- SimpleNumberDictionary::cast(finalization_group->key_map()), isolate);
+ if (!finalization_registry->key_map().IsUndefined(isolate)) {
+ Handle<SimpleNumberDictionary> key_map =
+ handle(SimpleNumberDictionary::cast(finalization_registry->key_map()),
+ isolate);
key_map = SimpleNumberDictionary::Shrink(isolate, key_map);
- finalization_group->set_key_map(*key_map);
+ finalization_registry->set_key_map(*key_map);
}
// It's possible that the cleared_cells list is empty, since
- // FinalizationGroup.unregister() removed all its elements before this task
+ // FinalizationRegistry.unregister() removed all its elements before this task
// ran. In that case, don't call the cleanup function.
- if (!finalization_group->cleared_cells().IsUndefined(isolate)) {
+ if (!finalization_registry->cleared_cells().IsUndefined(isolate)) {
// Construct the iterator.
- Handle<JSFinalizationGroupCleanupIterator> iterator;
+ Handle<JSFinalizationRegistryCleanupIterator> iterator;
{
Handle<Map> cleanup_iterator_map(
isolate->native_context()
- ->js_finalization_group_cleanup_iterator_map(),
+ ->js_finalization_registry_cleanup_iterator_map(),
isolate);
- iterator = Handle<JSFinalizationGroupCleanupIterator>::cast(
+ iterator = Handle<JSFinalizationRegistryCleanupIterator>::cast(
isolate->factory()->NewJSObjectFromMap(
cleanup_iterator_map, AllocationType::kYoung,
Handle<AllocationSite>::null()));
- iterator->set_finalization_group(*finalization_group);
+ iterator->set_finalization_registry(*finalization_registry);
}
Handle<Object> args[] = {iterator};
if (Execution::Call(
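A note on the instantiation hunk above: the EXTERN_DEFINE_* macros replace the hand-maintained list of explicit template instantiations, and each macro now stamps out both an Isolate* and an OffThreadIsolate* variant of the templated entry points (New, EnsureCapacity, Add, ...). A minimal standalone sketch of the same pattern, using stand-in types rather than V8's: templating over the isolate type keeps one definition, while explicit instantiations in the .cc file keep the symbols linkable from other translation units.

#include <iostream>
#include <string>

struct Isolate {
  static constexpr const char* kName = "main-thread isolate";
};
struct OffThreadIsolate {
  static constexpr const char* kName = "off-thread isolate";
};

template <typename Derived>
struct Dictionary {
  // One definition serves both isolate kinds.
  template <typename LocalIsolate>
  static Derived Add(LocalIsolate*, const std::string& key) {
    std::cout << "Add(" << key << ") via " << LocalIsolate::kName << "\n";
    return Derived{};
  }
};

struct NameDictionary : Dictionary<NameDictionary> {};

// Analogue of EXTERN_DEFINE_DICTIONARY: without these explicit
// instantiations, callers in other files would fail to link.
template NameDictionary Dictionary<NameDictionary>::Add<Isolate>(
    Isolate*, const std::string&);
template NameDictionary Dictionary<NameDictionary>::Add<OffThreadIsolate>(
    OffThreadIsolate*, const std::string&);

int main() {
  Isolate main_isolate;
  OffThreadIsolate off_thread_isolate;
  NameDictionary::Add(&main_isolate, "x");
  NameDictionary::Add(&off_thread_isolate, "y");
}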
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 1fa1dde37a..ef437446fd 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -38,6 +38,7 @@
// Inheritance hierarchy:
// - Object
// - Smi (immediate small integer)
+// - TaggedIndex (properly sign-extended immediate small integer)
// - HeapObject (superclass for everything allocated in the heap)
// - JSReceiver (suitable for property access)
// - JSObject
@@ -194,6 +195,7 @@
namespace v8 {
namespace internal {
+class OffThreadIsolate;
struct InliningPosition;
class PropertyDescriptorObject;
@@ -274,6 +276,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
constexpr Object() : TaggedImpl(kNullAddress) {}
explicit constexpr Object(Address ptr) : TaggedImpl(ptr) {}
+ V8_INLINE bool IsTaggedIndex() const;
+
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
V8_INLINE bool Is##Type(const Isolate* isolate) const;
@@ -282,12 +286,14 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
IS_TYPE_FUNCTION_DECL(HashTableBase)
IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
#undef IS_TYPE_FUNCTION_DECL
+ V8_INLINE bool IsNumber(ReadOnlyRoots roots) const;
// Oddball checks are faster when they are raw pointer comparisons, so the
// isolate/read-only roots overloads should be preferred where possible.
-#define IS_TYPE_FUNCTION_DECL(Type, Value) \
- V8_INLINE bool Is##Type(Isolate* isolate) const; \
- V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+ V8_INLINE bool Is##Type(Isolate* isolate) const; \
+ V8_INLINE bool Is##Type(OffThreadIsolate* isolate) const; \
+ V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(NullOrUndefined, /* unused */)
@@ -696,16 +702,17 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
ConvertToString(Isolate* isolate, Handle<Object> input);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToNumberOrNumeric(
Isolate* isolate, Handle<Object> input, Conversion mode);
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToInteger(
- Isolate* isolate, Handle<Object> input);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ ConvertToInteger(Isolate* isolate, Handle<Object> input);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToInt32(
Isolate* isolate, Handle<Object> input);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToUint32(
Isolate* isolate, Handle<Object> input);
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToLength(
- Isolate* isolate, Handle<Object> input);
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToIndex(
- Isolate* isolate, Handle<Object> input, MessageTemplate error_index);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ ConvertToLength(Isolate* isolate, Handle<Object> input);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ ConvertToIndex(Isolate* isolate, Handle<Object> input,
+ MessageTemplate error_index);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Object& obj);
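The objects.h hunk above threads the new OffThreadIsolate through the oddball type predicates by adding a single overload line to the IS_TYPE_FUNCTION_DECL X-macro, so every type listed in ODDBALL_LIST gains the overload at once. A simplified sketch of the trick (one-argument list and inline bodies; the real macro takes (Type, Value) pairs and only declares):

#include <iostream>

struct Isolate {};
struct OffThreadIsolate {};
struct ReadOnlyRoots {};

#define ODDBALL_LIST(V) \
  V(Null)               \
  V(Undefined)

struct Object {
  // One extra line in the macro adds the new overload for every listed type.
#define IS_TYPE_FUNCTION_DECL(Type)                        \
  bool Is##Type(Isolate*) const { return false; }          \
  bool Is##Type(OffThreadIsolate*) const { return false; } \
  bool Is##Type(ReadOnlyRoots) const { return false; }
  ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
};

int main() {
  Object o;
  OffThreadIsolate off;
  std::cout << o.IsNull(&off) << "\n";  // the new overload resolves
}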
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index bcca03ddca..4b274097b8 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -25,13 +25,9 @@ void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
base::WriteUnalignedValue<uint64_t>(field_address(kToNumberRawOffset), bits);
}
-byte Oddball::kind() const {
- return Smi::ToInt(TorqueGeneratedOddball::kind());
-}
+byte Oddball::kind() const { return TorqueGeneratedOddball::kind(); }
-void Oddball::set_kind(byte value) {
- TorqueGeneratedOddball::set_kind(Smi::FromInt(value));
-}
+void Oddball::set_kind(byte value) { TorqueGeneratedOddball::set_kind(value); }
// static
Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
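The oddball-inl.h hunk above drops the manual Smi round-trip: the Torque-generated accessors for `kind` now read and write the untagged byte themselves, making the Smi::ToInt/Smi::FromInt wrappers redundant. For reference, a sketch of what the removed wrappers were doing, with an illustrative one-bit tag (V8's actual Smi layout differs by build configuration):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;  // illustrative 32-bit-style tagging

constexpr intptr_t SmiFromInt(int value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;
}
constexpr int SmiToInt(intptr_t smi) {
  return static_cast<int>(smi >> kSmiTagSize);
}

int main() {
  intptr_t tagged = SmiFromInt(7);
  assert((tagged & 1) == 0);      // clear tag bit marks a Smi
  assert(SmiToInt(tagged) == 7);  // the value round-trips
}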
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index f6b8f972a9..6edd5c3cda 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -73,28 +73,28 @@ OBJECT_CONSTRUCTORS_IMPL(SmallOrderedHashMap,
OBJECT_CONSTRUCTORS_IMPL(SmallOrderedNameDictionary,
SmallOrderedHashTable<SmallOrderedNameDictionary>)
-RootIndex OrderedHashSet::GetMapRootIndex() {
- return RootIndex::kOrderedHashSetMap;
+Handle<Map> OrderedHashSet::GetMap(ReadOnlyRoots roots) {
+ return roots.ordered_hash_set_map_handle();
}
-RootIndex OrderedHashMap::GetMapRootIndex() {
- return RootIndex::kOrderedHashMapMap;
+Handle<Map> OrderedHashMap::GetMap(ReadOnlyRoots roots) {
+ return roots.ordered_hash_map_map_handle();
}
-RootIndex OrderedNameDictionary::GetMapRootIndex() {
- return RootIndex::kOrderedNameDictionaryMap;
+Handle<Map> OrderedNameDictionary::GetMap(ReadOnlyRoots roots) {
+ return roots.ordered_name_dictionary_map_handle();
}
-RootIndex SmallOrderedNameDictionary::GetMapRootIndex() {
- return RootIndex::kSmallOrderedNameDictionaryMap;
+Handle<Map> SmallOrderedNameDictionary::GetMap(ReadOnlyRoots roots) {
+ return roots.small_ordered_name_dictionary_map_handle();
}
-RootIndex SmallOrderedHashMap::GetMapRootIndex() {
- return RootIndex::kSmallOrderedHashMapMap;
+Handle<Map> SmallOrderedHashMap::GetMap(ReadOnlyRoots roots) {
+ return roots.small_ordered_hash_map_map_handle();
}
-RootIndex SmallOrderedHashSet::GetMapRootIndex() {
- return RootIndex::kSmallOrderedHashSetMap;
+Handle<Map> SmallOrderedHashSet::GetMap(ReadOnlyRoots roots) {
+ return roots.small_ordered_hash_set_map_handle();
}
inline Object OrderedHashMap::ValueAt(int entry) {
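Across the ordered-hash-table hunks, GetMapRootIndex() — an index each caller had to resolve against the isolate — becomes GetMap(ReadOnlyRoots), which hands out the map directly; read-only roots are immortal and immovable, so handles to them can be produced without an isolate. A stand-in sketch of the shape of that API (hypothetical types, plain pointers in place of Handle<Map>):

#include <array>
#include <cassert>

struct Map { const char* name; };

// Stand-in for V8's ReadOnlyRoots: a table of immortal, immovable objects.
class ReadOnlyRoots {
 public:
  const Map* ordered_hash_set_map_handle() const { return &roots_[0]; }
  const Map* ordered_hash_map_map_handle() const { return &roots_[1]; }

 private:
  static constexpr std::array<Map, 2> roots_ = {
      Map{"OrderedHashSetMap"}, Map{"OrderedHashMapMap"}};
};

struct OrderedHashSet {
  // Callers no longer need an Isolate to resolve a RootIndex.
  static const Map* GetMap(ReadOnlyRoots roots) {
    return roots.ordered_hash_set_map_handle();
  }
};

int main() {
  ReadOnlyRoots roots;
  assert(OrderedHashSet::GetMap(roots) == roots.ordered_hash_set_map_handle());
}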
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index 962224024e..cbf3ba373b 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -9,6 +9,7 @@
#include "src/objects/js-collection-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -27,7 +28,7 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
}
int num_buckets = capacity / kLoadFactor;
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
- Derived::GetMapRootIndex(),
+ Derived::GetMap(ReadOnlyRoots(isolate)),
HashTableStartIndex() + num_buckets + (capacity * kEntrySize),
allocation);
Handle<Derived> table = Handle<Derived>::cast(backing_store);
@@ -397,10 +398,10 @@ MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
return table;
}
-void OrderedNameDictionary::SetEntry(Isolate* isolate, int entry, Object key,
- Object value, PropertyDetails details) {
+void OrderedNameDictionary::SetEntry(int entry, Object key, Object value,
+ PropertyDetails details) {
DisallowHeapAllocation gc;
- DCHECK_IMPLIES(!key.IsName(), key.IsTheHole(isolate));
+ DCHECK_IMPLIES(!key.IsName(), key.IsTheHole());
DisallowHeapAllocation no_gc;
int index = EntryToIndex(entry);
this->set(index, key);
@@ -418,7 +419,7 @@ Handle<OrderedNameDictionary> OrderedNameDictionary::DeleteEntry(
Object hole = ReadOnlyRoots(isolate).the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
- table->SetEntry(isolate, entry, hole, hole, details);
+ table->SetEntry(entry, hole, hole, details);
int nof = table->NumberOfElements();
table->SetNumberOfElements(nof - 1);
@@ -717,10 +718,9 @@ MaybeHandle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Add(
return table;
}
-void SmallOrderedNameDictionary::SetEntry(Isolate* isolate, int entry,
- Object key, Object value,
+void SmallOrderedNameDictionary::SetEntry(int entry, Object key, Object value,
PropertyDetails details) {
- DCHECK_IMPLIES(!key.IsName(), key.IsTheHole(isolate));
+ DCHECK_IMPLIES(!key.IsName(), key.IsTheHole());
SetDataEntry(entry, SmallOrderedNameDictionary::kValueIndex, value);
SetDataEntry(entry, SmallOrderedNameDictionary::kKeyIndex, key);
@@ -765,7 +765,7 @@ Handle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::DeleteEntry(
DisallowHeapAllocation no_gc;
Object hole = ReadOnlyRoots(isolate).the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
- table->SetEntry(isolate, entry, hole, hole, details);
+ table->SetEntry(entry, hole, hole, details);
int nof = table->NumberOfElements();
table->SetNumberOfElements(nof - 1);
@@ -1159,17 +1159,17 @@ MaybeHandle<HeapObject> OrderedNameDictionaryHandler::Add(
isolate, Handle<OrderedNameDictionary>::cast(table), key, value, details);
}
-void OrderedNameDictionaryHandler::SetEntry(Isolate* isolate, HeapObject table,
- int entry, Object key, Object value,
+void OrderedNameDictionaryHandler::SetEntry(HeapObject table, int entry,
+ Object key, Object value,
PropertyDetails details) {
DisallowHeapAllocation no_gc;
if (table.IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table).SetEntry(isolate, entry, key,
- value, details);
+ return SmallOrderedNameDictionary::cast(table).SetEntry(entry, key, value,
+ details);
}
DCHECK(table.IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table).SetEntry(isolate, entry, key, value,
+ return OrderedNameDictionary::cast(table).SetEntry(entry, key, value,
details);
}
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 590846f130..b587960432 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -248,7 +248,7 @@ class V8_EXPORT_PRIVATE OrderedHashSet
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static inline bool Is(Handle<HeapObject> table);
static const int kPrefixSize = 0;
@@ -280,7 +280,7 @@ class V8_EXPORT_PRIVATE OrderedHashMap
static Address GetHash(Isolate* isolate, Address raw_key);
static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static inline bool Is(Handle<HeapObject> table);
static const int kValueOffset = 1;
@@ -602,7 +602,7 @@ class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
V8_EXPORT_PRIVATE bool HasKey(Isolate* isolate, Handle<Object> key);
static inline bool Is(Handle<HeapObject> table);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static Handle<SmallOrderedHashSet> Rehash(Isolate* isolate,
Handle<SmallOrderedHashSet> table,
int new_capacity);
@@ -635,7 +635,7 @@ class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
SmallOrderedHashMap table, Object key);
V8_EXPORT_PRIVATE bool HasKey(Isolate* isolate, Handle<Object> key);
static inline bool Is(Handle<HeapObject> table);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static Handle<SmallOrderedHashMap> Rehash(Isolate* isolate,
Handle<SmallOrderedHashMap> table,
@@ -701,8 +701,8 @@ class OrderedNameDictionary
Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
Handle<Object> value, PropertyDetails details);
- V8_EXPORT_PRIVATE void SetEntry(Isolate* isolate, int entry, Object key,
- Object value, PropertyDetails details);
+ V8_EXPORT_PRIVATE void SetEntry(int entry, Object key, Object value,
+ PropertyDetails details);
V8_EXPORT_PRIVATE static Handle<OrderedNameDictionary> DeleteEntry(
Isolate* isolate, Handle<OrderedNameDictionary> table, int entry);
@@ -730,7 +730,7 @@ class OrderedNameDictionary
inline int Hash();
static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static inline bool Is(Handle<HeapObject> table);
static const int kValueOffset = 1;
@@ -756,8 +756,8 @@ class V8_EXPORT_PRIVATE OrderedNameDictionaryHandler
static Handle<HeapObject> DeleteEntry(Isolate* isolate,
Handle<HeapObject> table, int entry);
static int FindEntry(Isolate* isolate, HeapObject table, Name key);
- static void SetEntry(Isolate* isolate, HeapObject table, int entry,
- Object key, Object value, PropertyDetails details);
+ static void SetEntry(HeapObject table, int entry, Object key, Object value,
+ PropertyDetails details);
// Returns the value for entry.
static Object ValueAt(HeapObject table, int entry);
@@ -829,10 +829,10 @@ class SmallOrderedNameDictionary
Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
Handle<Name> key, Handle<Object> value, PropertyDetails details);
- V8_EXPORT_PRIVATE void SetEntry(Isolate* isolate, int entry, Object key,
- Object value, PropertyDetails details);
+ V8_EXPORT_PRIVATE void SetEntry(int entry, Object key, Object value,
+ PropertyDetails details);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static inline bool Is(Handle<HeapObject> table);
OBJECT_CONSTRUCTORS(SmallOrderedNameDictionary,
diff --git a/deps/v8/src/objects/ordered-hash-table.tq b/deps/v8/src/objects/ordered-hash-table.tq
index 7c40cb204e..d1b58d93eb 100644
--- a/deps/v8/src/objects/ordered-hash-table.tq
+++ b/deps/v8/src/objects/ordered-hash-table.tq
@@ -27,7 +27,7 @@ const kSmallOrderedHashSetMap: Map = SmallOrderedHashSetMapConstant();
extern class SmallOrderedHashSet extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
- number_of_buckets: uint8;
+ const number_of_buckets: uint8;
@if(TAGGED_SIZE_8_BYTES) padding[5]: uint8;
@ifnot(TAGGED_SIZE_8_BYTES) padding[1]: uint8;
data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
@@ -66,7 +66,7 @@ const kSmallOrderedHashMapMap: Map = SmallOrderedHashMapMapConstant();
extern class SmallOrderedHashMap extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
- number_of_buckets: uint8;
+ const number_of_buckets: uint8;
@if(TAGGED_SIZE_8_BYTES) padding[5]: uint8;
@ifnot(TAGGED_SIZE_8_BYTES) padding[1]: uint8;
data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
@@ -104,7 +104,7 @@ extern class SmallOrderedNameDictionary extends SmallOrderedHashTable {
hash: int32;
number_of_elements: uint8;
number_of_deleted_elements: uint8;
- number_of_buckets: uint8;
+ const number_of_buckets: uint8;
padding: uint8;
data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
NameDictionaryEntry;
diff --git a/deps/v8/src/objects/promise.tq b/deps/v8/src/objects/promise.tq
index a1638e221a..391f742e44 100644
--- a/deps/v8/src/objects/promise.tq
+++ b/deps/v8/src/objects/promise.tq
@@ -22,6 +22,8 @@ const kPromiseReactionFulfillHandlerOffset: constexpr int31
generates 'PromiseReaction::kFulfillHandlerOffset';
const kPromiseReactionPromiseOrCapabilityOffset: constexpr int31
generates 'PromiseReaction::kPromiseOrCapabilityOffset';
+const kPromiseReactionContinuationPreservedEmbedderDataOffset: constexpr int31
+ generates 'PromiseReaction::kContinuationPreservedEmbedderDataOffset';
@generateCppClass
extern class PromiseReaction extends Struct {
@@ -31,6 +33,7 @@ extern class PromiseReaction extends Struct {
// Either a JSPromise (in case of native promises), a PromiseCapability
// (general case), or undefined (in case of await).
promise_or_capability: JSPromise|PromiseCapability|Undefined;
+ continuation_preserved_embedder_data: Object|Undefined;
}
// PromiseReactionJobTask constants
@@ -40,6 +43,10 @@ const kPromiseReactionJobTaskHandlerOffset: constexpr int31
generates 'PromiseReactionJobTask::kHandlerOffset';
const kPromiseReactionJobTaskPromiseOrCapabilityOffset: constexpr int31
generates 'PromiseReactionJobTask::kPromiseOrCapabilityOffset';
+const kPromiseReactionJobTaskContinuationPreservedEmbedderDataOffset:
+ constexpr int31
+ generates 'PromiseReactionJobTask::kContinuationPreservedEmbedderDataOffset'
+ ;
@abstract
@generateCppClass
@@ -50,6 +57,7 @@ extern class PromiseReactionJobTask extends Microtask {
// Either a JSPromise (in case of native promises), a PromiseCapability
// (general case), or undefined (in case of await).
promise_or_capability: JSPromise|PromiseCapability|Undefined;
+ continuation_preserved_embedder_data: Object|Undefined;
}
@generateCppClass
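The promise.tq hunk above adds a continuation_preserved_embedder_data slot to both PromiseReaction and PromiseReactionJobTask, so embedder state captured when a reaction is created can be restored when it eventually runs. An illustrative model of the idea — not V8 code — using a plain microtask queue and a global slot standing in for the isolate-wide data:

#include <functional>
#include <iostream>
#include <queue>
#include <string>

static std::string g_embedder_data;  // stand-in for the isolate-wide slot

struct Reaction {
  std::function<void()> handler;
  std::string preserved_data;  // snapshot taken at creation time
};

std::queue<Reaction> microtasks;

void EnqueueReaction(std::function<void()> handler) {
  microtasks.push({std::move(handler), g_embedder_data});
}

void RunMicrotasks() {
  while (!microtasks.empty()) {
    Reaction r = std::move(microtasks.front());
    microtasks.pop();
    std::string saved = g_embedder_data;
    g_embedder_data = r.preserved_data;  // restore the captured context
    r.handler();
    g_embedder_data = saved;
  }
}

int main() {
  g_embedder_data = "request-42";
  EnqueueReaction(
      [] { std::cout << "running under: " << g_embedder_data << "\n"; });
  g_embedder_data = "";  // the original context is gone by drain time
  RunMicrotasks();       // prints "running under: request-42"
}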
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index 2172c0002e..a0cd895fe6 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
#define V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
-#include "src/base/bit-field.h"
#include "src/objects/struct.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,19 +18,7 @@ class PropertyDescriptorObject
: public TorqueGeneratedPropertyDescriptorObject<PropertyDescriptorObject,
Struct> {
public:
-#define FLAGS_BIT_FIELDS(V, _) \
- V(IsEnumerableBit, bool, 1, _) \
- V(HasEnumerableBit, bool, 1, _) \
- V(IsConfigurableBit, bool, 1, _) \
- V(HasConfigurableBit, bool, 1, _) \
- V(IsWritableBit, bool, 1, _) \
- V(HasWritableBit, bool, 1, _) \
- V(HasValueBit, bool, 1, _) \
- V(HasGetBit, bool, 1, _) \
- V(HasSetBit, bool, 1, _)
-
- DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
-#undef FLAGS_BIT_FIELDS
+ DEFINE_TORQUE_GENERATED_PROPERTY_DESCRIPTOR_OBJECT_FLAGS()
static const int kRegularAccessorPropertyBits =
HasEnumerableBit::kMask | HasConfigurableBit::kMask | HasGetBit::kMask |
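The property-descriptor hunks move the flags layout from the C++ FLAGS_BIT_FIELDS macro into a Torque `bitfield struct`, with DEFINE_TORQUE_GENERATED_PROPERTY_DESCRIPTOR_OBJECT_FLAGS() emitting equivalent accessors. Both expand to the same BitField idiom: typed views over disjoint bit ranges of one integer flags word. A self-contained sketch (illustrative field order, simplified from V8's BitField):

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  // The next field starts where this one ends.
  template <typename T2, int kSize2>
  using Next = BitField<T2, kShift + kSize, kSize2>;
};

using IsEnumerableBit = BitField<bool, 0, 1>;
using HasEnumerableBit = IsEnumerableBit::Next<bool, 1>;
using IsConfigurableBit = HasEnumerableBit::Next<bool, 1>;

int main() {
  uint32_t flags =
      IsEnumerableBit::encode(true) | IsConfigurableBit::encode(true);
  assert(IsEnumerableBit::decode(flags));
  assert(!HasEnumerableBit::decode(flags));
  assert(IsConfigurableBit::decode(flags));
}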
diff --git a/deps/v8/src/objects/property-descriptor-object.tq b/deps/v8/src/objects/property-descriptor-object.tq
index b71a1e8878..726769f29a 100644
--- a/deps/v8/src/objects/property-descriptor-object.tq
+++ b/deps/v8/src/objects/property-descriptor-object.tq
@@ -4,10 +4,22 @@
#include 'src/objects/property-descriptor-object.h'
+bitfield struct PropertyDescriptorObjectFlags extends uint31 {
+ is_enumerable: bool: 1 bit;
+ has_enumerable: bool: 1 bit;
+ is_configurable: bool: 1 bit;
+ has_configurable: bool: 1 bit;
+ is_writable: bool: 1 bit;
+ has_writable: bool: 1 bit;
+ has_value: bool: 1 bit;
+ has_get: bool: 1 bit;
+ has_set: bool: 1 bit;
+}
+
@generateCppClass
@generatePrint
extern class PropertyDescriptorObject extends Struct {
- flags: Smi;
+ flags: SmiTagged<PropertyDescriptorObjectFlags>;
value: JSAny|TheHole;
get: JSAny|TheHole;
set: JSAny|TheHole;
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index 325de41c2b..9c9a71849f 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -60,7 +60,6 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
Handle<DescriptorArray>(map.instance_descriptors(), isolate);
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
- Name key = descs->GetKey(i);
Handle<Object> value;
if (details.location() == kField) {
if (details.kind() == kData) {
@@ -83,6 +82,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
return false;
}
}
+ Name key = descs->GetKey(i);
ReadOnlyRoots roots(isolate);
if (key == roots.enumerable_string()) {
desc->set_enumerable(value->BooleanValue(isolate));
@@ -355,7 +355,7 @@ Handle<PropertyDescriptorObject> PropertyDescriptor::ToPropertyDescriptorObject(
PropertyDescriptorObject::HasGetBit::encode(has_get()) |
PropertyDescriptorObject::HasSetBit::encode(has_set());
- obj->set_flags(Smi::FromInt(flags));
+ obj->set_flags(flags);
if (has_value()) obj->set_value(*value_);
if (has_get()) obj->set_get(*get_);
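The property-descriptor.cc reorder above is subtle: `Name key` is a raw, unhandlified pointer, and the value-fetching calls between the old read and its use can allocate, which may move heap objects and leave `key` stale. Reading the key only after the allocating work means no raw pointer is held across a potential GC point. The same hazard in miniature, with std::vector playing the moving heap:

#include <iostream>
#include <vector>

int main() {
  std::vector<int> heap = {1, 2, 3};

  // BAD shape (what the old ordering risked): take the raw reference first.
  // int& key = heap[0];
  // heap.push_back(4);         // may reallocate: `key` now dangles
  // std::cout << key << "\n";  // undefined behavior

  // GOOD shape (what the patch does): do the possibly-moving work first,
  // then fetch the reference right before use.
  heap.push_back(4);  // any relocation happens here
  int& key = heap[0];
  std::cout << key << "\n";  // safe: reference taken after the move
}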
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index f1112332ad..4dcd67905c 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -12,6 +12,7 @@
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -60,7 +61,9 @@ bool ScopeInfo::Equals(ScopeInfo other) const {
#endif
// static
-Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
+template <typename LocalIsolate>
+Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
+ Scope* scope,
MaybeHandle<ScopeInfo> outer_scope) {
// Collect variables.
int context_local_count = 0;
@@ -376,6 +379,15 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
return scope_info_handle;
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ScopeInfo> ScopeInfo::Create<Isolate>(
+ Isolate* isolate, Zone* zone, Scope* scope,
+ MaybeHandle<ScopeInfo> outer_scope);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<ScopeInfo> ScopeInfo::Create<OffThreadIsolate>(
+ OffThreadIsolate* isolate, Zone* zone, Scope* scope,
+ MaybeHandle<ScopeInfo> outer_scope);
+
// static
Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope) {
@@ -1031,8 +1043,9 @@ std::ostream& operator<<(std::ostream& os, VariableAllocationInfo var_info) {
return os;
}
+template <typename LocalIsolate>
Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
- Isolate* isolate, Handle<PrimitiveHeapObject> export_name,
+ LocalIsolate* isolate, Handle<PrimitiveHeapObject> export_name,
Handle<PrimitiveHeapObject> local_name,
Handle<PrimitiveHeapObject> import_name, int module_request, int cell_index,
int beg_pos, int end_pos) {
@@ -1049,16 +1062,27 @@ Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
return result;
}
+template Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
+ Isolate* isolate, Handle<PrimitiveHeapObject> export_name,
+ Handle<PrimitiveHeapObject> local_name,
+ Handle<PrimitiveHeapObject> import_name, int module_request, int cell_index,
+ int beg_pos, int end_pos);
+template Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
+ OffThreadIsolate* isolate, Handle<PrimitiveHeapObject> export_name,
+ Handle<PrimitiveHeapObject> local_name,
+ Handle<PrimitiveHeapObject> import_name, int module_request, int cell_index,
+ int beg_pos, int end_pos);
+
+template <typename LocalIsolate>
Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
- Isolate* isolate, Zone* zone, SourceTextModuleDescriptor* descr) {
+ LocalIsolate* isolate, Zone* zone, SourceTextModuleDescriptor* descr) {
// Serialize module requests.
int size = static_cast<int>(descr->module_requests().size());
Handle<FixedArray> module_requests = isolate->factory()->NewFixedArray(size);
Handle<FixedArray> module_request_positions =
isolate->factory()->NewFixedArray(size);
for (const auto& elem : descr->module_requests()) {
- module_requests->set(elem.second.index,
- *elem.first->string().get<Factory>());
+ module_requests->set(elem.second.index, *elem.first->string());
module_request_positions->set(elem.second.index,
Smi::FromInt(elem.second.position));
}
@@ -1113,6 +1137,10 @@ Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
result->set(kModuleRequestPositionsIndex, *module_request_positions);
return result;
}
+template Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
+ Isolate* isolate, Zone* zone, SourceTextModuleDescriptor* descr);
+template Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
+ OffThreadIsolate* isolate, Zone* zone, SourceTextModuleDescriptor* descr);
int SourceTextModuleInfo::RegularExportCount() const {
DCHECK_EQ(regular_exports().length() % kRegularExportLength, 0);
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 1eff2c411f..2e7df22562 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -36,8 +36,10 @@ class Zone;
// This object provides quick access to scope info details for runtime
// routines.
-class ScopeInfo : public FixedArray, public TorqueGeneratedScopeFlagsFields {
+class ScopeInfo : public FixedArray {
public:
+ DEFINE_TORQUE_GENERATED_SCOPE_FLAGS()
+
DECL_CAST(ScopeInfo)
DECL_PRINTER(ScopeInfo)
@@ -218,7 +220,9 @@ class ScopeInfo : public FixedArray, public TorqueGeneratedScopeFlagsFields {
bool Equals(ScopeInfo other) const;
#endif
- static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope,
+ template <typename LocalIsolate>
+ static Handle<ScopeInfo> Create(LocalIsolate* isolate, Zone* zone,
+ Scope* scope,
MaybeHandle<ScopeInfo> outer_scope);
static Handle<ScopeInfo> CreateForWithScope(
Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 3a038c1c38..b9fb7ea4f9 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "src/base/export-template.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
@@ -154,7 +155,9 @@ class Script : public Struct {
bool ContainsAsmModule();
// Init line_ends array with source code positions of line ends.
- V8_EXPORT_PRIVATE static void InitLineEnds(Handle<Script> script);
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ static void InitLineEnds(LocalIsolate* isolate, Handle<Script> script);
// Carries information about a source position.
struct PositionInfo {
@@ -192,8 +195,9 @@ class Script : public Struct {
// Look through the list of existing shared function infos to find one
// that matches the function literal. Return empty handle if not found.
+ template <typename LocalIsolate>
MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
- Isolate* isolate, const FunctionLiteral* fun);
+ LocalIsolate* isolate, const FunctionLiteral* fun);
// Iterate over all script objects on the heap.
class V8_EXPORT_PRIVATE Iterator {
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 9beba6d63b..af1b685ca8 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -110,11 +110,12 @@ INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
UINT16_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
UINT16_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
-UINT16_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
- kExpectedNofPropertiesOffset)
+UINT8_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
+ kExpectedNofPropertiesOffset)
UINT16_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
kFunctionTokenOffsetOffset)
RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+UINT8_ACCESSORS(SharedFunctionInfo, flags2, kFlags2Offset)
bool SharedFunctionInfo::HasSharedName() const {
Object value = name_or_scope_info();
@@ -184,6 +185,13 @@ int SharedFunctionInfo::function_token_position() const {
}
}
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, class_scope_has_private_brand,
+ SharedFunctionInfo::ClassScopeHasPrivateBrandBit)
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
+ has_static_private_methods_or_accessors,
+ SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
+
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, syntax_kind,
SharedFunctionInfo::FunctionSyntaxKindBits)
@@ -329,7 +337,7 @@ ScopeInfo SharedFunctionInfo::scope_info() const {
if (maybe_scope_info.IsScopeInfo()) {
return ScopeInfo::cast(maybe_scope_info);
}
- return ScopeInfo::Empty(GetIsolate());
+ return GetReadOnlyRoots().empty_scope_info();
}
void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
@@ -432,7 +440,7 @@ bool SharedFunctionInfo::IsApiFunction() const {
return function_data().IsFunctionTemplateInfo();
}
-FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() {
+FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() const {
DCHECK(IsApiFunction());
return FunctionTemplateInfo::cast(function_data());
}
@@ -619,7 +627,15 @@ void SharedFunctionInfo::ClearPreparseData() {
DCHECK(HasUncompiledDataWithoutPreparseData());
}
-void UncompiledData::Init(
+template <typename LocalIsolate>
+void UncompiledData::Init(LocalIsolate* isolate, String inferred_name,
+ int start_position, int end_position) {
+ set_inferred_name(inferred_name);
+ set_start_position(start_position);
+ set_end_position(end_position);
+}
+
+void UncompiledData::InitAfterBytecodeFlush(
String inferred_name, int start_position, int end_position,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot) {
@@ -630,17 +646,14 @@ void UncompiledData::Init(
set_end_position(end_position);
}
-void UncompiledDataWithPreparseData::Init(
- String inferred_name, int start_position, int end_position,
- PreparseData scope_data,
- std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
- gc_notify_updated_slot) {
- this->UncompiledData::Init(inferred_name, start_position, end_position,
- gc_notify_updated_slot);
+template <typename LocalIsolate>
+void UncompiledDataWithPreparseData::Init(LocalIsolate* isolate,
+ String inferred_name,
+ int start_position, int end_position,
+ PreparseData scope_data) {
+ this->UncompiledData::Init(isolate, inferred_name, start_position,
+ end_position);
set_preparse_data(scope_data);
- gc_notify_updated_slot(
- *this, RawField(UncompiledDataWithPreparseData::kPreparseDataOffset),
- scope_data);
}
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 0a0ed81f3f..fd8dbb6c89 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -99,11 +99,14 @@ class PreparseData
class UncompiledData
: public TorqueGeneratedUncompiledData<UncompiledData, HeapObject> {
public:
- inline void Init(
+ template <typename LocalIsolate>
+ inline void Init(LocalIsolate* isolate, String inferred_name,
+ int start_position, int end_position);
+
+ inline void InitAfterBytecodeFlush(
String inferred_name, int start_position, int end_position,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
- gc_notify_updated_slot =
- [](HeapObject object, ObjectSlot slot, HeapObject target) {});
+ gc_notify_updated_slot);
using BodyDescriptor =
FixedBodyDescriptor<kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset,
@@ -135,12 +138,10 @@ class UncompiledDataWithPreparseData
public:
DECL_PRINTER(UncompiledDataWithPreparseData)
- inline void Init(
- String inferred_name, int start_position, int end_position,
- PreparseData scope_data,
- std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
- gc_notify_updated_slot =
- [](HeapObject object, ObjectSlot slot, HeapObject target) {});
+ template <typename LocalIsolate>
+ inline void Init(LocalIsolate* isolate, String inferred_name,
+ int start_position, int end_position,
+ PreparseData scope_data);
using BodyDescriptor = SubclassBodyDescriptor<
UncompiledData::BodyDescriptor,
@@ -167,10 +168,11 @@ class InterpreterData : public Struct {
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
-class SharedFunctionInfo : public HeapObject,
- public TorqueGeneratedSharedFunctionInfoFlagsFields {
+class SharedFunctionInfo : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
+ DEFINE_TORQUE_GENERATED_SHARED_FUNCTION_INFO_FLAGS()
+ DEFINE_TORQUE_GENERATED_SHARED_FUNCTION_INFO_FLAGS2()
// This initializes the SharedFunctionInfo after allocation. It must
// initialize all fields, and leave the SharedFunctionInfo in a state where
@@ -278,7 +280,7 @@ class SharedFunctionInfo : public HeapObject,
// [expected_nof_properties]: Expected number of properties for the
// function. The value is only reliable when the function has been compiled.
- DECL_UINT16_ACCESSORS(expected_nof_properties)
+ DECL_UINT8_ACCESSORS(expected_nof_properties)
// [function_literal_id] - uniquely identifies the FunctionLiteral this
// SharedFunctionInfo represents within its script, or -1 if this
@@ -308,7 +310,7 @@ class SharedFunctionInfo : public HeapObject,
inline bool IsApiFunction() const;
inline bool is_class_constructor() const;
- inline FunctionTemplateInfo get_api_func_data();
+ inline FunctionTemplateInfo get_api_func_data() const;
inline void set_api_func_data(FunctionTemplateInfo data);
inline bool HasBytecodeArray() const;
inline BytecodeArray GetBytecodeArray() const;
@@ -401,6 +403,12 @@ class SharedFunctionInfo : public HeapObject,
// [flags] Bit field containing various flags about the function.
DECL_INT32_ACCESSORS(flags)
+ DECL_UINT8_ACCESSORS(flags2)
+
+ // True if the outer class scope contains a private brand for
+ // private instance methods.
+ DECL_BOOLEAN_ACCESSORS(class_scope_has_private_brand)
+ DECL_BOOLEAN_ACCESSORS(has_static_private_methods_or_accessors)
// Is this function a top-level function (scripts, evals).
DECL_BOOLEAN_ACCESSORS(is_toplevel)
@@ -553,7 +561,8 @@ class SharedFunctionInfo : public HeapObject,
inline bool has_simple_parameters();
// Initialize a SharedFunctionInfo from a parsed function literal.
- static void InitFromFunctionLiteral(Isolate* isolate,
+ template <typename LocalIsolate>
+ static void InitFromFunctionLiteral(LocalIsolate* isolate,
Handle<SharedFunctionInfo> shared_info,
FunctionLiteral* lit, bool is_toplevel);
@@ -587,6 +596,7 @@ class SharedFunctionInfo : public HeapObject,
// Dispatched behavior.
DECL_PRINTER(SharedFunctionInfo)
DECL_VERIFIER(SharedFunctionInfo)
+ void SharedFunctionInfoVerify(OffThreadIsolate* isolate);
#ifdef OBJECT_PRINT
void PrintSourceCode(std::ostream& os);
#endif
@@ -611,8 +621,6 @@ class SharedFunctionInfo : public HeapObject,
DECL_CAST(SharedFunctionInfo)
// Constants.
- static const uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
-
static const int kMaximumFunctionTokenOffset = kMaxUInt16 - 1;
static const uint16_t kFunctionTokenOutOfRange = static_cast<uint16_t>(-1);
STATIC_ASSERT(kMaximumFunctionTokenOffset + 1 == kFunctionTokenOutOfRange);
@@ -638,6 +646,8 @@ class SharedFunctionInfo : public HeapObject,
inline bool needs_home_object() const;
private:
+ void SharedFunctionInfoVerify(ReadOnlyRoots roots);
+
// [name_or_scope_info]: Function name string, kNoSharedNameSentinel or
// ScopeInfo.
DECL_ACCESSORS(name_or_scope_info, Object)
@@ -660,7 +670,8 @@ class SharedFunctionInfo : public HeapObject,
inline uint16_t get_property_estimate_from_literal(FunctionLiteral* literal);
- friend class Factory;
+ template <typename Impl>
+ friend class FactoryBase;
friend class V8HeapExplorer;
FRIEND_TEST(PreParserTest, LazyFunctionLength);
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 2eda70da5d..f37cc250bc 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -14,10 +14,9 @@ extern class InterpreterData extends Struct {
interpreter_trampoline: Code;
}
-type FunctionKind extends uint8 constexpr 'v8::internal::FunctionKind';
-type FunctionSyntaxKind extends uint8
-constexpr 'v8::internal::FunctionSyntaxKind';
-type BailoutReason extends uint8 constexpr 'v8::internal::BailoutReason';
+type FunctionKind extends uint8 constexpr 'FunctionKind';
+type FunctionSyntaxKind extends uint8 constexpr 'FunctionSyntaxKind';
+type BailoutReason extends uint8 constexpr 'BailoutReason';
bitfield struct SharedFunctionInfoFlags extends uint32 {
// Have FunctionKind first to make it cheaper to access.
@@ -42,6 +41,11 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
private_name_lookup_skips_outer_class: bool: 1 bit;
}
+bitfield struct SharedFunctionInfoFlags2 extends uint8 {
+ class_scope_has_private_brand: bool: 1 bit;
+ has_static_private_methods_or_accessors: bool: 1 bit;
+}
+
extern class SharedFunctionInfo extends HeapObject {
weak function_data: Object;
name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
@@ -49,9 +53,9 @@ extern class SharedFunctionInfo extends HeapObject {
script_or_debug_info: Script|DebugInfo|Undefined;
length: int16;
formal_parameter_count: uint16;
- // Currently set to uint16, can be set to uint8 to save space.
- expected_nof_properties: uint16;
function_token_offset: int16;
+ expected_nof_properties: uint8;
+ flags2: SharedFunctionInfoFlags2;
flags: SharedFunctionInfoFlags;
function_literal_id: int32;
@if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
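In the shared-function-info.tq hunk above, expected_nof_properties narrows from uint16 to uint8 and the freed byte is taken by the new flags2 byte, which — assuming the fields pack as listed — keeps the overall object size unchanged. A plain-struct sketch of that size-neutral repack (illustrative layout, not SharedFunctionInfo's real one):

#include <cstdint>
#include <iostream>

struct Before {
  int16_t length;
  uint16_t formal_parameter_count;
  uint16_t expected_nof_properties;  // 16 bits, mostly unused range
  int16_t function_token_offset;
};

struct After {
  int16_t length;
  uint16_t formal_parameter_count;
  int16_t function_token_offset;
  uint8_t expected_nof_properties;  // narrowed to 8 bits
  uint8_t flags2;                   // new flag byte fits the freed space
};

int main() {
  static_assert(sizeof(Before) == sizeof(After), "repack is size-neutral");
  std::cout << sizeof(Before) << " == " << sizeof(After) << "\n";
}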
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index 7e692b7948..4912831c09 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -49,6 +49,12 @@ void FullObjectSlot::Release_Store(Object value) const {
base::AsAtomicPointer::Release_Store(location(), value.ptr());
}
+Object FullObjectSlot::Relaxed_CompareAndSwap(Object old, Object target) const {
+ Address result = base::AsAtomicPointer::Relaxed_CompareAndSwap(
+ location(), old.ptr(), target.ptr());
+ return Object(result);
+}
+
Object FullObjectSlot::Release_CompareAndSwap(Object old, Object target) const {
Address result = base::AsAtomicPointer::Release_CompareAndSwap(
location(), old.ptr(), target.ptr());
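slots-inl.h gains a Relaxed_CompareAndSwap alongside the existing Release_ variant: the same compare-and-swap, returning the previously stored value, but with no ordering imposed on surrounding memory operations. Relaxed suffices when the slot's value is itself the only thing being synchronized. A std::atomic sketch of the two, with Address as a plain integer stand-in:

#include <atomic>
#include <cassert>
#include <cstdint>

using Address = uintptr_t;

Address Relaxed_CompareAndSwap(std::atomic<Address>* slot, Address expected,
                               Address target) {
  slot->compare_exchange_strong(expected, target, std::memory_order_relaxed);
  return expected;  // on failure, holds the value actually found
}

Address Release_CompareAndSwap(std::atomic<Address>* slot, Address expected,
                               Address target) {
  slot->compare_exchange_strong(expected, target, std::memory_order_release,
                                std::memory_order_relaxed);
  return expected;
}

int main() {
  std::atomic<Address> slot{0x10};
  // Succeeds: slot held 0x10, now holds 0x20; the old value is returned.
  assert(Relaxed_CompareAndSwap(&slot, 0x10, 0x20) == 0x10);
  // Fails: slot holds 0x20, not 0x10; the found value is returned.
  assert(Release_CompareAndSwap(&slot, 0x10, 0x30) == 0x20);
  assert(slot.load() == 0x20);
}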
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index 85f6525399..875ed7f82a 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -116,6 +116,7 @@ class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
inline Object Relaxed_Load() const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
+ inline Object Relaxed_CompareAndSwap(Object old, Object target) const;
inline Object Release_CompareAndSwap(Object old, Object target) const;
};
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index 6ffdd06f45..f54df2b7ca 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -369,9 +369,14 @@ bool SourceTextModule::RunInitializationCode(Isolate* isolate,
Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
DCHECK_EQ(MODULE_SCOPE, function->shared().scope_info().scope_type());
Handle<Object> receiver = isolate->factory()->undefined_value();
- Handle<Object> argv[] = {module};
+
+ Handle<ScopeInfo> scope_info(function->shared().scope_info(), isolate);
+ Handle<Context> context = isolate->factory()->NewModuleContext(
+ module, isolate->native_context(), scope_info);
+ function->set_context(*context);
+
MaybeHandle<Object> maybe_generator =
- Execution::Call(isolate, function, receiver, arraysize(argv), argv);
+ Execution::Call(isolate, function, receiver, 0, {});
Handle<Object> generator;
if (!maybe_generator.ToHandle(&generator)) {
DCHECK(isolate->has_pending_exception());
@@ -1054,7 +1059,7 @@ MaybeHandle<Object> SourceTextModule::InnerModuleEvaluation(
module->IncrementPendingAsyncDependencies();
// 2. Append module to requiredModule.[[AsyncParentModules]].
- required_module->AddAsyncParentModule(isolate, module);
+ AddAsyncParentModule(isolate, required_module, module);
}
} else {
RETURN_ON_EXCEPTION(isolate, Module::Evaluate(isolate, requested_module),
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index f1387635d0..57c84d833d 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -69,8 +69,9 @@ class SourceTextModule
// Appends a tuple of module and generator to the async parent modules
// ArrayList.
- inline void AddAsyncParentModule(Isolate* isolate,
- Handle<SourceTextModule> module);
+ inline static void AddAsyncParentModule(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ Handle<SourceTextModule> parent);
// Returns a SourceTextModule, the
// ith parent in depth first traversal order of a given async child.
@@ -84,13 +85,6 @@ class SourceTextModule
inline void IncrementPendingAsyncDependencies();
inline void DecrementPendingAsyncDependencies();
- // TODO(neis): Don't store those in the module object?
- DECL_INT_ACCESSORS(dfs_index)
- DECL_INT_ACCESSORS(dfs_ancestor_index)
-
- // Storage for boolean flags.
- DECL_INT_ACCESSORS(flags)
-
// Bits for flags.
static const int kAsyncBit = 0;
static const int kAsyncEvaluatingBit = 1;
@@ -107,9 +101,6 @@ class SourceTextModule
// for cycle roots.
DECL_ACCESSORS(top_level_capability, HeapObject)
- // The number of currently evaluating async dependencies of this module.
- DECL_INT_ACCESSORS(pending_async_dependencies)
-
// The parent modules of a given async dependency, use async_parent_modules()
// to retrieve the ArrayList representation.
DECL_ACCESSORS(async_parent_modules, ArrayList)
@@ -196,7 +187,8 @@ class SourceTextModuleInfo : public FixedArray {
public:
DECL_CAST(SourceTextModuleInfo)
- static Handle<SourceTextModuleInfo> New(Isolate* isolate, Zone* zone,
+ template <typename LocalIsolate>
+ static Handle<SourceTextModuleInfo> New(LocalIsolate* isolate, Zone* zone,
SourceTextModuleDescriptor* descr);
inline FixedArray module_requests() const;
@@ -217,7 +209,8 @@ class SourceTextModuleInfo : public FixedArray {
#endif
private:
- friend class Factory;
+ template <typename Impl>
+ friend class FactoryBase;
friend class SourceTextModuleDescriptor;
enum {
kModuleRequestsIndex,
@@ -245,13 +238,9 @@ class SourceTextModuleInfoEntry
DECL_PRINTER(SourceTextModuleInfoEntry)
DECL_VERIFIER(SourceTextModuleInfoEntry)
- DECL_INT_ACCESSORS(module_request)
- DECL_INT_ACCESSORS(cell_index)
- DECL_INT_ACCESSORS(beg_pos)
- DECL_INT_ACCESSORS(end_pos)
-
+ template <typename LocalIsolate>
static Handle<SourceTextModuleInfoEntry> New(
- Isolate* isolate, Handle<PrimitiveHeapObject> export_name,
+ LocalIsolate* isolate, Handle<PrimitiveHeapObject> export_name,
Handle<PrimitiveHeapObject> local_name,
Handle<PrimitiveHeapObject> import_name, int module_request,
int cell_index, int beg_pos, int end_pos);
diff --git a/deps/v8/src/objects/source-text-module.tq b/deps/v8/src/objects/source-text-module.tq
index 0f747976bb..fda0138695 100644
--- a/deps/v8/src/objects/source-text-module.tq
+++ b/deps/v8/src/objects/source-text-module.tq
@@ -28,11 +28,17 @@ extern class SourceTextModule extends Module {
// Lazily initialized on first access. It's the hole before first access and
// a JSObject afterwards.
import_meta: TheHole|JSObject;
+
async_parent_modules: ArrayList;
top_level_capability: JSPromise|Undefined;
+
+ // TODO(neis): Don't store those in the module object?
dfs_index: Smi;
dfs_ancestor_index: Smi;
+
+ // The number of currently evaluating async dependencies of this module.
pending_async_dependencies: Smi;
+
flags: Smi;
}
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index ee3b83c12f..36236f576b 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -53,9 +53,6 @@ BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_all, kIsPromiseAllBit)
TQ_OBJECT_CONSTRUCTORS_IMPL(StackTraceFrame)
NEVER_READ_ONLY_SPACE_IMPL(StackTraceFrame)
-TQ_SMI_ACCESSORS(StackTraceFrame, frame_index)
-TQ_SMI_ACCESSORS(StackTraceFrame, id)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index ac2913ebed..c15ad1031c 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -71,8 +71,23 @@ Handle<Object> StackTraceFrame::GetFileName(Handle<StackTraceFrame> frame) {
// static
Handle<Object> StackTraceFrame::GetScriptNameOrSourceUrl(
Handle<StackTraceFrame> frame) {
- auto name = GetFrameInfo(frame)->script_name_or_source_url();
- return handle(name, frame->GetIsolate());
+ Isolate* isolate = frame->GetIsolate();
+ // TODO(caseq, szuend): the logic below is a workaround for crbug.com/1057211.
+ // We should probably have a dedicated API for the scenario described in the
+ // bug above and make getters of this class behave consistently.
+ // See https://bit.ly/2wkbuIy for further discussion.
+ // Use FrameInfo if it's already there, but avoid initializing it for just
+ // the script name, as it is much more expensive than just getting this
+ // directly.
+ if (!frame->frame_info().IsUndefined()) {
+ auto name = GetFrameInfo(frame)->script_name_or_source_url();
+ return handle(name, isolate);
+ }
+ FrameArrayIterator it(isolate,
+ handle(FrameArray::cast(frame->frame_array()), isolate),
+ frame->frame_index());
+ DCHECK(it.HasFrame());
+ return it.Frame()->GetScriptNameOrSourceUrl();
}
// static
@@ -368,6 +383,16 @@ void SerializeAsmJsWasmStackFrame(Isolate* isolate,
return;
}
+bool IsAnonymousWasmScript(Isolate* isolate, Handle<StackTraceFrame> frame,
+ Handle<Object> url) {
+ DCHECK(url->IsString());
+ Handle<String> anonymous_prefix =
+ isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/"));
+ return (StackTraceFrame::IsWasm(frame) &&
+ StringIndexOf(isolate, Handle<String>::cast(url), anonymous_prefix) >=
+ 0);
+}
+
void SerializeWasmStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
IncrementalStringBuilder* builder) {
Handle<Object> module_name = StackTraceFrame::GetWasmModuleName(frame);
@@ -386,8 +411,15 @@ void SerializeWasmStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
builder->AppendCString(" (");
}
- const int wasm_func_index = StackTraceFrame::GetWasmFunctionIndex(frame);
+ Handle<Object> url = StackTraceFrame::GetScriptNameOrSourceUrl(frame);
+ if (IsNonEmptyString(url) && !IsAnonymousWasmScript(isolate, frame, url)) {
+ builder->AppendString(Handle<String>::cast(url));
+ } else {
+ builder->AppendCString("<anonymous>");
+ }
+ builder->AppendCString(":");
+ const int wasm_func_index = StackTraceFrame::GetWasmFunctionIndex(frame);
builder->AppendCString("wasm-function[");
builder->AppendInt(wasm_func_index);
builder->AppendCString("]:");
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 2ede430f11..83a24c047a 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -76,8 +76,6 @@ class StackTraceFrame
: public TorqueGeneratedStackTraceFrame<StackTraceFrame, Struct> {
public:
NEVER_READ_ONLY_SPACE
- DECL_INT_ACCESSORS(frame_index)
- DECL_INT_ACCESSORS(id)
// Dispatched behavior.
DECL_PRINTER(StackTraceFrame)
@@ -128,6 +126,7 @@ Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate,
class IncrementalStringBuilder;
void SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
IncrementalStringBuilder* builder);
+V8_EXPORT_PRIVATE
MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
Handle<StackTraceFrame> frame);
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 93453eee21..5dd460a959 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -397,6 +397,13 @@ Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
return string;
}
+Handle<String> String::Flatten(OffThreadIsolate* isolate, Handle<String> string,
+ AllocationType allocation) {
+ // We should never pass non-flat strings to String::Flatten when off-thread.
+ DCHECK(string->IsFlat());
+ return string;
+}
+
uint16_t String::Get(int index) {
DCHECK(index >= 0 && index < length());
@@ -575,8 +582,6 @@ void SlicedString::set_parent(String parent, WriteBarrierMode mode) {
TorqueGeneratedSlicedString<SlicedString, Super>::set_parent(parent, mode);
}
-TQ_SMI_ACCESSORS(SlicedString, offset)
-
Object ConsString::unchecked_first() {
return TaggedField<Object, kFirstOffset>::load(*this);
}
@@ -773,15 +778,11 @@ void StringCharacterStream::VisitTwoByteString(const uint16_t* chars,
bool String::AsArrayIndex(uint32_t* index) {
DisallowHeapAllocation no_gc;
uint32_t field = hash_field();
- // The {IsHashFieldComputed} check is not functionally necessary as the
- // subsequent mask includes it; it's here to make the logic more obvious,
- // and the compile will fold it away so it doesn't hurt performance.
- if (IsHashFieldComputed(field) &&
- (field & kDoesNotContainCachedArrayIndexMask) == 0) {
+ if (ContainsCachedArrayIndex(field)) {
*index = ArrayIndexValueBits::decode(field);
return true;
}
- if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
+ if (IsHashFieldComputed(field) && (field & kIsNotIntegerIndexMask)) {
return false;
}
return SlowAsArrayIndex(index);
@@ -789,11 +790,7 @@ bool String::AsArrayIndex(uint32_t* index) {
bool String::AsIntegerIndex(size_t* index) {
uint32_t field = hash_field();
- // The {IsHashFieldComputed} check is not functionally necessary as the
- // subsequent mask includes it; it's here to make the logic more obvious,
- // and the compile will fold it away so it doesn't hurt performance.
- if (IsHashFieldComputed(field) &&
- (field & kDoesNotContainCachedArrayIndexMask) == 0) {
+ if (ContainsCachedArrayIndex(field)) {
*index = ArrayIndexValueBits::decode(field);
return true;
}
diff --git a/deps/v8/src/objects/string-table-inl.h b/deps/v8/src/objects/string-table-inl.h
index 1b7b7d140c..c0bcd2083b 100644
--- a/deps/v8/src/objects/string-table-inl.h
+++ b/deps/v8/src/objects/string-table-inl.h
@@ -32,7 +32,7 @@ bool StringSetShape::IsMatch(String key, Object value) {
return key.Equals(String::cast(value));
}
-uint32_t StringSetShape::Hash(Isolate* isolate, String key) {
+uint32_t StringSetShape::Hash(ReadOnlyRoots roots, String key) {
return key.Hash();
}
@@ -59,7 +59,7 @@ uint32_t StringTableKey::hash() const {
}
// static
-uint32_t StringTableShape::Hash(Isolate* isolate, Key key) {
+uint32_t StringTableShape::Hash(ReadOnlyRoots roots, Key key) {
return key->hash();
}
@@ -72,8 +72,8 @@ uint32_t StringTableShape::HashForObject(ReadOnlyRoots roots, Object object) {
return String::cast(object).Hash();
}
-RootIndex StringTableShape::GetMapRootIndex() {
- return RootIndex::kStringTableMap;
+Handle<Map> StringTableShape::GetMap(ReadOnlyRoots roots) {
+ return roots.string_table_map_handle();
}
} // namespace internal
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index e6619db4ba..54af9a4452 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -41,13 +41,13 @@ class V8_EXPORT_PRIVATE StringTableShape : public BaseShape<StringTableKey*> {
public:
static inline bool IsMatch(Key key, Object value);
- static inline uint32_t Hash(Isolate* isolate, Key key);
+ static inline uint32_t Hash(ReadOnlyRoots roots, Key key);
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
- static inline RootIndex GetMapRootIndex();
+ static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
@@ -55,6 +55,8 @@ class V8_EXPORT_PRIVATE StringTableShape : public BaseShape<StringTableKey*> {
class SeqOneByteString;
+EXTERN_DECLARE_HASH_TABLE(StringTable, StringTableShape)
+
// StringTable.
//
// No special elements in the prefix and the element size is 1
@@ -98,13 +100,15 @@ class V8_EXPORT_PRIVATE StringTable
class StringSetShape : public BaseShape<String> {
public:
static inline bool IsMatch(String key, Object value);
- static inline uint32_t Hash(Isolate* isolate, String key);
+ static inline uint32_t Hash(ReadOnlyRoots roots, String key);
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
};
+EXTERN_DECLARE_HASH_TABLE(StringSet, StringSetShape)
+
class StringSet : public HashTable<StringSet, StringSetShape> {
public:
V8_EXPORT_PRIVATE static Handle<StringSet> New(Isolate* isolate);
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index cf91a10bd0..8c96900bbe 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -4,6 +4,7 @@
#include "src/objects/string.h"
+#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h" // For LooksValid implementation.
#include "src/heap/read-only-heap.h"
@@ -688,7 +689,7 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
}
template <typename SourceChar>
-static void CalculateLineEndsImpl(Isolate* isolate, std::vector<int>* line_ends,
+static void CalculateLineEndsImpl(std::vector<int>* line_ends,
Vector<const SourceChar> src,
bool include_ending_line) {
const int src_len = src.length();
@@ -708,7 +709,8 @@ static void CalculateLineEndsImpl(Isolate* isolate, std::vector<int>* line_ends,
}
}
-Handle<FixedArray> String::CalculateLineEnds(Isolate* isolate,
+template <typename LocalIsolate>
+Handle<FixedArray> String::CalculateLineEnds(LocalIsolate* isolate,
Handle<String> src,
bool include_ending_line) {
src = Flatten(isolate, src);
@@ -723,21 +725,29 @@ Handle<FixedArray> String::CalculateLineEnds(Isolate* isolate,
String::FlatContent content = src->GetFlatContent(no_allocation);
DCHECK(content.IsFlat());
if (content.IsOneByte()) {
- CalculateLineEndsImpl(isolate, &line_ends, content.ToOneByteVector(),
+ CalculateLineEndsImpl(&line_ends, content.ToOneByteVector(),
include_ending_line);
} else {
- CalculateLineEndsImpl(isolate, &line_ends, content.ToUC16Vector(),
+ CalculateLineEndsImpl(&line_ends, content.ToUC16Vector(),
include_ending_line);
}
}
int line_count = static_cast<int>(line_ends.size());
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(line_count, AllocationType::kOld);
for (int i = 0; i < line_count; i++) {
array->set(i, Smi::FromInt(line_ends[i]));
}
return array;
}
+template Handle<FixedArray> String::CalculateLineEnds(Isolate* isolate,
+ Handle<String> src,
+ bool include_ending_line);
+template Handle<FixedArray> String::CalculateLineEnds(OffThreadIsolate* isolate,
+ Handle<String> src,
+ bool include_ending_line);
+
bool String::SlowEquals(String other) {
DisallowHeapAllocation no_gc;
// Fast check: negative check with lengths.
@@ -1371,7 +1381,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
if (length <= kMaxCachedArrayIndexLength) {
Hash(); // Force computation of hash code.
uint32_t field = hash_field();
- if ((field & kIsNotArrayIndexMask) != 0) return false;
+ if ((field & kIsNotIntegerIndexMask) != 0) return false;
*index = ArrayIndexValueBits::decode(field);
return true;
}
@@ -1386,12 +1396,7 @@ bool String::SlowAsIntegerIndex(size_t* index) {
if (length <= kMaxCachedArrayIndexLength) {
Hash(); // Force computation of hash code.
uint32_t field = hash_field();
- if ((field & kIsNotArrayIndexMask) != 0) {
- // If it was short but it's not an array index, then it can't be an
- // integer index either.
- DCHECK_NE(0, field & kIsNotIntegerIndexMask);
- return false;
- }
+ if ((field & kIsNotIntegerIndexMask) != 0) return false;
*index = ArrayIndexValueBits::decode(field);
return true;
}
@@ -1652,6 +1657,37 @@ String ConsStringIterator::NextLeaf(bool* blew_stack) {
UNREACHABLE();
}
+const byte* String::AddressOfCharacterAt(int start_index,
+ const DisallowHeapAllocation& no_gc) {
+ DCHECK(IsFlat());
+ String subject = *this;
+ if (subject.IsConsString()) {
+ subject = ConsString::cast(subject).first();
+ } else if (subject.IsSlicedString()) {
+ start_index += SlicedString::cast(subject).offset();
+ subject = SlicedString::cast(subject).parent();
+ }
+ if (subject.IsThinString()) {
+ subject = ThinString::cast(subject).actual();
+ }
+ CHECK_LE(0, start_index);
+ CHECK_LE(start_index, subject.length());
+ if (subject.IsSeqOneByteString()) {
+ return reinterpret_cast<const byte*>(
+ SeqOneByteString::cast(subject).GetChars(no_gc) + start_index);
+ } else if (subject.IsSeqTwoByteString()) {
+ return reinterpret_cast<const byte*>(
+ SeqTwoByteString::cast(subject).GetChars(no_gc) + start_index);
+ } else if (subject.IsExternalOneByteString()) {
+ return reinterpret_cast<const byte*>(
+ ExternalOneByteString::cast(subject).GetChars() + start_index);
+ } else {
+ DCHECK(subject.IsExternalTwoByteString());
+ return reinterpret_cast<const byte*>(
+ ExternalTwoByteString::cast(subject).GetChars() + start_index);
+ }
+}
+
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
String source, uint16_t* sink, int from, int to);
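A note on the AddressOfCharacterAt() addition above: the IsFlat() precondition is what makes the unwrapping sound. A flat ConsString keeps all of its characters in first() (its second() is empty), a SlicedString is a view into parent() starting at offset(), and a ThinString forwards to actual(); after one round of unwrapping, only the four leaf representations (sequential or external, one- or two-byte) remain, each of which can hand out a raw character pointer.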
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 941d5f3c7c..616a4b967b 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -161,6 +161,11 @@ class String : public TorqueGeneratedString<String, Name> {
template <typename Char>
inline const Char* GetChars(const DisallowHeapAllocation& no_gc);
+ // Returns the address of the character at an offset into this string.
+ // Requires: this->IsFlat()
+ const byte* AddressOfCharacterAt(int start_index,
+ const DisallowHeapAllocation& no_gc);
+
// Get and set the length of the string using acquire loads and release
// stores.
DECL_SYNCHRONIZED_INT_ACCESSORS(length)
@@ -203,6 +208,9 @@ class String : public TorqueGeneratedString<String, Name> {
static inline Handle<String> Flatten(
Isolate* isolate, Handle<String> string,
AllocationType allocation = AllocationType::kYoung);
+ static inline Handle<String> Flatten(
+ OffThreadIsolate* isolate, Handle<String> string,
+ AllocationType allocation = AllocationType::kYoung);
// Tries to return the content of a flat string as a structure holding either
// a flat vector of char or of uc16.
@@ -454,7 +462,8 @@ class String : public TorqueGeneratedString<String, Name> {
static inline ConsString VisitFlat(Visitor* visitor, String string,
int offset = 0);
- static Handle<FixedArray> CalculateLineEnds(Isolate* isolate,
+ template <typename LocalIsolate>
+ static Handle<FixedArray> CalculateLineEnds(LocalIsolate* isolate,
Handle<String> string,
bool include_ending_line);
@@ -673,7 +682,6 @@ class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
public:
inline void set_parent(String parent,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- DECL_INT_ACCESSORS(offset)
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t Get(int index);
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index b7d297d9a1..b8d9714b36 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -6,7 +6,7 @@
@generateCppClass
@reserveBitsInInstanceType(6)
extern class String extends Name {
- length: int32;
+ const length: int32;
}
@generateCppClass
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index a7bbc77f55..fbbdd3038b 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -34,9 +34,6 @@ void Struct::InitializeBody(int object_size) {
}
}
-TQ_SMI_ACCESSORS(ClassPositions, start)
-TQ_SMI_ACCESSORS(ClassPositions, end)
-
Object AccessorPair::get(AccessorComponent component) {
return component == ACCESSOR_GETTER ? getter() : setter();
}
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index 8ee84a59cc..e28785ce6e 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -68,9 +68,6 @@ class AccessorPair : public TorqueGeneratedAccessorPair<AccessorPair, Struct> {
class ClassPositions
: public TorqueGeneratedClassPositions<ClassPositions, Struct> {
public:
- DECL_INT_ACCESSORS(start)
- DECL_INT_ACCESSORS(end)
-
// Dispatched behavior.
DECL_PRINTER(ClassPositions)
void BriefPrintDetails(std::ostream& os);
diff --git a/deps/v8/src/objects/tagged-index.h b/deps/v8/src/objects/tagged-index.h
new file mode 100644
index 0000000000..c7d6a85a0d
--- /dev/null
+++ b/deps/v8/src/objects/tagged-index.h
@@ -0,0 +1,84 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_INDEX_H_
+#define V8_OBJECTS_TAGGED_INDEX_H_
+
+#include "src/common/globals.h"
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// TaggedIndex represents integer values that can be stored in 31 bits.
+// On 32-bit architectures the ptr_ value has the following format:
+// [31 bit signed int] 0
+// On 64-bit architectures the ptr_ value has the following format:
+// [32 bits of sign-extended lower part][31 bit signed int] 0
+// Thus, on 32-bit architectures TaggedIndex is exactly the same as Smi but
+// on 64-bit architectures TaggedIndex differs from Smi in the following
+// aspects:
+// 1) the TaggedIndex payload is always 31 bits, independent of the Smi
+// payload size
+// 2) TaggedIndex is always properly sign-extended, whether or not pointer
+// compression is enabled. With pointer compression enabled, the upper
+// 32 bits of a Smi value may contain 0, the sign extension, or the isolate
+// root value.
+//
+// Given the above constraints, TaggedIndex has the following properties:
+// 1) it still looks like a Smi from the GC's point of view, and it is
+// therefore safe to pass TaggedIndex values to runtime functions or
+// builtins on the stack
+// 2) since TaggedIndex values are already properly sign-extended, it is
+// safe to use them as indices in offset-computation functions.
+class TaggedIndex : public Object {
+ public:
+ // This replaces the OBJECT_CONSTRUCTORS macro, because TaggedIndex values
+ // are special in that we want them to be constexpr.
+ constexpr TaggedIndex() : Object() {}
+ explicit constexpr TaggedIndex(Address ptr) : Object(ptr) {
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK(HAS_SMI_TAG(ptr));
+#endif
+ }
+
+ // Returns the integer value.
+ inline intptr_t value() const {
+ // Truncate and shift down (requires >> to be sign extending).
+ return static_cast<intptr_t>(ptr()) >> kSmiTagSize;
+ }
+
+ // Convert a value to a TaggedIndex object.
+ static inline TaggedIndex FromIntptr(intptr_t value) {
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK(TaggedIndex::IsValid(value));
+#endif
+ return TaggedIndex((static_cast<Address>(value) << kSmiTagSize) | kSmiTag);
+ }
+
+ // Returns whether value can be represented in a TaggedIndex.
+ static inline bool constexpr IsValid(intptr_t value) {
+ return kMinValue <= value && value <= kMaxValue;
+ }
+
+ DECL_CAST(TaggedIndex)
+
+ // Dispatched behavior.
+ DECL_VERIFIER(TaggedIndex)
+
+ STATIC_ASSERT(kSmiTagSize == 1);
+ static constexpr int kTaggedValueSize = 31;
+ static constexpr intptr_t kMinValue =
+ static_cast<intptr_t>(kUintptrAllBitsSet << (kTaggedValueSize - 1));
+ static constexpr intptr_t kMaxValue = -(kMinValue + 1);
+};
+
+CAST_ACCESSOR(TaggedIndex)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TAGGED_INDEX_H_
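The encoding described in the comment above is easy to model in isolation. Below is a minimal stand-alone sketch of FromIntptr() and value(), assuming kSmiTag == 0 and kSmiTagSize == 1 (both asserted in the header); it is an illustration, not V8 code:

    #include <cassert>
    #include <cstdint>

    // Encode: shift the payload up by one bit. The low (tag) bit stays 0,
    // so the result still looks like a Smi to the GC.
    int64_t Encode(int64_t value) {
      return static_cast<int64_t>(static_cast<uint64_t>(value) << 1);
    }

    // Decode: an arithmetic right shift restores the payload with proper
    // sign extension, which is what makes the result safe to use directly
    // in offset computations.
    int64_t Decode(int64_t ptr) { return ptr >> 1; }

    int main() {
      assert(Decode(Encode(-42)) == -42);
      assert((Encode(7) & 1) == 0);  // tag bit clear, as for a Smi
      return 0;
    }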
diff --git a/deps/v8/src/objects/template-objects-inl.h b/deps/v8/src/objects/template-objects-inl.h
index 3f84f53687..3718955fb7 100644
--- a/deps/v8/src/objects/template-objects-inl.h
+++ b/deps/v8/src/objects/template-objects-inl.h
@@ -18,8 +18,6 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription)
TQ_OBJECT_CONSTRUCTORS_IMPL(CachedTemplateObject)
-TQ_SMI_ACCESSORS(CachedTemplateObject, slot_id)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index 085727ecb5..094485de50 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -20,8 +20,6 @@ namespace internal {
class CachedTemplateObject final
: public TorqueGeneratedCachedTemplateObject<CachedTemplateObject, Struct> {
public:
- DECL_INT_ACCESSORS(slot_id)
-
static Handle<CachedTemplateObject> New(Isolate* isolate, int slot_id,
Handle<JSArray> template_object,
Handle<HeapObject> next);
diff --git a/deps/v8/src/objects/template.tq b/deps/v8/src/objects/template.tq
index 472d968855..d3e251aa16 100644
--- a/deps/v8/src/objects/template.tq
+++ b/deps/v8/src/objects/template.tq
@@ -23,6 +23,8 @@ extern class FunctionTemplateRareData extends Struct {
instance_template: Object;
instance_call_handler: Object;
access_check_info: Object;
+ c_function: Foreign|Smi;
+ c_signature: Foreign|Smi;
}
@generateCppClass
@@ -41,7 +43,9 @@ extern class FunctionTemplateInfo extends TemplateInfo {
// undefined.
rare_data: HeapObject;
shared_function_info: Object;
+ // Internal field to store a flag bitfield.
flag: Smi;
+ // "length" property of the final JSFunction.
length: Smi;
// Either the_hole or a private symbol. Used to cache the result on
// the receiver under the cached_property_name when this
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index be58fc12bc..988230b7ae 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -24,9 +24,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateRareData)
NEVER_READ_ONLY_SPACE_IMPL(TemplateInfo)
-TQ_SMI_ACCESSORS(TemplateInfo, number_of_properties)
-
-TQ_SMI_ACCESSORS(FunctionTemplateInfo, length)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
kNeedsAccessCheckBit)
@@ -37,7 +34,6 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache, kDoNotCacheBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
kAcceptAnyReceiver)
-TQ_SMI_ACCESSORS(FunctionTemplateInfo, flag)
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
@@ -50,11 +46,11 @@ FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
}
}
-#define RARE_ACCESSORS(Name, CamelName, Type) \
+#define RARE_ACCESSORS(Name, CamelName, Type, Default) \
DEF_GETTER(FunctionTemplateInfo, Get##CamelName, Type) { \
HeapObject extra = rare_data(isolate); \
HeapObject undefined = GetReadOnlyRoots(isolate).undefined_value(); \
- return extra == undefined ? undefined \
+ return extra == undefined ? Default \
: FunctionTemplateRareData::cast(extra).Name(); \
} \
inline void FunctionTemplateInfo::Set##CamelName( \
@@ -65,14 +61,18 @@ FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
rare_data.set_##Name(*Name); \
}
-RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object)
-RARE_ACCESSORS(prototype_provider_template, PrototypeProviderTemplate, Object)
-RARE_ACCESSORS(parent_template, ParentTemplate, Object)
-RARE_ACCESSORS(named_property_handler, NamedPropertyHandler, Object)
-RARE_ACCESSORS(indexed_property_handler, IndexedPropertyHandler, Object)
-RARE_ACCESSORS(instance_template, InstanceTemplate, Object)
-RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, Object)
-RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object)
+RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object, undefined)
+RARE_ACCESSORS(prototype_provider_template, PrototypeProviderTemplate, Object,
+ undefined)
+RARE_ACCESSORS(parent_template, ParentTemplate, Object, undefined)
+RARE_ACCESSORS(named_property_handler, NamedPropertyHandler, Object, undefined)
+RARE_ACCESSORS(indexed_property_handler, IndexedPropertyHandler, Object,
+ undefined)
+RARE_ACCESSORS(instance_template, InstanceTemplate, Object, undefined)
+RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, Object, undefined)
+RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object, undefined)
+RARE_ACCESSORS(c_function, CFunction, Object, Smi(0))
+RARE_ACCESSORS(c_signature, CSignature, Object, Smi(0))
#undef RARE_ACCESSORS
bool FunctionTemplateInfo::instantiated() {
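The Default parameter added to RARE_ACCESSORS above changes what the generated getters return when no rare data has been allocated: the object-valued fields keep returning undefined, while the new c_function and c_signature fall back to Smi(0). A self-contained miniature of the same macro shape (hypothetical types, for illustration only):

    #include <cassert>

    struct RareData { int c_function; };

    // Same idea as RARE_ACCESSORS: the generated getter takes a per-field
    // default to return when the rare-data object is absent.
    #define RARE_ACCESSOR(name, Default)                 \
      int Get_##name(const RareData* rare) {             \
        return rare == nullptr ? (Default) : rare->name; \
      }

    RARE_ACCESSOR(c_function, 0)
    #undef RARE_ACCESSOR

    int main() {
      RareData rare{42};
      assert(Get_c_function(&rare) == 42);   // rare data present
      assert(Get_c_function(nullptr) == 0);  // falls back to the default
      return 0;
    }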
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index c2cc0855a7..01d40eb29a 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -16,7 +16,6 @@ namespace internal {
class TemplateInfo : public TorqueGeneratedTemplateInfo<TemplateInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
- DECL_INT_ACCESSORS(number_of_properties)
static const int kFastTemplateInstantiationsCacheSize = 1 * KB;
@@ -82,13 +81,10 @@ class FunctionTemplateInfo
DECL_RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, Object)
DECL_RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object)
-#undef DECL_RARE_ACCESSORS
-
- // Internal field to store a flag bitfield.
- DECL_INT_ACCESSORS(flag)
- // "length" property of the final JSFunction.
- DECL_INT_ACCESSORS(length)
+ DECL_RARE_ACCESSORS(c_function, CFunction, Object)
+ DECL_RARE_ACCESSORS(c_signature, CSignature, Object)
+#undef DECL_RARE_ACCESSORS
// Begin flag bits ---------------------
DECL_BOOLEAN_ACCESSORS(undetectable)
@@ -120,6 +116,14 @@ class FunctionTemplateInfo
static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
Isolate* isolate, Handle<FunctionTemplateInfo> info,
MaybeHandle<Name> maybe_name);
+
+ static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
+ OffThreadIsolate* isolate, Handle<FunctionTemplateInfo> info,
+ Handle<Name> maybe_name) {
+ // We don't support streaming compilation of scripts with natives, so we
+ // don't need an off-thread implementation of this.
+ UNREACHABLE();
+ }
// Returns parent function template or a null FunctionTemplateInfo.
inline FunctionTemplateInfo GetParent(Isolate* isolate);
// Returns true if |object| is an instance of this function template.
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index 0b05176a77..b61224fbbb 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -45,7 +45,7 @@ void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
}
}
-const AstConsString* FuncNameInferrer::MakeNameFromStack() {
+AstConsString* FuncNameInferrer::MakeNameFromStack() {
if (names_stack_.size() == 0) {
return ast_value_factory_->empty_cons_string();
}
@@ -70,7 +70,7 @@ const AstConsString* FuncNameInferrer::MakeNameFromStack() {
}
void FuncNameInferrer::InferFunctionsNames() {
- const AstConsString* func_name = MakeNameFromStack();
+ AstConsString* func_name = MakeNameFromStack();
for (FunctionLiteral* func : funcs_to_infer_) {
func->set_raw_inferred_name(func_name);
}
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index 66dd21f8cd..553e6fa512 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -110,7 +110,7 @@ class FuncNameInferrer {
};
// Constructs a full name in dotted notation from gathered names.
- const AstConsString* MakeNameFromStack();
+ AstConsString* MakeNameFromStack();
// Performs name inferring for added functions.
void InferFunctionsNames();
diff --git a/deps/v8/src/parsing/literal-buffer.cc b/deps/v8/src/parsing/literal-buffer.cc
index 6400809a87..70b1903279 100644
--- a/deps/v8/src/parsing/literal-buffer.cc
+++ b/deps/v8/src/parsing/literal-buffer.cc
@@ -5,19 +5,25 @@
#include "src/parsing/literal-buffer.h"
#include "src/execution/isolate.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/heap/factory.h"
#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
-Handle<String> LiteralBuffer::Internalize(Isolate* isolate) const {
+template <typename LocalIsolate>
+Handle<String> LiteralBuffer::Internalize(LocalIsolate* isolate) const {
if (is_one_byte()) {
return isolate->factory()->InternalizeString(one_byte_literal());
}
return isolate->factory()->InternalizeString(two_byte_literal());
}
+template Handle<String> LiteralBuffer::Internalize(Isolate* isolate) const;
+template Handle<String> LiteralBuffer::Internalize(
+ OffThreadIsolate* isolate) const;
+
int LiteralBuffer::NewCapacity(int min_capacity) {
return min_capacity < (kMaxGrowth / (kGrowthFactor - 1))
? min_capacity * kGrowthFactor
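LiteralBuffer::Internalize, like String::CalculateLineEnds and ParseInfo::CreateScript elsewhere in this patch, follows one pattern: the template is defined in the .cc file and explicitly instantiated for exactly the two isolate types callers may pass, so the definition stays out of the header. In miniature (stand-in types; a sketch, not V8 code):

    // The definition lives in a single translation unit...
    template <typename LocalIsolate>
    int StackLimitOf(LocalIsolate* isolate) { return isolate->stack_limit; }

    struct MainIsolate { int stack_limit = 1; };
    struct BackgroundIsolate { int stack_limit = 2; };

    // ...and only these two instantiations are emitted and linkable.
    template int StackLimitOf(MainIsolate*);
    template int StackLimitOf(BackgroundIsolate*);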
diff --git a/deps/v8/src/parsing/literal-buffer.h b/deps/v8/src/parsing/literal-buffer.h
index 3d61a00393..56d5010b53 100644
--- a/deps/v8/src/parsing/literal-buffer.h
+++ b/deps/v8/src/parsing/literal-buffer.h
@@ -63,7 +63,8 @@ class LiteralBuffer final {
is_one_byte_ = true;
}
- Handle<String> Internalize(Isolate* isolate) const;
+ template <typename LocalIsolate>
+ Handle<String> Internalize(LocalIsolate* isolate) const;
private:
static const int kInitialCapacity = 16;
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 069696cef1..3723636804 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -7,6 +7,7 @@
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
+#include "src/common/globals.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
@@ -19,7 +20,7 @@
namespace v8 {
namespace internal {
-ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
+ParseInfo::ParseInfo(AccountingAllocator* zone_allocator, int script_id)
: zone_(std::make_unique<Zone>(zone_allocator, ZONE_NAME)),
flags_(0),
extension_(nullptr),
@@ -28,7 +29,7 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
hash_seed_(0),
function_kind_(FunctionKind::kNormalFunction),
function_syntax_kind_(FunctionSyntaxKind::kDeclaration),
- script_id_(-1),
+ script_id_(script_id),
start_position_(0),
end_position_(0),
parameters_end_pos_(kNoSourcePosition),
@@ -42,8 +43,9 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
source_range_map_(nullptr),
literal_(nullptr) {}
-ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
- : ParseInfo(zone_allocator) {
+ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator,
+ int script_id)
+ : ParseInfo(zone_allocator, script_id) {
set_hash_seed(HashSeed(isolate));
set_stack_limit(isolate->stack_guard()->real_climit());
set_runtime_call_stats(isolate->counters()->runtime_call_stats());
@@ -69,9 +71,8 @@ ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
}
ParseInfo::ParseInfo(Isolate* isolate)
- : ParseInfo(isolate, isolate->allocator()) {
- script_id_ = isolate->heap()->NextScriptId();
- LOG(isolate, ScriptEvent(Logger::ScriptEventType::kReserveId, script_id_));
+ : ParseInfo(isolate, isolate->allocator(), isolate->GetNextScriptId()) {
+ LOG(isolate, ScriptEvent(Logger::ScriptEventType::kReserveId, script_id()));
}
template <typename T>
@@ -81,12 +82,16 @@ void ParseInfo::SetFunctionInfo(T function) {
set_function_syntax_kind(function->syntax_kind());
set_requires_instance_members_initializer(
function->requires_instance_members_initializer());
+ set_class_scope_has_private_brand(function->class_scope_has_private_brand());
+ set_has_static_private_methods_or_accessors(
+ function->has_static_private_methods_or_accessors());
set_toplevel(function->is_toplevel());
set_is_oneshot_iife(function->is_oneshot_iife());
}
ParseInfo::ParseInfo(Isolate* isolate, SharedFunctionInfo shared)
- : ParseInfo(isolate, isolate->allocator()) {
+ : ParseInfo(isolate, isolate->allocator(),
+ Script::cast(shared.script()).id()) {
// Do not support re-parsing top-level function of a wrapped script.
// TODO(yangguo): consider whether we need a top-level function in a
// wrapped script at all.
@@ -101,11 +106,7 @@ ParseInfo::ParseInfo(Isolate* isolate, SharedFunctionInfo shared)
SetFunctionInfo(&shared);
Script script = Script::cast(shared.script());
- SetFlagsFromScript(isolate, script);
-
- if (shared.HasOuterScopeInfo()) {
- set_outer_scope_info(handle(shared.GetOuterScopeInfo(), isolate));
- }
+ SetFlagsForFunctionFromScript(script);
set_repl_mode(shared.is_repl_mode());
@@ -120,29 +121,27 @@ ParseInfo::ParseInfo(Isolate* isolate, SharedFunctionInfo shared)
}
ParseInfo::ParseInfo(Isolate* isolate, Script script)
- : ParseInfo(isolate, isolate->allocator()) {
- SetFlagsForToplevelCompileFromScript(isolate, script);
- set_collect_type_profile(isolate->is_collecting_type_profile() &&
- script.IsUserJavaScript());
+ : ParseInfo(isolate, isolate->allocator(), script.id()) {
+ SetFlagsForToplevelCompileFromScript(isolate, script,
+ isolate->is_collecting_type_profile());
}
// static
std::unique_ptr<ParseInfo> ParseInfo::FromParent(
const ParseInfo* outer_parse_info, AccountingAllocator* zone_allocator,
const FunctionLiteral* literal, const AstRawString* function_name) {
- std::unique_ptr<ParseInfo> result =
- std::make_unique<ParseInfo>(zone_allocator);
+ // Can't use make_unique because the constructor is private.
+ std::unique_ptr<ParseInfo> result(
+ new ParseInfo(zone_allocator, outer_parse_info->script_id_));
// Replicate shared state of the outer_parse_info.
result->flags_ = outer_parse_info->flags_;
- result->script_id_ = outer_parse_info->script_id_;
result->set_logger(outer_parse_info->logger());
result->set_ast_string_constants(outer_parse_info->ast_string_constants());
result->set_hash_seed(outer_parse_info->hash_seed());
DCHECK_EQ(outer_parse_info->parameters_end_pos(), kNoSourcePosition);
DCHECK_NULL(outer_parse_info->extension());
- DCHECK(outer_parse_info->maybe_outer_scope_info().is_null());
// Clone the function_name AstRawString into the ParseInfo's own
// AstValueFactory.
@@ -165,19 +164,17 @@ ParseInfo::~ParseInfo() = default;
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
-Handle<Script> ParseInfo::CreateScript(Isolate* isolate, Handle<String> source,
+template <typename LocalIsolate>
+Handle<Script> ParseInfo::CreateScript(LocalIsolate* isolate,
+ Handle<String> source,
ScriptOriginOptions origin_options,
- REPLMode repl_mode,
NativesFlag natives) {
// Create a script object describing the script to be compiled.
- Handle<Script> script;
- if (script_id_ == -1) {
- script = isolate->factory()->NewScript(source);
- } else {
- script = isolate->factory()->NewScriptWithId(source, script_id_);
- }
+ DCHECK_GE(script_id_, 0);
+ Handle<Script> script =
+ isolate->factory()->NewScriptWithId(source, script_id_);
if (isolate->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
+ Script::InitLineEnds(isolate, script);
}
switch (natives) {
case EXTENSION_CODE:
@@ -190,12 +187,27 @@ Handle<Script> ParseInfo::CreateScript(Isolate* isolate, Handle<String> source,
break;
}
script->set_origin_options(origin_options);
- script->set_is_repl_mode(repl_mode == REPLMode::kYes);
+ script->set_is_repl_mode(is_repl_mode());
+ if (is_eval() && !is_wrapped_as_function()) {
+ script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
+ }
- SetFlagsForToplevelCompileFromScript(isolate, *script);
+ CheckFlagsForToplevelCompileFromScript(*script,
+ isolate->is_collecting_type_profile());
return script;
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Script> ParseInfo::CreateScript(Isolate* isolate,
+ Handle<String> source,
+ ScriptOriginOptions origin_options,
+ NativesFlag natives);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Script> ParseInfo::CreateScript(OffThreadIsolate* isolate,
+ Handle<String> source,
+ ScriptOriginOptions origin_options,
+ NativesFlag natives);
+
AstValueFactory* ParseInfo::GetOrCreateAstValueFactory() {
if (!ast_value_factory_.get()) {
ast_value_factory_.reset(
@@ -206,6 +218,7 @@ AstValueFactory* ParseInfo::GetOrCreateAstValueFactory() {
void ParseInfo::AllocateSourceRangeMap() {
DCHECK(block_coverage_enabled());
+ DCHECK_NULL(source_range_map());
set_source_range_map(new (zone()) SourceRangeMap(zone()));
}
@@ -217,23 +230,51 @@ void ParseInfo::set_character_stream(
character_stream_.swap(character_stream);
}
-void ParseInfo::SetFlagsForToplevelCompileFromScript(Isolate* isolate,
- Script script) {
- SetFlagsFromScript(isolate, script);
+void ParseInfo::SetFlagsForToplevelCompile(bool is_collecting_type_profile,
+ bool is_user_javascript,
+ LanguageMode language_mode,
+ REPLMode repl_mode) {
set_allow_lazy_parsing();
set_toplevel();
- set_collect_type_profile(isolate->is_collecting_type_profile() &&
- script.IsUserJavaScript());
- set_repl_mode(script.is_repl_mode());
+ set_collect_type_profile(is_user_javascript && is_collecting_type_profile);
+ set_language_mode(
+ stricter_language_mode(this->language_mode(), language_mode));
+ set_repl_mode(repl_mode == REPLMode::kYes);
+
+ if (V8_UNLIKELY(is_user_javascript && block_coverage_enabled())) {
+ AllocateSourceRangeMap();
+ }
+}
+
+template <typename LocalIsolate>
+void ParseInfo::SetFlagsForToplevelCompileFromScript(
+ LocalIsolate* isolate, Script script, bool is_collecting_type_profile) {
+ SetFlagsForFunctionFromScript(script);
+ SetFlagsForToplevelCompile(is_collecting_type_profile,
+ script.IsUserJavaScript(), language_mode(),
+ construct_repl_mode(script.is_repl_mode()));
if (script.is_wrapped()) {
set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
}
}
-void ParseInfo::SetFlagsFromScript(Isolate* isolate, Script script) {
- DCHECK(script_id_ == -1 || script_id_ == script.id());
- script_id_ = script.id();
+void ParseInfo::CheckFlagsForToplevelCompileFromScript(
+ Script script, bool is_collecting_type_profile) {
+ CheckFlagsForFunctionFromScript(script);
+ DCHECK(allow_lazy_parsing());
+ DCHECK(is_toplevel());
+ DCHECK_EQ(collect_type_profile(),
+ is_collecting_type_profile && script.IsUserJavaScript());
+ DCHECK_EQ(is_repl_mode(), script.is_repl_mode());
+
+ if (script.is_wrapped()) {
+ DCHECK_EQ(function_syntax_kind(), FunctionSyntaxKind::kWrapped);
+ }
+}
+
+void ParseInfo::SetFlagsForFunctionFromScript(Script script) {
+ DCHECK_EQ(script_id_, script.id());
set_eval(script.compilation_type() == Script::COMPILATION_TYPE_EVAL);
set_module(script.origin_options().IsModule());
@@ -242,10 +283,17 @@ void ParseInfo::SetFlagsFromScript(Isolate* isolate, Script script) {
if (block_coverage_enabled() && script.IsUserJavaScript()) {
AllocateSourceRangeMap();
}
+}
- if (script.is_wrapped()) {
- set_wrapped_arguments(handle(script.wrapped_arguments(), isolate));
- }
+void ParseInfo::CheckFlagsForFunctionFromScript(Script script) {
+ DCHECK_EQ(script_id_, script.id());
+ // We set "is_eval" for wrapped functions to get an outer declaration scope.
+ // This is a bit hacky, but ok since we can't be both eval and wrapped.
+ DCHECK_EQ(is_eval() && !is_wrapped_as_function(),
+ script.compilation_type() == Script::COMPILATION_TYPE_EVAL);
+ DCHECK_EQ(is_module(), script.origin_options().IsModule());
+ DCHECK_IMPLIES(block_coverage_enabled() && script.IsUserJavaScript(),
+ source_range_map() != nullptr);
}
void ParseInfo::ParallelTasks::Enqueue(ParseInfo* outer_parse_info,
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 24f86e5bef..4430424eb9 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -10,6 +10,7 @@
#include <vector>
#include "include/v8.h"
+#include "src/base/export-template.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/objects/function-kind.h"
@@ -40,9 +41,7 @@ class Zone;
// A container for the inputs, configuration options, and outputs of parsing.
class V8_EXPORT_PRIVATE ParseInfo {
public:
- explicit ParseInfo(AccountingAllocator* zone_allocator);
explicit ParseInfo(Isolate*);
- ParseInfo(Isolate*, AccountingAllocator* zone_allocator);
ParseInfo(Isolate* isolate, Script script);
ParseInfo(Isolate* isolate, SharedFunctionInfo shared);
@@ -54,9 +53,10 @@ class V8_EXPORT_PRIVATE ParseInfo {
~ParseInfo();
- Handle<Script> CreateScript(Isolate* isolate, Handle<String> source,
+ template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<Script> CreateScript(LocalIsolate* isolate, Handle<String> source,
ScriptOriginOptions origin_options,
- REPLMode repl_mode = REPLMode::kNo,
NativesFlag natives = NOT_NATIVES_CODE);
// Either returns the ast-value-factory associated with this ParseInfo, or
@@ -90,6 +90,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kRequiresInstanceMembersInitializer,
requires_instance_members_initializer,
set_requires_instance_members_initializer)
+ FLAG_ACCESSOR(kClassScopeHasPrivateBrand, class_scope_has_private_brand,
+ set_class_scope_has_private_brand)
+ FLAG_ACCESSOR(kHasStaticPrivateMethodsOrAccessors,
+ has_static_private_methods_or_accessors,
+ set_has_static_private_methods_or_accessors)
FLAG_ACCESSOR(kMightAlwaysOpt, might_always_opt, set_might_always_opt)
FLAG_ACCESSOR(kAllowNativeSyntax, allow_natives_syntax,
set_allow_natives_syntax)
@@ -256,22 +261,12 @@ class V8_EXPORT_PRIVATE ParseInfo {
ParallelTasks* parallel_tasks() { return parallel_tasks_.get(); }
- void SetFlagsFromScript(Isolate* isolate, Script script);
+ void SetFlagsForToplevelCompile(bool is_collecting_type_profile,
+ bool is_user_javascript,
+ LanguageMode language_mode,
+ REPLMode repl_mode);
- //--------------------------------------------------------------------------
- // TODO(titzer): these should not be part of ParseInfo.
- //--------------------------------------------------------------------------
- Handle<FixedArray> wrapped_arguments() const { return wrapped_arguments_; }
- void set_wrapped_arguments(Handle<FixedArray> wrapped_arguments) {
- wrapped_arguments_ = wrapped_arguments;
- }
-
- MaybeHandle<ScopeInfo> maybe_outer_scope_info() const {
- return maybe_outer_scope_info_;
- }
- void set_outer_scope_info(Handle<ScopeInfo> outer_scope_info) {
- maybe_outer_scope_info_ = outer_scope_info;
- }
+ void CheckFlagsForFunctionFromScript(Script script);
int script_id() const { return script_id_; }
//--------------------------------------------------------------------------
@@ -285,7 +280,17 @@ class V8_EXPORT_PRIVATE ParseInfo {
}
private:
- void SetFlagsForToplevelCompileFromScript(Isolate* isolate, Script script);
+ ParseInfo(AccountingAllocator* zone_allocator, int script_id);
+ ParseInfo(Isolate*, AccountingAllocator* zone_allocator, int script_id);
+
+ void SetFlagsForFunctionFromScript(Script script);
+
+ template <typename LocalIsolate>
+ void SetFlagsForToplevelCompileFromScript(LocalIsolate* isolate,
+ Script script,
+ bool is_collecting_type_profile);
+ void CheckFlagsForToplevelCompileFromScript(Script script,
+ bool is_collecting_type_profile);
// Set function info flags based on those in either FunctionLiteral or
// SharedFunctionInfo |function|
@@ -319,13 +324,14 @@ class V8_EXPORT_PRIVATE ParseInfo {
kAllowHarmonyDynamicImport = 1u << 21,
kAllowHarmonyImportMeta = 1u << 22,
kAllowHarmonyOptionalChaining = 1u << 23,
- kAllowHarmonyPrivateFields = 1u << 24,
+ kHasStaticPrivateMethodsOrAccessors = 1u << 24,
kAllowHarmonyPrivateMethods = 1u << 25,
kIsOneshotIIFE = 1u << 26,
kCollectSourcePositions = 1u << 27,
kAllowHarmonyNullish = 1u << 28,
kAllowHarmonyTopLevelAwait = 1u << 29,
kREPLMode = 1u << 30,
+ kClassScopeHasPrivateBrand = 1u << 31,
};
//------------- Inputs to parsing and scope analysis -----------------------
@@ -344,10 +350,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
int function_literal_id_;
int max_function_literal_id_;
- // TODO(titzer): Move handles out of ParseInfo.
- Handle<FixedArray> wrapped_arguments_;
- MaybeHandle<ScopeInfo> maybe_outer_scope_info_;
-
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
std::unique_ptr<ConsumedPreparseData> consumed_preparse_data_;
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 16129a4c00..29e527ce2c 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -232,8 +232,6 @@ class ParserBase {
using FuncNameInferrerState = typename Types::FuncNameInferrer::State;
using SourceRange = typename Types::SourceRange;
using SourceRangeScope = typename Types::SourceRangeScope;
- using TargetT = typename Types::Target;
- using TargetScopeT = typename Types::TargetScope;
// All implementation-specific methods must be called through this.
Impl* impl() { return static_cast<Impl*>(this); }
@@ -320,7 +318,6 @@ class ParserBase {
int loop_nesting_depth() const {
return function_state_->loop_nesting_depth();
}
-
int GetNextFunctionLiteralId() { return ++function_literal_id_; }
int GetLastFunctionLiteralId() const { return function_literal_id_; }
@@ -370,6 +367,93 @@ class ParserBase {
Scope* const outer_scope_;
};
+ // ---------------------------------------------------------------------------
+ // Target is a support class to facilitate manipulation of the
+ // Parser's target_stack_ (the stack of potential 'break' and
+ // 'continue' statement targets). Upon construction, a new target is
+ // added; it is removed upon destruction.
+
+ // |labels| is a list of all labels that can be used as a target for break.
+ // |own_labels| is a list of all labels that an iteration statement is
+ // directly prefixed with, i.e. all the labels that a continue statement in
+ // the body can use to continue this iteration statement. This is always a
+ // subset of |labels|.
+ //
+ // Example: "l1: { l2: if (b) l3: l4: for (;;) s }"
+ // labels() of the Block will be l1.
+ // labels() of the ForStatement will be l2, l3, l4.
+ // own_labels() of the ForStatement will be l3, l4.
+ class Target {
+ public:
+ enum TargetType { TARGET_FOR_ANONYMOUS, TARGET_FOR_NAMED_ONLY };
+
+ Target(ParserBase* parser, BreakableStatementT statement,
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, TargetType target_type)
+ : stack_(parser->function_state_->target_stack_address()),
+ statement_(statement),
+ labels_(labels),
+ own_labels_(own_labels),
+ target_type_(target_type),
+ previous_(*stack_) {
+ DCHECK_IMPLIES(Impl::IsIterationStatement(statement_),
+ target_type == Target::TARGET_FOR_ANONYMOUS);
+ DCHECK_IMPLIES(!Impl::IsIterationStatement(statement_),
+ own_labels == nullptr);
+ *stack_ = this;
+ }
+
+ ~Target() { *stack_ = previous_; }
+
+ const Target* previous() const { return previous_; }
+ const BreakableStatementT statement() const { return statement_; }
+ const ZonePtrList<const AstRawString>* labels() const { return labels_; }
+ const ZonePtrList<const AstRawString>* own_labels() const {
+ return own_labels_;
+ }
+ bool is_iteration() const { return Impl::IsIterationStatement(statement_); }
+ bool is_target_for_anonymous() const {
+ return target_type_ == TARGET_FOR_ANONYMOUS;
+ }
+
+ private:
+ Target** const stack_;
+ const BreakableStatementT statement_;
+ const ZonePtrList<const AstRawString>* const labels_;
+ const ZonePtrList<const AstRawString>* const own_labels_;
+ const TargetType target_type_;
+ Target* const previous_;
+ };
+
+ Target* target_stack() { return *function_state_->target_stack_address(); }
+
+ BreakableStatementT LookupBreakTarget(IdentifierT label) {
+ bool anonymous = impl()->IsNull(label);
+ for (const Target* t = target_stack(); t != nullptr; t = t->previous()) {
+ if ((anonymous && t->is_target_for_anonymous()) ||
+ (!anonymous &&
+ ContainsLabel(t->labels(),
+ impl()->GetRawNameFromIdentifier(label)))) {
+ return t->statement();
+ }
+ }
+ return impl()->NullStatement();
+ }
+
+ IterationStatementT LookupContinueTarget(IdentifierT label) {
+ bool anonymous = impl()->IsNull(label);
+ for (const Target* t = target_stack(); t != nullptr; t = t->previous()) {
+ if (!t->is_iteration()) continue;
+
+ DCHECK(t->is_target_for_anonymous());
+ if (anonymous || ContainsLabel(t->own_labels(),
+ impl()->GetRawNameFromIdentifier(label))) {
+ return impl()->AsIterationStatement(t->statement());
+ }
+ }
+ return impl()->NullStatement();
+ }
+
class FunctionState final : public BlockState {
public:
FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
@@ -427,7 +511,7 @@ class ParserBase {
PointerWithPayload<FunctionState, bool, 1> state_and_prev_value_;
};
- class LoopScope {
+ class LoopScope final {
public:
explicit LoopScope(FunctionState* function_state)
: function_state_(function_state) {
@@ -442,6 +526,8 @@ class ParserBase {
int loop_nesting_depth() const { return loop_nesting_depth_; }
+ Target** target_stack_address() { return &target_stack_; }
+
private:
// Properties count estimation.
int expected_property_count_;
@@ -455,6 +541,7 @@ class ParserBase {
FunctionState** function_state_stack_;
FunctionState* outer_function_state_;
DeclarationScope* scope_;
+ Target* target_stack_ = nullptr; // for break, continue statements
// A reason, if any, why this function should not be optimized.
BailoutReason dont_optimize_reason_;
@@ -539,6 +626,7 @@ class ParserBase {
has_name_static_property(false),
has_static_computed_names(false),
has_static_class_fields(false),
+ has_static_private_methods(false),
has_instance_members(false),
requires_brand(false),
is_anonymous(false),
@@ -557,6 +645,7 @@ class ParserBase {
bool has_name_static_property;
bool has_static_computed_names;
bool has_static_class_fields;
+ bool has_static_private_methods;
bool has_instance_members;
bool requires_brand;
bool is_anonymous;
@@ -623,6 +712,46 @@ class ParserBase {
bool is_rest;
};
+ void DeclareLabel(ZonePtrList<const AstRawString>** labels,
+ ZonePtrList<const AstRawString>** own_labels,
+ const AstRawString* label) {
+ if (ContainsLabel(*labels, label) || TargetStackContainsLabel(label)) {
+ ReportMessage(MessageTemplate::kLabelRedeclaration, label);
+ return;
+ }
+
+ // Add {label} to both {labels} and {own_labels}.
+ if (*labels == nullptr) {
+ DCHECK_NULL(*own_labels);
+ *labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
+ *own_labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
+ } else {
+ if (*own_labels == nullptr) {
+ *own_labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
+ }
+ }
+ (*labels)->Add(label, zone());
+ (*own_labels)->Add(label, zone());
+ }
+
+ bool ContainsLabel(const ZonePtrList<const AstRawString>* labels,
+ const AstRawString* label) {
+ DCHECK_NOT_NULL(label);
+ if (labels != nullptr) {
+ for (int i = labels->length(); i-- > 0;) {
+ if (labels->at(i) == label) return true;
+ }
+ }
+ return false;
+ }
+
+ bool TargetStackContainsLabel(const AstRawString* label) {
+ for (const Target* t = target_stack(); t != nullptr; t = t->previous()) {
+ if (ContainsLabel(t->labels(), label)) return true;
+ }
+ return false;
+ }
+
ClassLiteralProperty::Kind ClassPropertyKindFor(ParsePropertyKind kind) {
switch (kind) {
case ParsePropertyKind::kAccessorGetter:
@@ -2092,7 +2221,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseProperty(
case Token::BIGINT: {
Consume(Token::BIGINT);
- prop_info->name = impl()->GetNumberAsSymbol();
+ prop_info->name = impl()->GetSymbol();
is_array_index = impl()->IsArrayIndex(prop_info->name, &index);
break;
}
@@ -4436,8 +4565,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
if (V8_UNLIKELY(prop_info.is_private)) {
DCHECK(!is_constructor);
class_info.requires_brand |= (!is_field && !prop_info.is_static);
- class_info.has_private_methods |=
- property_kind == ClassLiteralProperty::METHOD;
+ bool is_method = property_kind == ClassLiteralProperty::METHOD;
+ class_info.has_private_methods |= is_method;
+ class_info.has_static_private_methods |= is_method && prop_info.is_static;
impl()->DeclarePrivateClassMember(class_scope, prop_info.name, property,
property_kind, prop_info.is_static,
&class_info);
@@ -4476,7 +4606,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
}
if (class_info.requires_brand) {
- // TODO(joyee): implement static brand checking
class_scope->DeclareBrandVariable(
ast_value_factory(), IsStaticFlag::kNotStatic, kNoSourcePosition);
}
@@ -4809,10 +4938,6 @@ void ParserBase<Impl>::ParseStatementList(StatementListT* body,
}
}
- // Allocate a target stack to use for this set of source elements. This way,
- // all scripts and functions get their own target stack thus avoiding illegal
- // breaks and continues across functions.
- TargetScopeT target_scope(this);
while (peek() != end_token) {
StatementT stat = ParseStatementListItem();
if (impl()->IsNull(stat)) return;
@@ -4932,8 +5057,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
// put the labels there.
if (labels == nullptr) return ParseTryStatement();
StatementListT statements(pointer_buffer());
- BlockT result = factory()->NewBlock(false, labels);
- TargetT target(this, result);
+ BlockT result = factory()->NewBlock(false, true);
+ Target target(this, result, labels, nullptr,
+ Target::TARGET_FOR_NAMED_ONLY);
StatementT statement = ParseTryStatement();
statements.Add(statement);
result->InitializeStatements(statements, zone());
@@ -4981,7 +5107,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
// '{' StatementList '}'
// Parse the statements and collect escaping labels.
- BlockT body = factory()->NewBlock(false, labels);
+ BlockT body = factory()->NewBlock(false, labels != nullptr);
StatementListT statements(pointer_buffer());
CheckStackOverflow();
@@ -4989,7 +5115,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
{
BlockState block_state(zone(), &scope_);
scope()->set_start_position(peek_position());
- TargetT target(this, body);
+ Target target(this, body, labels, nullptr, Target::TARGET_FOR_NAMED_ONLY);
Expect(Token::LBRACE);
@@ -5214,11 +5340,11 @@ ParserBase<Impl>::ParseContinueStatement() {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier();
}
- IterationStatementT target = impl()->LookupContinueTarget(label);
+ IterationStatementT target = LookupContinueTarget(label);
if (impl()->IsNull(target)) {
// Illegal continue statement.
MessageTemplate message = MessageTemplate::kIllegalContinue;
- BreakableStatementT breakable_target = impl()->LookupBreakTarget(label);
+ BreakableStatementT breakable_target = LookupBreakTarget(label);
if (impl()->IsNull(label)) {
message = MessageTemplate::kNoIterationStatement;
} else if (impl()->IsNull(breakable_target)) {
@@ -5250,11 +5376,12 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseBreakStatement(
}
// Parse labeled break statements that target themselves into
// empty statements, e.g. 'l1: l2: l3: break l2;'
- if (!impl()->IsNull(label) && impl()->ContainsLabel(labels, label)) {
+ if (!impl()->IsNull(label) &&
+ impl()->ContainsLabel(labels, impl()->GetRawNameFromIdentifier(label))) {
ExpectSemicolon();
return factory()->EmptyStatement();
}
- BreakableStatementT target = impl()->LookupBreakTarget(label);
+ BreakableStatementT target = LookupBreakTarget(label);
if (impl()->IsNull(target)) {
// Illegal break statement.
MessageTemplate message = MessageTemplate::kIllegalBreak;
@@ -5349,9 +5476,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
// 'do' Statement 'while' '(' Expression ')' ';'
typename FunctionState::LoopScope loop_scope(function_state_);
- auto loop =
- factory()->NewDoWhileStatement(labels, own_labels, peek_position());
- TargetT target(this, loop);
+ auto loop = factory()->NewDoWhileStatement(peek_position());
+ Target target(this, loop, labels, own_labels, Target::TARGET_FOR_ANONYMOUS);
SourceRange body_range;
StatementT body = impl()->NullStatement();
@@ -5389,8 +5515,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
// 'while' '(' Expression ')' Statement
typename FunctionState::LoopScope loop_scope(function_state_);
- auto loop = factory()->NewWhileStatement(labels, own_labels, peek_position());
- TargetT target(this, loop);
+ auto loop = factory()->NewWhileStatement(peek_position());
+ Target target(this, loop, labels, own_labels, Target::TARGET_FOR_ANONYMOUS);
SourceRange body_range;
StatementT body = impl()->NullStatement();
@@ -5438,7 +5564,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
// CaseClause ::
// 'case' Expression ':' StatementList
// 'default' ':' StatementList
-
int switch_pos = peek_position();
Consume(Token::SWITCH);
@@ -5446,14 +5571,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
ExpressionT tag = ParseExpression();
Expect(Token::RPAREN);
- auto switch_statement =
- factory()->NewSwitchStatement(labels, tag, switch_pos);
+ auto switch_statement = factory()->NewSwitchStatement(tag, switch_pos);
{
BlockState cases_block_state(zone(), &scope_);
scope()->set_start_position(switch_pos);
scope()->SetNonlinear();
- TargetT target(this, switch_statement);
+ Target target(this, switch_statement, labels, nullptr,
+ Target::TARGET_FOR_ANONYMOUS);
bool default_seen = false;
Expect(Token::LBRACE);
@@ -5788,9 +5913,8 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
BlockT init_block = impl()->RewriteForVarInLegacy(*for_info);
- auto loop = factory()->NewForEachStatement(for_info->mode, labels, own_labels,
- stmt_pos);
- TargetT target(this, loop);
+ auto loop = factory()->NewForEachStatement(for_info->mode, stmt_pos);
+ Target target(this, loop, labels, own_labels, Target::TARGET_FOR_ANONYMOUS);
ExpressionT enumerable = impl()->NullExpression();
if (for_info->mode == ForEachStatement::ITERATE) {
@@ -5852,9 +5976,8 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels) {
- auto loop = factory()->NewForEachStatement(for_info->mode, labels, own_labels,
- stmt_pos);
- TargetT target(this, loop);
+ auto loop = factory()->NewForEachStatement(for_info->mode, stmt_pos);
+ Target target(this, loop, labels, own_labels, Target::TARGET_FOR_ANONYMOUS);
ExpressionT enumerable = impl()->NullExpression();
if (for_info->mode == ForEachStatement::ITERATE) {
@@ -5943,8 +6066,8 @@ typename ParserBase<Impl>::ForStatementT ParserBase<Impl>::ParseStandardForLoop(
ZonePtrList<const AstRawString>* own_labels, ExpressionT* cond,
StatementT* next, StatementT* body) {
CheckStackOverflow();
- ForStatementT loop = factory()->NewForStatement(labels, own_labels, stmt_pos);
- TargetT target(this, loop);
+ ForStatementT loop = factory()->NewForStatement(stmt_pos);
+ Target target(this, loop, labels, own_labels, Target::TARGET_FOR_ANONYMOUS);
if (peek() != Token::SEMICOLON) {
*cond = ParseExpression();
@@ -5988,13 +6111,12 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
scope()->set_start_position(scanner()->location().beg_pos);
scope()->set_is_hidden();
- auto loop = factory()->NewForOfStatement(labels, own_labels, stmt_pos,
- IteratorType::kAsync);
+ auto loop = factory()->NewForOfStatement(stmt_pos, IteratorType::kAsync);
// Two suspends: one for next() and one for return()
function_state_->AddSuspend();
function_state_->AddSuspend();
- TargetT target(this, loop);
+ Target target(this, loop, labels, own_labels, Target::TARGET_FOR_ANONYMOUS);
ExpressionT each_variable = impl()->NullExpression();
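The Target class introduced above (and the Target instances that replace TargetT throughout the statement parsers) is a classic intrusive RAII stack: the constructor links the new target in as the stack top, and the destructor restores the previous top, so the stack always mirrors the parser's lexical nesting and needs no explicit cleanup. A stand-alone miniature of just that discipline (simplified types, illustration only):

    #include <cassert>

    struct Target {
      Target** stack;    // address of the current stack-top pointer
      Target* previous;  // saved top, restored on destruction
      explicit Target(Target** s) : stack(s), previous(*s) { *s = this; }
      ~Target() { *stack = previous; }
    };

    int main() {
      Target* top = nullptr;
      {
        Target outer(&top);
        assert(top == &outer);
        {
          Target inner(&top);
          assert(top == &inner && top->previous == &outer);
        }
        assert(top == &outer);  // inner popped automatically
      }
      assert(top == nullptr);
      return 0;
    }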
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 21beff8790..2a1ad0e98b 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -356,6 +356,11 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
const Runtime::Function* function =
Runtime::FunctionForName(name->raw_data(), name->length());
+ // Be more permissive when fuzzing. Intrinsics are not supported.
+ if (FLAG_allow_natives_for_fuzzing) {
+ return NewV8RuntimeFunctionForFuzzing(function, args, pos);
+ }
+
if (function != nullptr) {
// Check for possible name clash.
DCHECK_EQ(Context::kNotFound,
@@ -382,6 +387,34 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
return factory()->NewCallRuntime(context_index, args, pos);
}
+// More permissive runtime-function creation on fuzzers.
+Expression* Parser::NewV8RuntimeFunctionForFuzzing(
+ const Runtime::Function* function, const ScopedPtrList<Expression>& args,
+ int pos) {
+ CHECK(FLAG_allow_natives_for_fuzzing);
+
+ // Intrinsics are not supported for fuzzing. Only allow whitelisted runtime
+ // functions, and avoid later errors due to too few arguments by simply
+ // ignoring such calls.
+ if (function == nullptr ||
+ !Runtime::IsWhitelistedForFuzzing(function->function_id) ||
+ function->nargs > args.length()) {
+ return factory()->NewUndefinedLiteral(kNoSourcePosition);
+ }
+
+ // Flexible number of arguments permitted.
+ if (function->nargs == -1) {
+ return factory()->NewCallRuntime(function, args, pos);
+ }
+
+ // Otherwise ignore superfluous arguments.
+ ScopedPtrList<Expression> permissive_args(pointer_buffer());
+ for (int i = 0; i < function->nargs; i++) {
+ permissive_args.Add(args.at(i));
+ }
+ return factory()->NewCallRuntime(function, permissive_args, pos);
+}
+
Parser::Parser(ParseInfo* info)
: ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
info->extension(), info->GetOrCreateAstValueFactory(),
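In effect, when FLAG_allow_natives_for_fuzzing is set, a call to an unknown or non-whitelisted intrinsic (say, a hypothetical %SomeUnknownIntrinsic(x)) parses to the undefined literal instead of raising an error; a whitelisted runtime function called with too few arguments is likewise replaced by undefined, and one called with too many has the extra arguments dropped, so fuzzer-generated sources keep parsing and running.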
@@ -394,7 +427,6 @@ Parser::Parser(ParseInfo* info)
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
- target_stack_(nullptr),
total_preparse_skipped_(0),
consumed_preparse_data_(info->consumed_preparse_data()),
preparse_data_buffer_(),
@@ -483,8 +515,9 @@ void MaybeProcessSourceRanges(ParseInfo* parse_info, Expression* root,
} // namespace
-FunctionLiteral* Parser::ParseProgram(Isolate* isolate, Handle<Script> script,
- ParseInfo* info) {
+FunctionLiteral* Parser::ParseProgram(
+ Isolate* isolate, Handle<Script> script, ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
// TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
// see comment for HistogramTimerScope class.
DCHECK_EQ(script->id(), script_id());
@@ -501,9 +534,14 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, Handle<Script> script,
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
// Initialize parser state.
- DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info(),
+ DeserializeScopeChain(isolate, info, maybe_outer_scope_info,
Scope::DeserializationMode::kIncludingVariables);
+ DCHECK_EQ(script->is_wrapped(), info->is_wrapped_as_function());
+ if (script->is_wrapped()) {
+ maybe_wrapped_arguments_ = handle(script->wrapped_arguments(), isolate);
+ }
+
scanner_.Initialize();
FunctionLiteral* result = DoParseProgram(isolate, info);
MaybeResetCharacterStream(info, result);
@@ -533,7 +571,6 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
// isolate will be nullptr.
DCHECK_EQ(parsing_on_main_thread_, isolate != nullptr);
DCHECK_NULL(scope_);
- DCHECK_NULL(target_stack_);
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
ResetFunctionLiteralId();
@@ -559,18 +596,6 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
int beg_pos = scanner()->location().beg_pos;
if (parsing_module_) {
DCHECK(info->is_module());
- // Declare the special module parameter.
- auto name = ast_value_factory()->empty_string();
- bool is_rest = false;
- bool is_optional = false;
- VariableMode mode = VariableMode::kVar;
- bool was_added;
- scope->DeclareLocal(name, mode, PARAMETER_VARIABLE, &was_added,
- Variable::DefaultInitializationFlag(mode));
- DCHECK(was_added);
- auto var = scope->DeclareParameter(name, VariableMode::kVar, is_optional,
- is_rest, ast_value_factory(), beg_pos);
- var->AllocateTo(VariableLocation::PARAMETER, 0);
PrepareGeneratorVariables();
Expression* initial_yield =
@@ -638,7 +663,7 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
// conflicting var declarations with outer scope-info-backed scopes.
if (info->is_eval()) {
DCHECK(parsing_on_main_thread_);
- info->ast_value_factory()->Internalize(isolate->factory());
+ info->ast_value_factory()->Internalize(isolate);
}
CheckConflictingVarDeclarations(scope);
@@ -652,7 +677,7 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
}
}
- int parameter_count = parsing_module_ ? 1 : 0;
+ int parameter_count = 0;
result = factory()->NewScriptOrEvalFunctionLiteral(
scope, body, function_state.expected_property_count(), parameter_count);
result->set_suspend_count(function_state.suspend_count());
@@ -660,9 +685,6 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
info->set_max_function_literal_id(GetLastFunctionLiteralId());
- // Make sure the target stack is empty.
- DCHECK_NULL(target_stack_);
-
if (has_error()) return nullptr;
RecordFunctionLiteralSourceRange(result);
@@ -674,7 +696,7 @@ ZonePtrList<const AstRawString>* Parser::PrepareWrappedArguments(
Isolate* isolate, ParseInfo* info, Zone* zone) {
DCHECK(parsing_on_main_thread_);
DCHECK_NOT_NULL(isolate);
- Handle<FixedArray> arguments = info->wrapped_arguments();
+ Handle<FixedArray> arguments = maybe_wrapped_arguments_.ToHandleChecked();
int arguments_length = arguments->length();
ZonePtrList<const AstRawString>* arguments_for_wrapped_function =
new (zone) ZonePtrList<const AstRawString>(arguments_length, zone);
@@ -780,10 +802,19 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
- DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info(),
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info;
+ if (shared_info->HasOuterScopeInfo()) {
+ maybe_outer_scope_info = handle(shared_info->GetOuterScopeInfo(), isolate);
+ }
+ DeserializeScopeChain(isolate, info, maybe_outer_scope_info,
Scope::DeserializationMode::kIncludingVariables);
DCHECK_EQ(factory()->zone(), info->zone());
+ if (shared_info->is_wrapped()) {
+ maybe_wrapped_arguments_ = handle(
+ Script::cast(shared_info->script()).wrapped_arguments(), isolate);
+ }
+
// Initialize parser state.
Handle<String> name(shared_info->Name(), isolate);
info->set_function_name(ast_value_factory()->GetString(name));
@@ -810,7 +841,7 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
if (V8_UNLIKELY(FLAG_log_function_events) && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
// We need to make sure that the debug-name is available.
- ast_value_factory()->Internalize(isolate->factory());
+ ast_value_factory()->Internalize(isolate);
DeclarationScope* function_scope = result->scope();
std::unique_ptr<char[]> function_name = result->GetDebugName();
LOG(isolate,
@@ -827,7 +858,6 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
DCHECK_EQ(parsing_on_main_thread_, isolate != nullptr);
DCHECK_NOT_NULL(raw_name);
DCHECK_NULL(scope_);
- DCHECK_NULL(target_stack_);
DCHECK(ast_value_factory());
fni_.PushEnclosingName(raw_name);
@@ -946,13 +976,15 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
if (has_error()) return nullptr;
result->set_requires_instance_members_initializer(
info->requires_instance_members_initializer());
+ result->set_class_scope_has_private_brand(
+ info->class_scope_has_private_brand());
+ result->set_has_static_private_methods_or_accessors(
+ info->has_static_private_methods_or_accessors());
if (info->is_oneshot_iife()) {
result->mark_as_oneshot_iife();
}
}
- // Make sure the target stack is empty.
- DCHECK_NULL(target_stack_);
DCHECK_IMPLIES(result,
info->function_literal_id() == result->function_literal_id());
return result;
@@ -1580,43 +1612,6 @@ Statement* Parser::DeclareNative(const AstRawString* name, int pos) {
pos);
}
-void Parser::DeclareLabel(ZonePtrList<const AstRawString>** labels,
- ZonePtrList<const AstRawString>** own_labels,
- const AstRawString* label) {
- // TODO(1240780): We don't check for redeclaration of labels during preparsing
- // since keeping track of the set of active labels requires nontrivial changes
- // to the way scopes are structured. However, these are probably changes we
- // want to make later anyway so we should go back and fix this then.
- if (ContainsLabel(*labels, label) || TargetStackContainsLabel(label)) {
- ReportMessage(MessageTemplate::kLabelRedeclaration, label);
- return;
- }
-
- // Add {label} to both {labels} and {own_labels}.
- if (*labels == nullptr) {
- DCHECK_NULL(*own_labels);
- *labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
- *own_labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
- } else {
- if (*own_labels == nullptr) {
- *own_labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
- }
- }
- (*labels)->Add(label, zone());
- (*own_labels)->Add(label, zone());
-}
-
-bool Parser::ContainsLabel(ZonePtrList<const AstRawString>* labels,
- const AstRawString* label) {
- DCHECK_NOT_NULL(label);
- if (labels != nullptr) {
- for (int i = labels->length(); i-- > 0;) {
- if (labels->at(i) == label) return true;
- }
- }
- return false;
-}
-
Block* Parser::IgnoreCompletion(Statement* statement) {
Block* block = factory()->NewBlock(1, true);
block->statements()->Add(statement, zone());
@@ -1812,6 +1807,11 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
statements.Add(
factory()->NewExpressionStatement(initial_yield, kNoSourcePosition));
ParseStatementList(&statements, Token::RBRACE);
+ // Since the whole body is wrapped in a try-catch, make the implicit
+ // end-of-function return explicit to ensure BytecodeGenerator's special
+ // handling for ReturnStatements in async generators applies.
+ statements.Add(factory()->NewSyntheticAsyncReturnStatement(
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition));
// Don't create iterator result for async generators, as the resume methods
// will create it.
@@ -2052,8 +2052,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// explicit break target, instead handing it directly to those nodes that
// need to know about it. This should be safe because we don't run any code
// in this function that looks up break targets.
- ForStatement* outer_loop =
- factory()->NewForStatement(nullptr, nullptr, kNoSourcePosition);
+ ForStatement* outer_loop = factory()->NewForStatement(kNoSourcePosition);
outer_block->statements()->Add(outer_loop, zone());
outer_block->set_scope(scope());
@@ -2992,6 +2991,12 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope,
class_info->instance_fields->length());
}
+ if (class_info->requires_brand) {
+ class_info->constructor->set_class_scope_has_private_brand(true);
+ }
+ if (class_info->has_static_private_methods) {
+ class_info->constructor->set_has_static_private_methods_or_accessors(true);
+ }
ClassLiteral* class_literal = factory()->NewClassLiteral(
block_scope, class_info->extends, class_info->constructor,
class_info->public_members, class_info->private_members,
@@ -3044,41 +3049,9 @@ void Parser::InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope) {
// ----------------------------------------------------------------------------
// Parser support
-bool Parser::TargetStackContainsLabel(const AstRawString* label) {
- for (ParserTarget* t = target_stack_; t != nullptr; t = t->previous()) {
- if (ContainsLabel(t->statement()->labels(), label)) return true;
- }
- return false;
-}
-
-BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label) {
- bool anonymous = label == nullptr;
- for (ParserTarget* t = target_stack_; t != nullptr; t = t->previous()) {
- BreakableStatement* stat = t->statement();
- if ((anonymous && stat->is_target_for_anonymous()) ||
- (!anonymous && ContainsLabel(stat->labels(), label))) {
- return stat;
- }
- }
- return nullptr;
-}
-
-IterationStatement* Parser::LookupContinueTarget(const AstRawString* label) {
- bool anonymous = label == nullptr;
- for (ParserTarget* t = target_stack_; t != nullptr; t = t->previous()) {
- IterationStatement* stat = t->statement()->AsIterationStatement();
- if (stat == nullptr) continue;
-
- DCHECK(stat->is_target_for_anonymous());
- if (anonymous || ContainsLabel(stat->own_labels(), label)) {
- return stat;
- }
- if (ContainsLabel(stat->labels(), label)) break;
- }
- return nullptr;
-}
-
-void Parser::HandleSourceURLComments(Isolate* isolate, Handle<Script> script) {
+template <typename LocalIsolate>
+void Parser::HandleSourceURLComments(LocalIsolate* isolate,
+ Handle<Script> script) {
Handle<String> source_url = scanner_.SourceUrl(isolate);
if (!source_url.is_null()) {
script->set_source_url(*source_url);
@@ -3089,6 +3062,11 @@ void Parser::HandleSourceURLComments(Isolate* isolate, Handle<Script> script) {
}
}
+template void Parser::HandleSourceURLComments(Isolate* isolate,
+ Handle<Script> script);
+template void Parser::HandleSourceURLComments(OffThreadIsolate* isolate,
+ Handle<Script> script);
+
void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
// Move statistics to Isolate.
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
@@ -3117,7 +3095,6 @@ void Parser::ParseOnBackground(ParseInfo* info) {
FunctionLiteral* result = nullptr;
scanner_.Initialize();
- DCHECK(info->maybe_outer_scope_info().is_null());
DCHECK(original_scope_);
@@ -3134,6 +3111,7 @@ void Parser::ParseOnBackground(ParseInfo* info) {
DoParseFunction(/* isolate = */ nullptr, info, info->function_name());
}
MaybeResetCharacterStream(info, result);
+ MaybeProcessSourceRanges(info, result, stack_limit_);
info->set_literal(result);
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index d53ccc7fbd..fd24ffb3e8 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -122,8 +122,6 @@ struct ParserTypes<Parser> {
using FuncNameInferrer = v8::internal::FuncNameInferrer;
using SourceRange = v8::internal::SourceRange;
using SourceRangeScope = v8::internal::SourceRangeScope;
- using Target = ParserTarget;
- using TargetScope = ParserTargetScope;
};
class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
@@ -157,7 +155,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Move statistics to Isolate
void UpdateStatistics(Isolate* isolate, Handle<Script> script);
- void HandleSourceURLComments(Isolate* isolate, Handle<Script> script);
+ template <typename LocalIsolate>
+ void HandleSourceURLComments(LocalIsolate* isolate, Handle<Script> script);
private:
friend class ParserBase<Parser>;
@@ -167,8 +166,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
friend class i::ParameterDeclarationParsingScope<ParserTypes<Parser>>;
friend class i::ArrowHeadParsingScope<ParserTypes<Parser>>;
friend bool v8::internal::parsing::ParseProgram(
- ParseInfo*, Handle<Script>, Isolate*,
- parsing::ReportErrorsAndStatisticsMode stats_mode);
+ ParseInfo*, Handle<Script>, MaybeHandle<ScopeInfo> maybe_outer_scope_info,
+ Isolate*, parsing::ReportErrorsAndStatisticsMode stats_mode);
friend bool v8::internal::parsing::ParseFunction(
ParseInfo*, Handle<SharedFunctionInfo> shared_info, Isolate*,
parsing::ReportErrorsAndStatisticsMode stats_mode);
@@ -210,7 +209,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Returns nullptr if parsing failed.
FunctionLiteral* ParseProgram(Isolate* isolate, Handle<Script> script,
- ParseInfo* info);
+ ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info);
FunctionLiteral* ParseFunction(Isolate* isolate, ParseInfo* info,
Handle<SharedFunctionInfo> shared_info);
@@ -279,11 +279,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
};
ZonePtrList<const NamedImport>* ParseNamedImports(int pos);
Statement* BuildInitializationBlock(DeclarationParsingResult* parsing_result);
- void DeclareLabel(ZonePtrList<const AstRawString>** labels,
- ZonePtrList<const AstRawString>** own_labels,
- const AstRawString* label);
- bool ContainsLabel(ZonePtrList<const AstRawString>* labels,
- const AstRawString* label);
Expression* RewriteReturn(Expression* return_value, int pos);
Statement* RewriteSwitchStatement(SwitchStatement* switch_statement,
Scope* scope);
@@ -407,10 +402,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Scope* declaration_scope, bool* was_added, int var_begin_pos,
int var_end_pos = kNoSourcePosition);
- bool TargetStackContainsLabel(const AstRawString* label);
- BreakableStatement* LookupBreakTarget(const AstRawString* label);
- IterationStatement* LookupContinueTarget(const AstRawString* label);
-
// Factory methods.
FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
int pos, int end_pos);
@@ -727,6 +718,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return arg;
}
+ IterationStatement* AsIterationStatement(BreakableStatement* s) {
+ return s->AsIterationStatement();
+ }
+
void ReportUnexpectedTokenAt(
Scanner::Location location, Token::Value token,
MessageTemplate message = MessageTemplate::kUnexpectedToken);
@@ -750,6 +745,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return subject == nullptr;
}
+ V8_INLINE static bool IsIterationStatement(Statement* subject) {
+ return subject->AsIterationStatement() != nullptr;
+ }
+
// Non-null empty string.
V8_INLINE const AstRawString* EmptyIdentifierString() const {
return ast_value_factory()->empty_string();
@@ -833,6 +832,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* NewV8Intrinsic(const AstRawString* name,
const ScopedPtrList<Expression>& args, int pos);
+ Expression* NewV8RuntimeFunctionForFuzzing(
+ const Runtime::Function* function, const ScopedPtrList<Expression>& args,
+ int pos);
+
V8_INLINE Statement* NewThrowStatement(Expression* exception, int pos) {
return factory()->NewExpressionStatement(
factory()->NewThrow(exception, pos), pos);
@@ -1041,11 +1044,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
PreParser* reusable_preparser_;
Mode mode_;
+ MaybeHandle<FixedArray> maybe_wrapped_arguments_;
+
SourceRangeMap* source_range_map_ = nullptr;
- friend class ParserTarget;
friend class ParserTargetScope;
- ParserTarget* target_stack_; // for break, continue statements
ScriptCompiler::CompileOptions compile_options_;
@@ -1068,47 +1071,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int parameters_end_pos_;
};
-// ----------------------------------------------------------------------------
-// Target is a support class to facilitate manipulation of the
-// Parser's target_stack_ (the stack of potential 'break' and
-// 'continue' statement targets). Upon construction, a new target is
-// added; it is removed upon destruction.
-
-class ParserTarget {
- public:
- ParserTarget(ParserBase<Parser>* parser, BreakableStatement* statement)
- : variable_(&parser->impl()->target_stack_),
- statement_(statement),
- previous_(parser->impl()->target_stack_) {
- parser->impl()->target_stack_ = this;
- }
-
- ~ParserTarget() { *variable_ = previous_; }
-
- ParserTarget* previous() { return previous_; }
- BreakableStatement* statement() { return statement_; }
-
- private:
- ParserTarget** variable_;
- BreakableStatement* statement_;
- ParserTarget* previous_;
-};
-
-class ParserTargetScope {
- public:
- explicit ParserTargetScope(ParserBase<Parser>* parser)
- : variable_(&parser->impl()->target_stack_),
- previous_(parser->impl()->target_stack_) {
- parser->impl()->target_stack_ = nullptr;
- }
-
- ~ParserTargetScope() { *variable_ = previous_; }
-
- private:
- ParserTarget** variable_;
- ParserTarget* previous_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index cb8334bc51..8a960cdc29 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -8,6 +8,7 @@
#include "src/ast/ast.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/maybe-handles.h"
#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
@@ -18,8 +19,9 @@ namespace v8 {
namespace internal {
namespace parsing {
-bool ParseProgram(ParseInfo* info, Handle<Script> script, Isolate* isolate,
- ReportErrorsAndStatisticsMode mode) {
+bool ParseProgram(ParseInfo* info, Handle<Script> script,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info,
+ Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
DCHECK(info->is_toplevel());
DCHECK_NULL(info->literal());
@@ -38,7 +40,7 @@ bool ParseProgram(ParseInfo* info, Handle<Script> script, Isolate* isolate,
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parser.parsing_on_main_thread_);
- result = parser.ParseProgram(isolate, script, info);
+ result = parser.ParseProgram(isolate, script, info, maybe_outer_scope_info);
info->set_literal(result);
if (result) {
info->set_language_mode(info->literal()->language_mode());
@@ -57,6 +59,11 @@ bool ParseProgram(ParseInfo* info, Handle<Script> script, Isolate* isolate,
return (result != nullptr);
}
+bool ParseProgram(ParseInfo* info, Handle<Script> script, Isolate* isolate,
+ ReportErrorsAndStatisticsMode mode) {
+ return ParseProgram(info, script, kNullMaybeHandle, isolate, mode);
+}
+
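
Note: the old ParseProgram signature is kept as a thin forwarder, so existing callers compile unchanged while eval-style callers can hand in an outer ScopeInfo. A sketch of the forwarding-overload pattern with placeholder types (std::optional standing in for MaybeHandle):

#include <optional>
#include <string>

struct ScopeInfo { std::string scope_name; };

// New, fully general entry point: an outer scope may or may not be present.
bool ParseProgram(const std::string& source,
                  const std::optional<ScopeInfo>& outer_scope) {
  // A real parser would deserialize outer_scope into the scope chain here.
  (void)outer_scope;
  return !source.empty();
}

// Compatibility overload: forwards with "no outer scope", playing the role
// of kNullMaybeHandle in the hunk above.
bool ParseProgram(const std::string& source) {
  return ParseProgram(source, std::nullopt);
}

int main() { return ParseProgram("var x = 1;") ? 0 : 1; }
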
bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
DCHECK(!info->is_toplevel());
@@ -83,7 +90,7 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
result = parser.ParseFunction(isolate, info, shared_info);
info->set_literal(result);
if (result) {
- info->ast_value_factory()->Internalize(isolate->factory());
+ info->ast_value_factory()->Internalize(isolate);
if (info->is_eval()) {
info->set_allow_eval_cache(parser.allow_eval_cache());
}
@@ -102,11 +109,17 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
bool ParseAny(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
DCHECK(!shared_info.is_null());
- return info->is_toplevel()
- ? ParseProgram(
- info, handle(Script::cast(shared_info->script()), isolate),
- isolate, mode)
- : ParseFunction(info, shared_info, isolate, mode);
+ if (info->is_toplevel()) {
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info;
+ if (shared_info->HasOuterScopeInfo()) {
+ maybe_outer_scope_info =
+ handle(shared_info->GetOuterScopeInfo(), isolate);
+ }
+ return ParseProgram(info,
+ handle(Script::cast(shared_info->script()), isolate),
+ maybe_outer_scope_info, isolate, mode);
+ }
+ return ParseFunction(info, shared_info, isolate, mode);
}
} // namespace parsing
diff --git a/deps/v8/src/parsing/parsing.h b/deps/v8/src/parsing/parsing.h
index 9bce504c1e..f235017139 100644
--- a/deps/v8/src/parsing/parsing.h
+++ b/deps/v8/src/parsing/parsing.h
@@ -18,12 +18,21 @@ namespace parsing {
enum class ReportErrorsAndStatisticsMode { kYes, kNo };
// Parses the top-level source code represented by the parse info and sets its
-// function literal. Returns false (and deallocates any allocated AST
-// nodes) if parsing failed.
+// function literal. Returns false (and deallocates any allocated AST nodes) if
+// parsing failed.
V8_EXPORT_PRIVATE bool ParseProgram(
ParseInfo* info, Handle<Script> script, Isolate* isolate,
ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
+// Parses the top-level source code represented by the parse info and sets its
+// function literal. Allows passing an |outer_scope| for programs that exist in
+// another scope (e.g. eval). Returns false (and deallocates any allocated AST
+// nodes) if parsing failed.
+V8_EXPORT_PRIVATE bool ParseProgram(
+ ParseInfo* info, Handle<Script> script, MaybeHandle<ScopeInfo> outer_scope,
+ Isolate* isolate,
+ ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
+
 // Like ParseProgram but for an individual function which already has an
// allocated shared function info.
V8_EXPORT_PRIVATE bool ParseFunction(
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.cc b/deps/v8/src/parsing/pending-compilation-error-handler.cc
index ce514235a0..80d201d13f 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.cc
@@ -16,7 +16,7 @@ namespace internal {
Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgumentString(
Isolate* isolate) const {
- if (arg_ != nullptr) return arg_->string().get<Factory>();
+ if (arg_ != nullptr) return arg_->string();
if (char_arg_ != nullptr) {
return isolate->factory()
->NewStringFromUtf8(CStrVector(char_arg_))
@@ -75,6 +75,12 @@ void PendingCompilationErrorHandler::ReportWarnings(Isolate* isolate,
}
}
+void PendingCompilationErrorHandler::ReportWarnings(OffThreadIsolate* isolate,
+ Handle<Script> script) {
+ // TODO(leszeks): Do nothing, re-report on the main thread.
+ UNREACHABLE();
+}
+
void PendingCompilationErrorHandler::ReportErrors(
Isolate* isolate, Handle<Script> script,
AstValueFactory* ast_value_factory) {
@@ -83,7 +89,7 @@ void PendingCompilationErrorHandler::ReportErrors(
} else {
DCHECK(has_pending_error());
// Internalize ast values for throwing the pending error.
- ast_value_factory->Internalize(isolate->factory());
+ ast_value_factory->Internalize(isolate);
ThrowPendingError(isolate, script);
}
}
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.h b/deps/v8/src/parsing/pending-compilation-error-handler.h
index cb2908eaf8..b854c3849e 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.h
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.h
@@ -52,6 +52,7 @@ class PendingCompilationErrorHandler {
// Handle warnings detected during compilation.
void ReportWarnings(Isolate* isolate, Handle<Script> script);
+ void ReportWarnings(OffThreadIsolate* isolate, Handle<Script> script);
V8_EXPORT_PRIVATE Handle<String> FormatErrorMessageForTest(
Isolate* isolate) const;
diff --git a/deps/v8/src/parsing/preparse-data-impl.h b/deps/v8/src/parsing/preparse-data-impl.h
index a993fdf93f..11165da5ed 100644
--- a/deps/v8/src/parsing/preparse-data-impl.h
+++ b/deps/v8/src/parsing/preparse-data-impl.h
@@ -199,6 +199,7 @@ class ZonePreparseData : public ZoneObject {
int child_length);
Handle<PreparseData> Serialize(Isolate* isolate);
+ Handle<PreparseData> Serialize(OffThreadIsolate* isolate);
int children_length() const { return static_cast<int>(children_.size()); }
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 53c6256a5f..7161861b76 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -9,11 +9,13 @@
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
#include "src/handles/handles.h"
+#include "src/heap/off-thread-factory.h"
#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/preparse-data-impl.h"
#include "src/parsing/preparser.h"
+#include "src/roots/roots.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
namespace v8 {
@@ -431,6 +433,16 @@ Handle<PreparseData> PreparseDataBuilder::ByteData::CopyToHeap(
return data;
}
+Handle<PreparseData> PreparseDataBuilder::ByteData::CopyToOffThreadHeap(
+ OffThreadIsolate* isolate, int children_length) {
+ DCHECK(is_finalized_);
+ int data_length = zone_byte_data_.length();
+ Handle<PreparseData> data =
+ isolate->factory()->NewPreparseData(data_length, children_length);
+ data->copy_in(0, zone_byte_data_.begin(), data_length);
+ return data;
+}
+
Handle<PreparseData> PreparseDataBuilder::Serialize(Isolate* isolate) {
DCHECK(HasData());
DCHECK(!ThisOrParentBailedOut());
@@ -447,6 +459,22 @@ Handle<PreparseData> PreparseDataBuilder::Serialize(Isolate* isolate) {
return data;
}
+Handle<PreparseData> PreparseDataBuilder::Serialize(OffThreadIsolate* isolate) {
+ DCHECK(HasData());
+ DCHECK(!ThisOrParentBailedOut());
+ Handle<PreparseData> data =
+ byte_data_.CopyToOffThreadHeap(isolate, num_inner_with_data_);
+ int i = 0;
+ DCHECK(finalized_children_);
+ for (const auto& builder : children_) {
+ if (!builder->HasData()) continue;
+ Handle<PreparseData> child_data = builder->Serialize(isolate);
+ data->set_child(i++, *child_data);
+ }
+ DCHECK_EQ(i, data->children_length());
+ return data;
+}
+
ZonePreparseData* PreparseDataBuilder::Serialize(Zone* zone) {
DCHECK(HasData());
DCHECK(!ThisOrParentBailedOut());
@@ -473,6 +501,10 @@ class BuilderProducedPreparseData final : public ProducedPreparseData {
return builder_->Serialize(isolate);
}
+ Handle<PreparseData> Serialize(OffThreadIsolate* isolate) final {
+ return builder_->Serialize(isolate);
+ }
+
ZonePreparseData* Serialize(Zone* zone) final {
return builder_->Serialize(zone);
}
@@ -491,6 +523,11 @@ class OnHeapProducedPreparseData final : public ProducedPreparseData {
return data_;
}
+ Handle<PreparseData> Serialize(OffThreadIsolate* isolate) final {
+ // Not required.
+ UNREACHABLE();
+ }
+
ZonePreparseData* Serialize(Zone* zone) final {
// Not required.
UNREACHABLE();
@@ -508,6 +545,10 @@ class ZoneProducedPreparseData final : public ProducedPreparseData {
return data_->Serialize(isolate);
}
+ Handle<PreparseData> Serialize(OffThreadIsolate* isolate) final {
+ return data_->Serialize(isolate);
+ }
+
ZonePreparseData* Serialize(Zone* zone) final { return data_; }
private:
@@ -750,6 +791,22 @@ Handle<PreparseData> ZonePreparseData::Serialize(Isolate* isolate) {
return result;
}
+Handle<PreparseData> ZonePreparseData::Serialize(OffThreadIsolate* isolate) {
+ int data_size = static_cast<int>(byte_data()->size());
+ int child_data_length = children_length();
+ Handle<PreparseData> result =
+ isolate->factory()->NewPreparseData(data_size, child_data_length);
+ result->copy_in(0, byte_data()->data(), data_size);
+
+ for (int i = 0; i < child_data_length; i++) {
+ ZonePreparseData* child = get_child(i);
+ DCHECK_NOT_NULL(child);
+ Handle<PreparseData> child_data = child->Serialize(isolate);
+ result->set_child(i, *child_data);
+ }
+ return result;
+}
+
ZoneConsumedPreparseData::ZoneConsumedPreparseData(Zone* zone,
ZonePreparseData* data)
: data_(data), scope_data_wrapper_(data_->byte_data()) {
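
Note: CopyToOffThreadHeap and Serialize(OffThreadIsolate*) mirror their main-thread counterparts almost line for line: serialize this node, then recurse into each child that actually produced data. The shape of that recursion, sketched with plain structs (names illustrative):

#include <memory>
#include <vector>

struct PreparseData {
  std::vector<PreparseData*> children;
};

struct Builder {
  bool has_data = true;
  std::vector<std::unique_ptr<Builder>> children;

  // Serialize this node, then each child that produced data; the child slot
  // count therefore equals the number of data-bearing children.
  PreparseData* Serialize(std::vector<std::unique_ptr<PreparseData>>* heap) {
    heap->push_back(std::make_unique<PreparseData>());
    PreparseData* data = heap->back().get();
    for (const auto& child : children) {
      if (!child->has_data) continue;  // Bailed-out children are skipped.
      data->children.push_back(child->Serialize(heap));
    }
    return data;
  }
};

int main() {
  Builder root;
  root.children.push_back(std::make_unique<Builder>());
  root.children.back()->has_data = false;  // Skipped during serialization.
  std::vector<std::unique_ptr<PreparseData>> heap;
  PreparseData* data = root.Serialize(&heap);
  return data->children.empty() ? 0 : 1;
}
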
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 581adfa1d5..409942f8c3 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -140,6 +140,8 @@ class V8_EXPORT_PRIVATE PreparseDataBuilder : public ZoneObject,
void Finalize(Zone* zone);
Handle<PreparseData> CopyToHeap(Isolate* isolate, int children_length);
+ Handle<PreparseData> CopyToOffThreadHeap(OffThreadIsolate* isolate,
+ int children_length);
inline ZonePreparseData* CopyToZone(Zone* zone, int children_length);
void Reserve(size_t bytes);
@@ -208,6 +210,7 @@ class V8_EXPORT_PRIVATE PreparseDataBuilder : public ZoneObject,
friend class BuilderProducedPreparseData;
Handle<PreparseData> Serialize(Isolate* isolate);
+ Handle<PreparseData> Serialize(OffThreadIsolate* isolate);
ZonePreparseData* Serialize(Zone* zone);
void FinalizeChildren(Zone* zone);
@@ -250,6 +253,11 @@ class ProducedPreparseData : public ZoneObject {
// MaybeHandle.
virtual Handle<PreparseData> Serialize(Isolate* isolate) = 0;
+ // If there is data (if the Scope contains skippable inner functions), move
+ // the data into the heap and return a Handle to it; otherwise return a null
+ // MaybeHandle.
+ virtual Handle<PreparseData> Serialize(OffThreadIsolate* isolate) = 0;
+
// If there is data (if the Scope contains skippable inner functions), return
// an off-heap ZonePreparseData representing the data; otherwise
// return nullptr.
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 291abd0558..3c1122ef00 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -416,6 +416,10 @@ class PreParserStatement {
return PreParserStatement(kUnknownStatement);
}
+ static PreParserStatement Iteration() {
+ return PreParserStatement(kIterationStatement);
+ }
+
static PreParserStatement Null() {
return PreParserStatement(kNullStatement);
}
@@ -450,6 +454,8 @@ class PreParserStatement {
bool IsNull() { return code_ == kNullStatement; }
+ bool IsIterationStatement() { return code_ == kIterationStatement; }
+
bool IsEmptyStatement() {
DCHECK(!IsNull());
return code_ == kEmptyStatement;
@@ -478,6 +484,7 @@ class PreParserStatement {
kEmptyStatement,
kUnknownStatement,
kJumpStatement,
+ kIterationStatement,
kStringLiteralExpressionStatement,
};
@@ -691,8 +698,7 @@ class PreParserFactory {
return PreParserBlock::Default();
}
- PreParserBlock NewBlock(bool ignore_completion_value,
- ZonePtrList<const AstRawString>* labels) {
+ PreParserBlock NewBlock(bool ignore_completion_value, bool is_breakable) {
return PreParserBlock::Default();
}
@@ -737,20 +743,15 @@ class PreParserFactory {
return PreParserStatement::Default();
}
- PreParserStatement NewDoWhileStatement(
- ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos) {
- return PreParserStatement::Default();
+ PreParserStatement NewDoWhileStatement(int pos) {
+ return PreParserStatement::Iteration();
}
- PreParserStatement NewWhileStatement(
- ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos) {
- return PreParserStatement::Default();
+ PreParserStatement NewWhileStatement(int pos) {
+ return PreParserStatement::Iteration();
}
- PreParserStatement NewSwitchStatement(ZonePtrList<const AstRawString>* labels,
- const PreParserExpression& tag,
+ PreParserStatement NewSwitchStatement(const PreParserExpression& tag,
int pos) {
return PreParserStatement::Default();
}
@@ -761,23 +762,17 @@ class PreParserFactory {
return PreParserStatement::Default();
}
- PreParserStatement NewForStatement(
- ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos) {
- return PreParserStatement::Default();
+ PreParserStatement NewForStatement(int pos) {
+ return PreParserStatement::Iteration();
}
- PreParserStatement NewForEachStatement(
- ForEachStatement::VisitMode visit_mode,
- ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos) {
- return PreParserStatement::Default();
+ PreParserStatement NewForEachStatement(ForEachStatement::VisitMode visit_mode,
+ int pos) {
+ return PreParserStatement::Iteration();
}
- PreParserStatement NewForOfStatement(
- ZonePtrList<const AstRawString>* labels,
- ZonePtrList<const AstRawString>* own_labels, int pos, IteratorType type) {
- return PreParserStatement::Default();
+ PreParserStatement NewForOfStatement(int pos, IteratorType type) {
+ return PreParserStatement::Iteration();
}
PreParserExpression NewCallRuntime(
@@ -819,17 +814,6 @@ class PreParserFormalParameters : public FormalParametersBase {
bool strict_parameter_error_ = false;
};
-class PreParserTarget {
- public:
- PreParserTarget(ParserBase<PreParser>* preparser,
- PreParserStatement statement) {}
-};
-
-class PreParserTargetScope {
- public:
- explicit PreParserTargetScope(ParserBase<PreParser>* preparser) {}
-};
-
class PreParserFuncNameInferrer {
public:
explicit PreParserFuncNameInferrer(AstValueFactory* avf) {}
@@ -904,8 +888,6 @@ struct ParserTypes<PreParser> {
using FuncNameInferrer = PreParserFuncNameInferrer;
using SourceRange = PreParserSourceRange;
using SourceRangeScope = PreParserSourceRangeScope;
- using Target = PreParserTarget;
- using TargetScope = PreParserTargetScope;
};
@@ -1070,18 +1052,6 @@ class PreParser : public ParserBase<PreParser> {
const PreParserScopedStatementList* body, PreParserStatement block,
const PreParserExpression& return_value) {}
- V8_INLINE void DeclareLabel(ZonePtrList<const AstRawString>** labels,
- ZonePtrList<const AstRawString>** own_labels,
- const AstRawString* label) {
- DCHECK(!parsing_module_ || !label->IsOneByteEqualTo("await"));
- }
-
- // TODO(nikolaos): The preparser currently does not keep track of labels.
- V8_INLINE bool ContainsLabel(ZonePtrList<const AstRawString>* labels,
- const PreParserIdentifier& label) {
- return false;
- }
-
V8_INLINE PreParserExpression
RewriteReturn(const PreParserExpression& return_value, int pos) {
return return_value;
@@ -1186,17 +1156,6 @@ class PreParser : public ParserBase<PreParser> {
bool IdentifierEquals(const PreParserIdentifier& identifier,
const AstRawString* other);
- // TODO(nikolaos): The preparser currently does not keep track of labels
- // and targets.
- V8_INLINE PreParserStatement
- LookupBreakTarget(const PreParserIdentifier& label) {
- return PreParserStatement::Default();
- }
- V8_INLINE PreParserStatement
- LookupContinueTarget(const PreParserIdentifier& label) {
- return PreParserStatement::Default();
- }
-
V8_INLINE PreParserStatement DeclareFunction(
const PreParserIdentifier& variable_name,
const PreParserExpression& function, VariableMode mode, VariableKind kind,
@@ -1498,7 +1457,7 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void ReportMessageAt(Scanner::Location source_location,
MessageTemplate message,
const PreParserIdentifier& arg) {
- UNREACHABLE();
+ ReportMessageAt(source_location, message, arg.string_);
}
void ReportMessageAt(Scanner::Location source_location,
@@ -1512,6 +1471,8 @@ class PreParser : public ParserBase<PreParser> {
return arg.string_;
}
+ PreParserStatement AsIterationStatement(PreParserStatement s) { return s; }
+
// "null" return type creators.
V8_INLINE static PreParserIdentifier NullIdentifier() {
return PreParserIdentifier::Null();
@@ -1538,6 +1499,10 @@ class PreParser : public ParserBase<PreParser> {
return subject.IsNull();
}
+ V8_INLINE static bool IsIterationStatement(PreParserStatement subject) {
+ return subject.IsIterationStatement();
+ }
+
V8_INLINE PreParserIdentifier EmptyIdentifierString() const {
PreParserIdentifier result = PreParserIdentifier::Default();
result.string_ = ast_value_factory()->empty_string();
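
Note: because the preparser never builds real AST nodes, iteration statements are now distinguished by a statement-code tag rather than by label lists. A self-contained sketch of the marker approach, condensed from the hunks above:

#include <cstdio>

// The preparser's statements are value types wrapping an enum code, so
// "is this an iteration statement?" is a constant-time tag check.
class PreParserStatement {
 public:
  static PreParserStatement Default() {
    return PreParserStatement(kUnknownStatement);
  }
  static PreParserStatement Iteration() {
    return PreParserStatement(kIterationStatement);
  }
  bool IsIterationStatement() const { return code_ == kIterationStatement; }

 private:
  enum Type { kUnknownStatement, kIterationStatement };
  explicit PreParserStatement(Type code) : code_(code) {}
  Type code_;
};

int main() {
  PreParserStatement loop = PreParserStatement::Iteration();  // NewForStatement
  PreParserStatement other = PreParserStatement::Default();   // NewBlock etc.
  std::printf("%d %d\n", loop.IsIterationStatement(),
              other.IsIterationStatement());  // prints "1 0"
}
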
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 013ea68fac..75ec661d2d 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -146,7 +146,7 @@ void Processor::VisitBlock(Block* node) {
// returns 'undefined'. To obtain the same behavior with v8, we need
// to prevent rewriting in that case.
if (!node->ignore_completion_value()) {
- BreakableScope scope(this, node->labels() != nullptr);
+ BreakableScope scope(this, node->is_breakable());
Process(node->statements());
}
replacement_ = node;
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 49c7e1f793..1414b3490b 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -792,10 +792,35 @@ std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
const char* data, size_t length) {
+ if (data == nullptr) {
+ DCHECK_EQ(length, 0);
+
+  // We don't want to pass a null pointer into the character stream, because
+  // then the one-past-the-end pointer is undefined, so instead pass through
+  // this static array.
+ static const char non_null_empty_string[1] = {0};
+ data = non_null_empty_string;
+ }
+
return std::unique_ptr<Utf16CharacterStream>(
new BufferedCharacterStream<TestingStream>(
- static_cast<size_t>(0), reinterpret_cast<const uint8_t*>(data),
- static_cast<size_t>(length)));
+ 0, reinterpret_cast<const uint8_t*>(data), length));
+}
+
+std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
+ const uint16_t* data, size_t length) {
+ if (data == nullptr) {
+ DCHECK_EQ(length, 0);
+
+  // We don't want to pass a null pointer into the character stream, because
+  // then the one-past-the-end pointer is undefined, so instead pass through
+  // this static array.
+ static const uint16_t non_null_empty_uint16_t_string[1] = {0};
+ data = non_null_empty_uint16_t_string;
+ }
+
+ return std::unique_ptr<Utf16CharacterStream>(
+ new UnbufferedCharacterStream<TestingStream>(0, data, length));
}
Utf16CharacterStream* ScannerStream::For(
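
Note: the nullptr special-casing above exists because forming the one-past-the-end pointer from a null pointer is flagged by sanitizers and traditionally undefined, even for a zero-length range. The sentinel technique in isolation:

#include <cassert>
#include <cstddef>

// Produce a valid range begin even for empty input: substituting a static
// one-byte sentinel avoids forming `nullptr + length`, which sanitizers
// reject and which was traditionally undefined behavior.
const char* NonNullBegin(const char* data, size_t length) {
  if (data == nullptr) {
    assert(length == 0);
    static const char kEmpty[1] = {0};
    return kEmpty;
  }
  return data;
}

int main() {
  const char* begin = NonNullBegin(nullptr, 0);
  const char* end = begin + 0;  // Well-defined: begin points at a real object.
  assert(begin == end);
}
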
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index c4c7064013..09181356f0 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -31,6 +31,8 @@ class V8_EXPORT_PRIVATE ScannerStream {
static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data);
static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data,
size_t length);
+ static std::unique_ptr<Utf16CharacterStream> ForTesting(const uint16_t* data,
+ size_t length);
};
} // namespace internal
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 5d5fbdbab6..91e4183d53 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -576,7 +576,8 @@ Token::Value Scanner::ScanTemplateSpan() {
return result;
}
-Handle<String> Scanner::SourceUrl(Isolate* isolate) const {
+template <typename LocalIsolate>
+Handle<String> Scanner::SourceUrl(LocalIsolate* isolate) const {
Handle<String> tmp;
if (source_url_.length() > 0) {
tmp = source_url_.Internalize(isolate);
@@ -584,7 +585,11 @@ Handle<String> Scanner::SourceUrl(Isolate* isolate) const {
return tmp;
}
-Handle<String> Scanner::SourceMappingUrl(Isolate* isolate) const {
+template Handle<String> Scanner::SourceUrl(Isolate* isolate) const;
+template Handle<String> Scanner::SourceUrl(OffThreadIsolate* isolate) const;
+
+template <typename LocalIsolate>
+Handle<String> Scanner::SourceMappingUrl(LocalIsolate* isolate) const {
Handle<String> tmp;
if (source_mapping_url_.length() > 0) {
tmp = source_mapping_url_.Internalize(isolate);
@@ -592,6 +597,10 @@ Handle<String> Scanner::SourceMappingUrl(Isolate* isolate) const {
return tmp;
}
+template Handle<String> Scanner::SourceMappingUrl(Isolate* isolate) const;
+template Handle<String> Scanner::SourceMappingUrl(
+ OffThreadIsolate* isolate) const;
+
bool Scanner::ScanDigitsWithNumericSeparators(bool (*predicate)(uc32 ch),
bool is_check_first_digit) {
// we must have at least one digit after 'x'/'b'/'o'
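
Note: SourceUrl and SourceMappingUrl are now templates whose definitions stay in the .cc file, so the explicit instantiations above are what satisfy the linker for the two isolate types actually used. The general pattern, compressed into one file for brevity:

struct MainIsolate {};
struct OffThreadIsolate {};

// Declaration visible to all callers (normally in the header).
template <typename LocalIsolate>
int Internalize(LocalIsolate* isolate);

// Definition kept out of the header (normally in the .cc file).
template <typename LocalIsolate>
int Internalize(LocalIsolate* isolate) {
  return isolate != nullptr;  // Real code would use the isolate's factory.
}

// Explicit instantiation definitions: without these, callers in other
// translation units get "undefined reference" errors at link time.
template int Internalize(MainIsolate* isolate);
template int Internalize(OffThreadIsolate* isolate);

int main() {
  MainIsolate m;
  return Internalize(&m) == 1 ? 0 : 1;
}
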
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 2e953f6e6a..bed63c9d4e 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -403,8 +403,10 @@ class V8_EXPORT_PRIVATE Scanner {
return ScanTemplateSpan();
}
- Handle<String> SourceUrl(Isolate* isolate) const;
- Handle<String> SourceMappingUrl(Isolate* isolate) const;
+ template <typename LocalIsolate>
+ Handle<String> SourceUrl(LocalIsolate* isolate) const;
+ template <typename LocalIsolate>
+ Handle<String> SourceMappingUrl(LocalIsolate* isolate) const;
bool FoundHtmlComment() const { return found_html_comment_; }
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index c4f2777e14..2ae2222419 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -978,7 +978,7 @@ void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
int context_locals = scope_info.ContextLocalCount();
for (int i = 0; i < context_locals; ++i) {
String local_name = scope_info.ContextLocalName(i);
- int idx = Context::MIN_CONTEXT_SLOTS + i;
+ int idx = scope_info.ContextHeaderLength() + i;
SetContextReference(entry, local_name, context.get(idx),
Context::OffsetOfElementAt(idx));
}
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 9a8fd20448..4c44b8b4a3 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -16,7 +16,7 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
const char* resource_name, int line_number,
int column_number,
std::unique_ptr<SourcePositionTable> line_info,
- Address instruction_start, bool is_shared_cross_origin)
+ bool is_shared_cross_origin)
: bit_field_(TagField::encode(tag) |
BuiltinIdField::encode(Builtins::builtin_count) |
SharedCrossOriginField::encode(is_shared_cross_origin)),
@@ -26,11 +26,11 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
column_number_(column_number),
script_id_(v8::UnboundScript::kNoScriptId),
position_(0),
- line_info_(std::move(line_info)),
- instruction_start_(instruction_start) {}
+ line_info_(std::move(line_info)) {}
-inline CodeEntry* ProfileGenerator::FindEntry(Address address) {
- CodeEntry* entry = code_map_->FindEntry(address);
+inline CodeEntry* ProfileGenerator::FindEntry(Address address,
+ Address* out_instruction_start) {
+ CodeEntry* entry = code_map_->FindEntry(address, out_instruction_start);
if (entry) entry->mark_used();
return entry;
}
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 11821035d5..b8389f4350 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -229,8 +229,6 @@ void CodeEntry::print() const {
base::OS::Print(" - column_number: %d\n", column_number_);
base::OS::Print(" - script_id: %d\n", script_id_);
base::OS::Print(" - position: %d\n", position_);
- base::OS::Print(" - instruction_start: %p\n",
- reinterpret_cast<void*>(instruction_start_));
if (line_info_) {
line_info_->print();
@@ -494,6 +492,11 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
profiler_(profiler),
streaming_next_sample_(0),
id_(++last_id_) {
+ // The startTime timestamp is not converted to Perfetto's clock domain and
+ // will get out of sync with other timestamps Perfetto knows about, including
+ // the automatic trace event "ts" timestamp. startTime is included for
+ // backward compatibility with the tracing protocol but the value of "ts"
+ // should be used instead (it is recorded nearly immediately after).
auto value = TracedValue::Create();
value->SetDouble("startTime", start_time_.since_origin().InMicroseconds());
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
@@ -603,6 +606,16 @@ void CpuProfile::StreamPendingTraceEvents() {
value->EndDictionary();
}
if (streaming_next_sample_ != samples_.size()) {
+ // timeDeltas are computed within CLOCK_MONOTONIC. However, trace event
+ // "ts" timestamps are converted to CLOCK_BOOTTIME by Perfetto. To get
+ // absolute timestamps in CLOCK_BOOTTIME from timeDeltas, add them to
+ // the "ts" timestamp from the initial "Profile" trace event sent by
+ // CpuProfile::CpuProfile().
+ //
+ // Note that if the system is suspended and resumed while samples_ is
+ // captured, timeDeltas derived after resume will not be convertible to
+ // correct CLOCK_BOOTTIME time values (for instance, producing
+ // CLOCK_BOOTTIME time values in the middle of the suspended period).
value->BeginArray("timeDeltas");
base::TimeTicks lastTimestamp =
streaming_next_sample_ ? samples_[streaming_next_sample_ - 1].timestamp
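
Note: per the comment above, a consumer reconstructs absolute sample timestamps by cumulatively adding timeDeltas to the "ts" of the initial Profile trace event. A sketch of that arithmetic, with all values in microseconds:

#include <cstdint>
#include <cstdio>
#include <vector>

// Rebuild absolute CLOCK_BOOTTIME timestamps from the trace event's "ts"
// and the per-sample deltas, as the protocol comment above prescribes.
std::vector<int64_t> AbsoluteTimestamps(int64_t profile_event_ts_us,
                                        const std::vector<int64_t>& deltas) {
  std::vector<int64_t> out;
  int64_t t = profile_event_ts_us;
  for (int64_t d : deltas) {
    t += d;  // Each delta is relative to the previous sample.
    out.push_back(t);
  }
  return out;
}

int main() {
  for (int64_t ts : AbsoluteTimestamps(1'000'000, {100, 250, 125}))
    std::printf("%lld\n", static_cast<long long>(ts));
}
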
@@ -636,6 +649,12 @@ void CpuProfile::FinishProfile() {
context_filter_ = nullptr;
StreamPendingTraceEvents();
auto value = TracedValue::Create();
+ // The endTime timestamp is not converted to Perfetto's clock domain and will
+ // get out of sync with other timestamps Perfetto knows about, including the
+ // automatic trace event "ts" timestamp. endTime is included for backward
+ // compatibility with the tracing protocol: its presence in "data" is used by
+ // devtools to identify the last ProfileChunk but the value of "ts" should be
+ // used instead (it is recorded nearly immediately after).
value->SetDouble("endTime", end_time_.since_origin().InMicroseconds());
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
"ProfileChunk", id_, "data", std::move(value));
@@ -664,8 +683,6 @@ void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
ClearCodesInRange(addr, addr + size);
unsigned index = AddCodeEntry(addr, entry);
code_map_.emplace(addr, CodeEntryMapInfo{index, size});
- DCHECK(entry->instruction_start() == kNullAddress ||
- addr == entry->instruction_start());
}
void CodeMap::ClearCodesInRange(Address start, Address end) {
@@ -683,17 +700,15 @@ void CodeMap::ClearCodesInRange(Address start, Address end) {
code_map_.erase(left, right);
}
-CodeEntry* CodeMap::FindEntry(Address addr) {
+CodeEntry* CodeMap::FindEntry(Address addr, Address* out_instruction_start) {
auto it = code_map_.upper_bound(addr);
if (it == code_map_.begin()) return nullptr;
--it;
Address start_address = it->first;
Address end_address = start_address + it->second.size;
CodeEntry* ret = addr < end_address ? entry(it->second.index) : nullptr;
- if (ret && ret->instruction_start() != kNullAddress) {
- DCHECK_EQ(start_address, ret->instruction_start());
- DCHECK(addr >= start_address && addr < end_address);
- }
+ DCHECK(!ret || (addr >= start_address && addr < end_address));
+ if (ret && out_instruction_start) *out_instruction_start = start_address;
return ret;
}
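
Note: with instruction_start_ gone from CodeEntry, the start address is recovered from the code map during lookup: the map key of the range containing addr is the instruction start. A standalone sketch of the upper_bound lookup with the optional out-parameter:

#include <cstdint>
#include <cstdio>
#include <map>

using Address = uintptr_t;

struct CodeEntry { const char* name; unsigned size; };

// Finds the entry whose [start, start + size) range contains addr, and
// optionally reports that start address instead of storing it per entry.
const CodeEntry* FindEntry(const std::map<Address, CodeEntry>& code_map,
                           Address addr, Address* out_instruction_start) {
  auto it = code_map.upper_bound(addr);
  if (it == code_map.begin()) return nullptr;
  --it;  // Now it->first <= addr: the candidate range's start.
  Address start = it->first;
  Address end = start + it->second.size;
  if (addr >= end) return nullptr;
  if (out_instruction_start) *out_instruction_start = start;
  return &it->second;
}

int main() {
  std::map<Address, CodeEntry> map{{0x1000, {"f", 0x40}}};
  Address start = 0;
  const CodeEntry* e = FindEntry(map, 0x1010, &start);
  std::printf("%s @ %#zx\n", e ? e->name : "none", static_cast<size_t>(start));
}
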
@@ -706,9 +721,6 @@ void CodeMap::MoveCode(Address from, Address to) {
DCHECK(from + info.size <= to || to + info.size <= from);
ClearCodesInRange(to, to + info.size);
code_map_.emplace(to, info);
-
- CodeEntry* entry = code_entries_[info.index].entry;
- entry->set_instruction_start(to);
}
unsigned CodeMap::AddCodeEntry(Address start, CodeEntry* entry) {
@@ -886,13 +898,15 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
true});
} else {
Address attributed_pc = reinterpret_cast<Address>(sample.pc);
- CodeEntry* pc_entry = FindEntry(attributed_pc);
+ Address pc_entry_instruction_start = kNullAddress;
+ CodeEntry* pc_entry =
+ FindEntry(attributed_pc, &pc_entry_instruction_start);
// If there is no pc_entry, we're likely in native code. Find out if the
// top of the stack (the return address) was pointing inside a JS
// function, meaning that we have encountered a frameless invocation.
if (!pc_entry && !sample.has_external_callback) {
attributed_pc = reinterpret_cast<Address>(sample.tos);
- pc_entry = FindEntry(attributed_pc);
+ pc_entry = FindEntry(attributed_pc, &pc_entry_instruction_start);
}
// If pc is in the function code before it set up stack frame or after the
// frame was destroyed, SafeStackFrameIterator incorrectly thinks that
@@ -900,7 +914,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// caller's frame. Check for this case and just skip such samples.
if (pc_entry) {
int pc_offset =
- static_cast<int>(attributed_pc - pc_entry->instruction_start());
+ static_cast<int>(attributed_pc - pc_entry_instruction_start);
// TODO(petermarshall): pc_offset can still be negative in some cases.
src_line = pc_entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
@@ -932,12 +946,12 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
for (unsigned i = 0; i < sample.frames_count; ++i) {
Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
Address native_context = reinterpret_cast<Address>(sample.contexts[i]);
- CodeEntry* entry = FindEntry(stack_pos);
+ Address instruction_start = kNullAddress;
+ CodeEntry* entry = FindEntry(stack_pos, &instruction_start);
int line_number = no_line_info;
if (entry) {
// Find out if the entry has an inlining stack associated.
- int pc_offset =
- static_cast<int>(stack_pos - entry->instruction_start());
+ int pc_offset = static_cast<int>(stack_pos - instruction_start);
// TODO(petermarshall): pc_offset can still be negative in some cases.
const std::vector<CodeEntryAndLineNumber>* inline_stack =
entry->GetInlineStack(pc_offset);
@@ -1014,6 +1028,7 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
case PARSER:
case COMPILER:
case BYTECODE_COMPILER:
+ case ATOMICS_WAIT:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 38e31c5596..9f7ef34d18 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -65,7 +65,6 @@ class CodeEntry {
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
std::unique_ptr<SourcePositionTable> line_info = nullptr,
- Address instruction_start = kNullAddress,
bool is_shared_cross_origin = false);
const char* name() const { return name_; }
@@ -136,9 +135,6 @@ class CodeEntry {
const std::vector<CodeEntryAndLineNumber>* GetInlineStack(
int pc_offset) const;
- void set_instruction_start(Address start) { instruction_start_ = start; }
- Address instruction_start() const { return instruction_start_; }
-
CodeEventListener::LogEventsAndTags tag() const {
return TagField::decode(bit_field_);
}
@@ -223,7 +219,6 @@ class CodeEntry {
int script_id_;
int position_;
std::unique_ptr<SourcePositionTable> line_info_;
- Address instruction_start_;
std::unique_ptr<RareData> rare_data_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
@@ -449,7 +444,7 @@ class V8_EXPORT_PRIVATE CodeMap {
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
- CodeEntry* FindEntry(Address addr);
+ CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
void Print();
private:
@@ -533,7 +528,8 @@ class V8_EXPORT_PRIVATE ProfileGenerator {
CodeMap* code_map() { return code_map_; }
private:
- CodeEntry* FindEntry(Address address);
+ CodeEntry* FindEntry(Address address,
+ Address* out_instruction_start = nullptr);
CodeEntry* EntryForVMState(StateTag tag);
CpuProfilesCollection* profiles_;
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 29eea0f4ca..654b751a6c 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -38,8 +38,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
rec->instruction_start = code->InstructionStart();
rec->entry = new CodeEntry(tag, GetName(name), CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->InstructionStart());
+ CpuProfileNode::kNoColumnNumberInfo, nullptr);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
@@ -52,8 +51,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
rec->instruction_start = code->InstructionStart();
rec->entry = new CodeEntry(tag, GetName(*name), CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->InstructionStart());
+ CpuProfileNode::kNoColumnNumberInfo, nullptr);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
@@ -68,8 +66,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
rec->entry = new CodeEntry(tag, GetName(shared->DebugName()),
GetName(InferScriptName(*script_name, *shared)),
CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->InstructionStart());
+ CpuProfileNode::kNoColumnNumberInfo, nullptr);
DCHECK(!code->IsCode());
rec->entry->FillFunctionInfo(*shared);
rec->instruction_size = code->InstructionSize();
@@ -164,7 +161,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
std::unique_ptr<CodeEntry> inline_entry = std::make_unique<CodeEntry>(
tag, GetFunctionName(*pos_info.shared), resource_name,
start_pos_info.line + 1, start_pos_info.column + 1, nullptr,
- code->InstructionStart(), inline_is_shared_cross_origin);
+ inline_is_shared_cross_origin);
inline_entry->FillFunctionInfo(*pos_info.shared);
// Create a canonical CodeEntry for each inlined frame and then re-use
@@ -182,8 +179,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
rec->entry =
new CodeEntry(tag, GetFunctionName(*shared),
GetName(InferScriptName(*script_name, *shared)), line,
- column, std::move(line_table),
- abstract_code->InstructionStart(), is_shared_cross_origin);
+ column, std::move(line_table), is_shared_cross_origin);
if (!inline_stacks.empty()) {
rec->entry->SetInlineStacks(std::move(cached_inline_entries),
std::move(inline_stacks));
@@ -200,10 +196,10 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->instruction_start();
- rec->entry = new CodeEntry(
- tag, GetName(name), CodeEntry::kWasmResourceNamePrefix,
- CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
- nullptr, code->instruction_start(), true);
+ rec->entry =
+ new CodeEntry(tag, GetName(name), CodeEntry::kWasmResourceNamePrefix,
+ CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, nullptr, true);
rec->instruction_size = code->instructions().length();
DispatchCodeEvent(evt_rec);
}
@@ -247,7 +243,7 @@ void ProfilerListener::RegExpCodeCreateEvent(Handle<AbstractCode> code,
rec->entry = new CodeEntry(
CodeEventListener::REG_EXP_TAG, GetConsName("RegExp: ", *source),
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr, code->InstructionStart());
+ CpuProfileNode::kNoColumnNumberInfo, nullptr);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index f5aa1dc3a0..e7d780e084 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -178,6 +178,9 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
case IDLE:
name = "(IDLE)";
break;
+ // Treat atomics wait as a normal JS event; we don't care about the
+ // difference for allocations.
+ case ATOMICS_WAIT:
case JS:
name = "(JS)";
break;
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index eeb5261f2c..92143e1ce0 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -22,7 +22,7 @@ StringsStorage::StringsStorage() : names_(StringsMatch) {}
StringsStorage::~StringsStorage() {
for (base::HashMap::Entry* p = names_.Start(); p != nullptr;
p = names_.Next(p)) {
- DeleteArray(reinterpret_cast<const char*>(p->value));
+ DeleteArray(reinterpret_cast<const char*>(p->key));
}
}
@@ -34,9 +34,10 @@ const char* StringsStorage::GetCopy(const char* src) {
StrNCpy(dst, src, len);
dst[len] = '\0';
entry->key = dst.begin();
- entry->value = entry->key;
}
- return reinterpret_cast<const char*>(entry->value);
+ entry->value =
+ reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) + 1);
+ return reinterpret_cast<const char*>(entry->key);
}
const char* StringsStorage::GetFormatted(const char* format, ...) {
@@ -52,11 +53,12 @@ const char* StringsStorage::AddOrDisposeString(char* str, int len) {
if (entry->value == nullptr) {
// New entry added.
entry->key = str;
- entry->value = str;
} else {
DeleteArray(str);
}
- return reinterpret_cast<const char*>(entry->value);
+ entry->value =
+ reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) + 1);
+ return reinterpret_cast<const char*>(entry->key);
}
const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
@@ -106,6 +108,30 @@ const char* StringsStorage::GetConsName(const char* prefix, Name name) {
return "";
}
+bool StringsStorage::Release(const char* str) {
+ int len = static_cast<int>(strlen(str));
+ uint32_t hash = StringHasher::HashSequentialString(str, len, kZeroHashSeed);
+ base::HashMap::Entry* entry = names_.Lookup(const_cast<char*>(str), hash);
+ DCHECK(entry);
+ if (!entry) {
+ return false;
+ }
+
+ DCHECK(entry->value);
+ entry->value =
+ reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) - 1);
+
+ if (entry->value == 0) {
+ names_.Remove(const_cast<char*>(str), hash);
+ DeleteArray(str);
+ }
+ return true;
+}
+
+size_t StringsStorage::GetStringCountForTesting() const {
+ return names_.occupancy();
+}
+
base::HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
uint32_t hash = StringHasher::HashSequentialString(str, len, kZeroHashSeed);
return names_.LookupOrInsert(const_cast<char*>(str), hash);
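
The strings-storage change repurposes each hash-map entry's void* value slot as an integer reference count: GetCopy and AddOrDisposeString increment it on every lookup, and the new Release decrements it, deleting the stored copy once it reaches zero. A standalone sketch of the trick (simplified types; the real map is v8::base::HashMap):

    #include <cstddef>

    struct Entry {
      const char* key = nullptr;  // owned C string
      void* value = nullptr;      // reinterpreted as a refcount
    };

    size_t RefCount(const Entry& e) { return reinterpret_cast<size_t>(e.value); }

    void Ref(Entry& e) {
      e.value = reinterpret_cast<void*>(RefCount(e) + 1);
    }

    // Returns true when the last reference is dropped and the key may be freed.
    bool Unref(Entry& e) {
      e.value = reinterpret_cast<void*>(RefCount(e) - 1);
      return RefCount(e) == 0;
    }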
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index 650ecac3e6..4948957f83 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -35,6 +35,13 @@ class V8_EXPORT_PRIVATE StringsStorage {
// Appends string resulting from name to prefix, then returns the stored
// result.
const char* GetConsName(const char* prefix, Name name);
+ // Reduces the refcount of the given string, freeing it if no other
+ // references are made to it.
+ // Returns true if the string was successfully unref'd.
+ bool Release(const char* str);
+
+ // Returns the number of strings in the store.
+ size_t GetStringCountForTesting() const;
private:
static bool StringsMatch(void* key1, void* key2);
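
In use, every Get* call that returns a stored string now takes a reference that the caller (presumably whichever profiler object retains the string) is expected to balance with Release; a hypothetical pairing:

    StringsStorage storage;
    const char* a = storage.GetCopy("entry");  // refcount 1
    const char* b = storage.GetCopy("entry");  // same pointer, refcount 2
    storage.Release(a);                        // refcount 1, string still live
    storage.Release(b);                        // refcount 0: freed and removed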
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 6c1b8a9e90..00bff91cd0 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -110,7 +110,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
}
state->sp = reinterpret_cast<void*>(simulator->get_register(Simulator::sp));
state->fp = reinterpret_cast<void*>(simulator->get_register(Simulator::fp));
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
if (!simulator->has_bad_pc()) {
state->pc = reinterpret_cast<void*>(simulator->get_pc());
}
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 8f9da563a9..03dac337e0 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -110,6 +110,8 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
success_label_(),
backtrack_label_(),
exit_label_() {
+ masm_->set_root_array_available(false);
+
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
@@ -221,9 +223,8 @@ void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(eq, on_equal);
}
-
void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ ldr(r0, register_location(start_reg)); // Index of start of capture
__ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
@@ -315,7 +316,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// r0: Address byte_offset1 - Address captured substring's start.
// r1: Address byte_offset2 - Address of current character position.
// r2: size_t byte_length - length of capture in bytes(!)
- // r3: Isolate* isolate or 0 if unicode flag.
+ // r3: Isolate* isolate.
// Address of start of capture.
__ add(r0, r0, Operand(end_of_input_address()));
@@ -329,14 +330,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ sub(r1, r1, r4);
}
// Isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ mov(r3, Operand(0));
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- }
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -360,7 +354,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerARM::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
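
Across all the port files below, CheckNotBackReferenceIgnoreCase loses its unicode parameter: the comparison helper is now always handed a real Isolate*, presumably because the special-case tables introduced elsewhere in this patch let the /i and /iu paths share one canonicalizing comparison. The shape of the C++ helper, per the register comments above and the interpreter call later in the patch (return value 1 means "matched"):

    static int CaseInsensitiveCompareUC16(Address byte_offset1,
                                          Address byte_offset2,
                                          size_t byte_length, Isolate* isolate);

The new masm_->set_root_array_available(false) calls in each constructor appear to tell the macro-assembler not to rely on the root register, since generated regexp code is entered without the usual JS frame scaffolding; that reading is an inference from the surrounding code, not stated in the patch.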
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 6320913f4c..22628fb760 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 56658819b1..43a6bdf912 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -120,10 +120,14 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
success_label_(),
backtrack_label_(),
exit_label_() {
+ masm_->set_root_array_available(false);
+
DCHECK_EQ(0, registers_to_save % 2);
// We can cache at most 16 W registers in x0-x7.
STATIC_ASSERT(kNumCachedRegisters <= 16);
STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
+ __ CallTarget();
+
__ B(&entry_label_); // We'll write the entry code later.
__ Bind(&start_label_); // And then continue from here.
}
@@ -212,6 +216,9 @@ void RegExpMacroAssemblerARM64::Bind(Label* label) {
__ Bind(label);
}
+void RegExpMacroAssemblerARM64::BindJumpTarget(Label* label) {
+ __ BindJumpTarget(label);
+}
void RegExpMacroAssemblerARM64::CheckCharacter(uint32_t c, Label* on_equal) {
CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
@@ -286,9 +293,8 @@ void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(eq, on_equal);
}
-
void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
Register capture_start_offset = w10;
@@ -402,7 +408,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
// x0: Address byte_offset1 - Address captured substring's start.
// x1: Address byte_offset2 - Address of current character position.
// w2: size_t byte_length - length of capture in bytes(!)
- // x3: Isolate* isolate or 0 if unicode flag
+ // x3: Isolate* isolate.
// Address of start of capture.
__ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
@@ -414,14 +420,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Sub(x1, x1, Operand(capture_length, SXTW));
}
// Isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ Mov(x3, Operand(0));
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ Mov(x3, ExternalReference::isolate_address(isolate()));
- }
+ __ Mov(x3, ExternalReference::isolate_address(isolate()));
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -737,10 +736,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
CPURegList argument_registers(x0, x5, x6, x7);
CPURegList registers_to_retain = kCalleeSaved;
- DCHECK_EQ(11, kCalleeSaved.Count());
+ registers_to_retain.Combine(fp);
registers_to_retain.Combine(lr);
- __ PushCPURegList(registers_to_retain);
+ DCHECK(registers_to_retain.IncludesAliasOf(lr));
+ __ PushCPURegList<TurboAssembler::kSignLR>(registers_to_retain);
__ PushCPURegList(argument_registers);
// Set frame pointer in place.
@@ -1035,7 +1035,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Mov(sp, fp);
// Restore registers.
- __ PopCPURegList(registers_to_retain);
+ __ PopCPURegList<TurboAssembler::kAuthLR>(registers_to_retain);
__ Ret();
@@ -1585,14 +1585,14 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
- __ Pop(lr, xzr);
+ __ Pop<TurboAssembler::kAuthLR>(padreg, lr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerARM64::SaveLinkRegister() {
__ Sub(lr, lr, Operand(masm_->CodeObject()));
- __ Push(xzr, lr);
+ __ Push<TurboAssembler::kSignLR>(lr, padreg);
}
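
The arm64 port additionally wires regexp code into pointer authentication and branch protection: the kSignLR/kAuthLR template arguments sign the link register before it is spilled and authenticate it on reload, and CallTarget/BindJumpTarget presumably emit BTI landing pads for indirect-branch targets (an inference from the names; the patch itself does not say). The invariant is that sign and authenticate come in matched pairs around each spill:

    __ Push<TurboAssembler::kSignLR>(lr, padreg);  // sign, then spill LR
    // ... body that may clobber lr ...
    __ Pop<TurboAssembler::kAuthLR>(padreg, lr);   // reload, then authenticate LR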
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index cee9e2c97e..91b5e90bf5 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -42,7 +42,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
@@ -65,6 +65,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
+ virtual void BindJumpTarget(Label* label = nullptr);
virtual void Fail();
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
diff --git a/deps/v8/src/regexp/gen-regexp-special-case.cc b/deps/v8/src/regexp/gen-regexp-special-case.cc
index 8aace6ab88..9606c5d70d 100644
--- a/deps/v8/src/regexp/gen-regexp-special-case.cc
+++ b/deps/v8/src/regexp/gen-regexp-special-case.cc
@@ -1,4 +1,4 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
+// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,19 +7,19 @@
#include <iostream>
#include <sstream>
-#include "src/base/logging.h"
-#include "unicode/uchar.h"
-#include "unicode/uniset.h"
+#include "src/regexp/special-case.h"
namespace v8 {
namespace internal {
-// The following code generates BuildSpecialAddSet() and BuildIgnoreSet()
-// functions into "src/regexp/special-case.cc".
-// See more details in http://shorturl.at/adfO5
-void PrintSet(std::ofstream& out, const char* func_name,
+static const uc32 kSurrogateStart = 0xd800;
+static const uc32 kSurrogateEnd = 0xdfff;
+static const uc32 kNonBmpStart = 0x10000;
+
+// The following code generates "src/regexp/special-case.cc".
+void PrintSet(std::ofstream& out, const char* name,
const icu::UnicodeSet& set) {
- out << "icu::UnicodeSet " << func_name << "() {\n"
+ out << "icu::UnicodeSet Build" << name << "() {\n"
<< " icu::UnicodeSet set;\n";
for (int32_t i = 0; i < set.getRangeCount(); i++) {
if (set.getRangeStart(i) == set.getRangeEnd(i)) {
@@ -31,73 +31,113 @@ void PrintSet(std::ofstream& out, const char* func_name,
}
out << " set.freeze();\n"
<< " return set;\n"
- << "}\n";
+ << "}\n\n";
+
+ out << "struct " << name << "Data {\n"
+ << " " << name << "Data() : set(Build" << name << "()) {}\n"
+ << " const icu::UnicodeSet set;\n"
+ << "};\n\n";
+
+ out << "//static\n"
+ << "const icu::UnicodeSet& RegExpCaseFolding::" << name << "() {\n"
+ << " static base::LazyInstance<" << name << "Data>::type set =\n"
+ << " LAZY_INSTANCE_INITIALIZER;\n"
+ << " return set.Pointer()->set;\n"
+ << "}\n\n";
}
void PrintSpecial(std::ofstream& out) {
icu::UnicodeSet current;
- icu::UnicodeSet processed(0xd800, 0xdbff); // Ignore surrogate range.
icu::UnicodeSet special_add;
icu::UnicodeSet ignore;
UErrorCode status = U_ZERO_ERROR;
icu::UnicodeSet upper("[\\p{Lu}]", status);
CHECK(U_SUCCESS(status));
- // Iterate through all chars in BMP except ASCII and Surrogate.
- for (UChar32 i = 0x80; i < 0x010000; i++) {
- // Ignore those characters which is already processed.
- if (!processed.contains(i)) {
- current.set(i, i);
- current.closeOver(USET_CASE_INSENSITIVE);
- // Remember we already processed current.
- processed.addAll(current);
-
- // All uppercase characters in current.
- icu::UnicodeSet keep_upper(current);
- keep_upper.retainAll(upper);
-
- // Check if we have more than one uppercase character in current.
- // If there are more than one uppercase character, then it is a special
- // set which need to be added into either "Special Add" set or "Ignore"
- // set.
- int32_t number_of_upper = 0;
- for (int32_t i = 0; i < keep_upper.getRangeCount() && i <= 1; i++) {
- number_of_upper +=
- keep_upper.getRangeEnd(i) - keep_upper.getRangeStart(i) + 1;
+ // Iterate through all chars in BMP except surrogates.
+ for (UChar32 i = 0; i < kNonBmpStart; i++) {
+ if (i >= kSurrogateStart && i <= kSurrogateEnd) {
+ continue; // Ignore surrogate range
+ }
+ current.set(i, i);
+ current.closeOver(USET_CASE_INSENSITIVE);
+
+ // Check to see if all characters in the case-folding equivalence
+ // class as defined by UnicodeSet::closeOver all map to the same
+ // canonical value.
+ UChar32 canonical = RegExpCaseFolding::Canonicalize(i);
+ bool class_has_matching_canonical_char = false;
+ bool class_has_non_matching_canonical_char = false;
+ for (int32_t j = 0; j < current.getRangeCount(); j++) {
+ for (UChar32 c = current.getRangeStart(j); c <= current.getRangeEnd(j);
+ c++) {
+ if (c == i) {
+ continue;
+ }
+ UChar32 other_canonical = RegExpCaseFolding::Canonicalize(c);
+ if (canonical == other_canonical) {
+ class_has_matching_canonical_char = true;
+ } else {
+ class_has_non_matching_canonical_char = true;
+ }
+ }
+ }
+ // If any other character in i's equivalence class has a
+ // different canonical value, then i needs special handling. If
+ // no other character shares a canonical value with i, we can
+ // ignore i when adding alternatives for case-independent
+ // comparison. If at least one other character shares a
+ // canonical value, then i needs special handling.
+ if (class_has_non_matching_canonical_char) {
+ if (class_has_matching_canonical_char) {
+ special_add.add(i);
+ } else {
+ ignore.add(i);
}
- if (number_of_upper > 1) {
- // Add all non uppercase characters (could be Ll or Mn) to special add
- // set.
- current.removeAll(upper);
- special_add.addAll(current);
-
- // Add the uppercase characters of non uppercase character to
- // special add set.
- CHECK_GT(current.getRangeCount(), 0);
- UChar32 main_upper = u_toupper(current.getRangeStart(0));
- special_add.add(main_upper);
-
- // Add all uppercase except the main upper to ignore set.
- keep_upper.remove(main_upper);
- ignore.addAll(keep_upper);
+ }
+ }
+
+ // Verify that no Unicode equivalence class contains two non-trivial
+ // JS equivalence classes. Every character in SpecialAddSet has the
+ // same canonical value as every other non-IgnoreSet character in
+ // its Unicode equivalence class. Therefore, if we call closeOver on
+ // a set containing no IgnoreSet characters, the only characters
+ // that must be removed from the result are in IgnoreSet. This fact
+ // is used in CharacterRange::AddCaseEquivalents.
+ for (int32_t i = 0; i < special_add.getRangeCount(); i++) {
+ for (UChar32 c = special_add.getRangeStart(i);
+ c <= special_add.getRangeEnd(i); c++) {
+ UChar32 canonical = RegExpCaseFolding::Canonicalize(c);
+ current.set(c, c);
+ current.closeOver(USET_CASE_INSENSITIVE);
+ current.removeAll(ignore);
+ for (int32_t j = 0; j < current.getRangeCount(); j++) {
+ for (UChar32 c2 = current.getRangeStart(j);
+ c2 <= current.getRangeEnd(j); c2++) {
+ CHECK_EQ(canonical, RegExpCaseFolding::Canonicalize(c2));
+ }
}
}
}
- // Remove any ASCII
- special_add.remove(0x0000, 0x007f);
- PrintSet(out, "BuildIgnoreSet", ignore);
- PrintSet(out, "BuildSpecialAddSet", special_add);
+ PrintSet(out, "IgnoreSet", ignore);
+ PrintSet(out, "SpecialAddSet", special_add);
}
void WriteHeader(const char* header_filename) {
std::ofstream out(header_filename);
out << std::hex << std::setfill('0') << std::setw(4);
-
- out << "// Automatically generated by regexp/gen-regexp-special-case.cc\n"
- << "// The following functions are used to build icu::UnicodeSet\n"
- << "// for specical cases different between Unicode and ECMA262.\n"
+ out << "// Copyright 2020 the V8 project authors. All rights reserved.\n"
+ << "// Use of this source code is governed by a BSD-style license that\n"
+ << "// can be found in the LICENSE file.\n\n"
+ << "// Automatically generated by regexp/gen-regexp-special-case.cc\n\n"
+ << "// The following functions are used to build UnicodeSets\n"
+ << "// for special cases where the case-folding algorithm used by\n"
+ << "// UnicodeSet::closeOver(USET_CASE_INSENSITIVE) does not match\n"
+ << "// the algorithm defined in ECMAScript 2020 21.2.2.8.2 (Runtime\n"
+ << "// Semantics: Canonicalize) step 3.\n\n"
<< "#ifdef V8_INTL_SUPPORT\n"
+ << "#include \"src/base/lazy-instance.h\"\n\n"
<< "#include \"src/regexp/special-case.h\"\n\n"
<< "#include \"unicode/uniset.h\"\n"
<< "namespace v8 {\n"
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index f9015287f9..7f6bd5e296 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -205,9 +205,8 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
@@ -314,18 +313,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
-// Isolate* isolate or 0 if unicode flag.
+ // Isolate* isolate.
// Set isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ mov(Operand(esp, 3 * kSystemPointerSize), Immediate(0));
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ mov(Operand(esp, 3 * kSystemPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- }
+ __ mov(Operand(esp, 3 * kSystemPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
// Set byte_length.
__ mov(Operand(esp, 2 * kSystemPointerSize), ebx);
// Set byte_offset2.
@@ -366,7 +358,6 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerIA32::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index b2c6fab7b3..f68dd0b1b7 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 1e7839c219..e3f2ea6292 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -106,6 +106,8 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
+ masm_->set_root_array_available(false);
+
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -223,9 +225,8 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
}
-
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ lw(a0, register_location(start_reg)); // Index of start of capture.
__ lw(a1, register_location(start_reg + 1)); // Index of end of capture.
@@ -320,7 +321,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// a0: Address byte_offset1 - Address captured substring's start.
// a1: Address byte_offset2 - Address of current character position.
// a2: size_t byte_length - length of capture in bytes(!).
- // a3: Isolate* isolate or 0 if unicode flag.
+ // a3: Isolate* isolate.
// Address of start of capture.
__ Addu(a0, a0, Operand(end_of_input_address()));
@@ -334,14 +335,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Subu(a1, a1, Operand(s3));
}
// Isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ mov(a3, zero_reg);
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
- }
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -368,7 +362,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 9281b0174d..5733bbe046 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 3dd1548685..fc3cad8b0e 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -142,6 +142,8 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
+ masm_->set_root_array_available(false);
+
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -259,9 +261,8 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
}
-
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ Ld(a0, register_location(start_reg)); // Index of start of capture.
__ Ld(a1, register_location(start_reg + 1)); // Index of end of capture.
@@ -356,7 +357,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// a0: Address byte_offset1 - Address captured substring's start.
// a1: Address byte_offset2 - Address of current character position.
// a2: size_t byte_length - length of capture in bytes(!).
- // a3: Isolate* isolate or 0 if unicode flag.
+ // a3: Isolate* isolate.
// Address of start of capture.
__ Daddu(a0, a0, Operand(end_of_input_address()));
@@ -370,14 +371,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Dsubu(a1, a1, Operand(s3));
}
// Isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ mov(a3, zero_reg);
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
- }
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -404,7 +398,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index bc7f83e6e9..b267297c24 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 50bf71e6d5..376103324a 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
@@ -111,6 +111,8 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
+ masm_->set_root_array_available(false);
+
DCHECK_EQ(0, registers_to_save % 2);
@@ -123,7 +125,6 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
__ bind(&start_label_); // And then continue from here.
}
-
RegExpMacroAssemblerPPC::~RegExpMacroAssemblerPPC() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
@@ -241,7 +242,7 @@ void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ LoadP(r3, register_location(start_reg), r0); // Index of start of capture
__ LoadP(r4, register_location(start_reg + 1), r0); // Index of end
@@ -336,7 +337,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// r3: Address byte_offset1 - Address captured substring's start.
// r4: Address byte_offset2 - Address of current character position.
// r5: size_t byte_length - length of capture in bytes(!)
- // r6: Isolate* isolate or 0 if unicode flag.
+ // r6: Isolate* isolate.
// Address of start of capture.
__ add(r3, r3, end_of_input_address());
@@ -350,14 +351,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ sub(r4, r4, r25);
}
// Isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ li(r6, Operand::Zero());
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
- }
+ __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -381,7 +375,6 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
@@ -1371,4 +1364,4 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index c726a5f0d7..3e64f139a8 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -36,7 +36,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 3de29512ea..a9106d3d30 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -463,7 +463,11 @@ class RegExpQuantifier final : public RegExpTree {
class RegExpCapture final : public RegExpTree {
public:
explicit RegExpCapture(int index)
- : body_(nullptr), index_(index), name_(nullptr) {}
+ : body_(nullptr),
+ index_(index),
+ min_match_(0),
+ max_match_(0),
+ name_(nullptr) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
static RegExpNode* ToNode(RegExpTree* body, int index,
@@ -473,10 +477,14 @@ class RegExpCapture final : public RegExpTree {
bool IsAnchoredAtEnd() override;
Interval CaptureRegisters() override;
bool IsCapture() override;
- int min_match() override { return body_->min_match(); }
- int max_match() override { return body_->max_match(); }
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
RegExpTree* body() { return body_; }
- void set_body(RegExpTree* body) { body_ = body; }
+ void set_body(RegExpTree* body) {
+ body_ = body;
+ min_match_ = body->min_match();
+ max_match_ = body->max_match();
+ }
int index() const { return index_; }
const ZoneVector<uc16>* name() const { return name_; }
void set_name(const ZoneVector<uc16>* name) { name_ = name; }
@@ -486,12 +494,17 @@ class RegExpCapture final : public RegExpTree {
private:
RegExpTree* body_;
int index_;
+ int min_match_;
+ int max_match_;
const ZoneVector<uc16>* name_;
};
class RegExpGroup final : public RegExpTree {
public:
- explicit RegExpGroup(RegExpTree* body) : body_(body) {}
+ explicit RegExpGroup(RegExpTree* body)
+ : body_(body),
+ min_match_(body->min_match()),
+ max_match_(body->max_match()) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) override {
@@ -501,13 +514,15 @@ class RegExpGroup final : public RegExpTree {
bool IsAnchoredAtStart() override { return body_->IsAnchoredAtStart(); }
bool IsAnchoredAtEnd() override { return body_->IsAnchoredAtEnd(); }
bool IsGroup() override;
- int min_match() override { return body_->min_match(); }
- int max_match() override { return body_->max_match(); }
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
Interval CaptureRegisters() override { return body_->CaptureRegisters(); }
RegExpTree* body() { return body_; }
private:
RegExpTree* body_;
+ int min_match_;
+ int max_match_;
};
class RegExpLookaround final : public RegExpTree {
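
Caching min_match_/max_match_ when the body is attached turns what was a recursive walk of the subtree on every min_match()/max_match() query into a constant-time field read; for deeply nested patterns the old recursion was presumably both a hot spot and a stack hazard (the rationale is inferred, not stated in the patch). It also makes the accessors safe to call before set_body(), since the constructor now initializes the cached values to zero. The idiom in isolation:

    // before: re-walks the body on every query
    int min_match() override { return body_->min_match(); }
    // after: computed once when the body is attached, O(1) thereafter
    void set_body(RegExpTree* body) {
      body_ = body;
      min_match_ = body->min_match();
      max_match_ = body->max_match();
    }
    int min_match() override { return min_match_; }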
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 0dcc288d3c..e82b67b530 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -329,13 +329,11 @@ void RegExpBytecodeGenerator::CheckNotBackReference(int start_reg,
}
void RegExpBytecodeGenerator::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_not_equal) {
+ int start_reg, bool read_backward, Label* on_not_equal) {
DCHECK_LE(0, start_reg);
DCHECK_GE(kMaxRegister, start_reg);
- Emit(read_backward ? (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD
- : BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD)
- : (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE
- : BC_CHECK_NOT_BACK_REF_NO_CASE),
+ Emit(read_backward ? BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD
+ : BC_CHECK_NOT_BACK_REF_NO_CASE,
start_reg);
EmitOrLink(on_not_equal);
}
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index dfcc2ca5f8..85073cc99d 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -69,7 +69,7 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match);
virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
diff --git a/deps/v8/src/regexp/regexp-bytecode-peephole.cc b/deps/v8/src/regexp/regexp-bytecode-peephole.cc
index 8f1f1d95a9..f0957f0779 100644
--- a/deps/v8/src/regexp/regexp-bytecode-peephole.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-peephole.cc
@@ -436,7 +436,6 @@ BytecodeArgumentMapping BytecodeSequenceNode::ArgumentMapping(
size_t index) const {
DCHECK(IsSequence());
DCHECK(argument_mapping_ != nullptr);
- DCHECK_GE(index, 0);
DCHECK_LT(index, argument_mapping_->size());
return argument_mapping_->at(index);
diff --git a/deps/v8/src/regexp/regexp-bytecodes.h b/deps/v8/src/regexp/regexp-bytecodes.h
index e25945d0a0..1664a476d2 100644
--- a/deps/v8/src/regexp/regexp-bytecodes.h
+++ b/deps/v8/src/regexp/regexp-bytecodes.h
@@ -101,12 +101,12 @@ STATIC_ASSERT(1 << BYTECODE_SHIFT > BYTECODE_MASK);
V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
- V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
- V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
- V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE, 39, 8) \
+ V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE, 39, 8) /* UNUSED */ \
V(CHECK_NOT_BACK_REF_BACKWARD, 40, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD, 41, 8) /* bc8 reg_idx24 addr32 */ \
- V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD, 42, 8) \
+ V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD, 42, 8) /* UNUSED */ \
V(CHECK_NOT_REGS_EQUAL, 43, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
V(CHECK_REGISTER_LT, 44, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_GE, 45, 12) /* bc8 reg_idx24 value32 addr32 */ \
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index 2d86d3ea9e..40ecee0f91 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -1140,39 +1140,6 @@ Vector<const int> CharacterRange::GetWordBounds() {
return Vector<const int>(kWordRanges, kWordRangeCount - 1);
}
-#ifdef V8_INTL_SUPPORT
-struct IgnoreSet {
- IgnoreSet() : set(BuildIgnoreSet()) {}
- const icu::UnicodeSet set;
-};
-
-struct SpecialAddSet {
- SpecialAddSet() : set(BuildSpecialAddSet()) {}
- const icu::UnicodeSet set;
-};
-
-icu::UnicodeSet BuildAsciiAToZSet() {
- icu::UnicodeSet set('a', 'z');
- set.add('A', 'Z');
- set.freeze();
- return set;
-}
-
-struct AsciiAToZSet {
- AsciiAToZSet() : set(BuildAsciiAToZSet()) {}
- const icu::UnicodeSet set;
-};
-
-static base::LazyInstance<IgnoreSet>::type ignore_set =
- LAZY_INSTANCE_INITIALIZER;
-
-static base::LazyInstance<SpecialAddSet>::type special_add_set =
- LAZY_INSTANCE_INITIALIZER;
-
-static base::LazyInstance<AsciiAToZSet>::type ascii_a_to_z_set =
- LAZY_INSTANCE_INITIALIZER;
-#endif // V8_INTL_SUPPORT
-
// static
void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
ZoneList<CharacterRange>* ranges,
@@ -1195,75 +1162,22 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
others.add(from, to);
}
- // Set of characters already added to ranges that do not need to be added
- // again.
+ // Compute the set of additional characters that should be added,
+ // using UnicodeSet::closeOver. ECMA 262 defines slightly different
+ // case-folding rules than Unicode, so some characters that are
+ // added by closeOver do not match anything other than themselves in
+ // JS. For example, 'ſ' (U+017F LATIN SMALL LETTER LONG S) is the
+ // same case-insensitive character as 's' or 'S' according to
+ // Unicode, but does not match any other character in JS. To handle
+ // this case, we add such characters to the IgnoreSet and filter
+ // them out. We filter twice: once before calling closeOver (to
+ // prevent 'ſ' from adding 's'), and once after calling closeOver
+ // (to prevent 's' from adding 'ſ'). See regexp/special-case.h for
+ // more information.
icu::UnicodeSet already_added(others);
-
- // Set of characters in ranges that are in the 52 ASCII characters [a-zA-Z].
- icu::UnicodeSet in_ascii_a_to_z(others);
- in_ascii_a_to_z.retainAll(ascii_a_to_z_set.Pointer()->set);
-
- // Remove all chars in [a-zA-Z] from others.
- others.removeAll(in_ascii_a_to_z);
-
- // Set of characters in ranges that are overlapping with special add set.
- icu::UnicodeSet in_special_add(others);
- in_special_add.retainAll(special_add_set.Pointer()->set);
-
- others.removeAll(in_special_add);
-
- // Ignore all chars in ignore set.
- others.removeAll(ignore_set.Pointer()->set);
-
- // For most of the chars in ranges that is still in others, find the case
- // equivlant set by calling closeOver(USET_CASE_INSENSITIVE).
+ others.removeAll(RegExpCaseFolding::IgnoreSet());
others.closeOver(USET_CASE_INSENSITIVE);
-
- // Because closeOver(USET_CASE_INSENSITIVE) may add ASCII [a-zA-Z] to others,
- // but ECMA262 "i" mode won't consider that, remove them from others.
- // Ex: U+017F add 'S' and 's' to others.
- others.removeAll(ascii_a_to_z_set.Pointer()->set);
-
- // Special handling for in_ascii_a_to_z.
- for (int32_t i = 0; i < in_ascii_a_to_z.getRangeCount(); i++) {
- UChar32 start = in_ascii_a_to_z.getRangeStart(i);
- UChar32 end = in_ascii_a_to_z.getRangeEnd(i);
- // Check if it is uppercase A-Z by checking bit 6.
- if (start & 0x0020) {
- // Add the lowercases
- others.add(start & 0x005F, end & 0x005F);
- } else {
- // Add the uppercases
- others.add(start | 0x0020, end | 0x0020);
- }
- }
-
- // Special handling for chars in "Special Add" set.
- for (int32_t i = 0; i < in_special_add.getRangeCount(); i++) {
- UChar32 end = in_special_add.getRangeEnd(i);
- for (UChar32 ch = in_special_add.getRangeStart(i); ch <= end; ch++) {
- // Add the uppercase of this character if itself is not an uppercase
- // character.
- // Note: The if condiction cannot be u_islower(ch) because ch could be
- // neither uppercase nor lowercase but Mn.
- if (!u_isupper(ch)) {
- others.add(u_toupper(ch));
- }
- icu::UnicodeSet candidates(ch, ch);
- candidates.closeOver(USET_CASE_INSENSITIVE);
- for (int32_t j = 0; j < candidates.getRangeCount(); j++) {
- UChar32 end2 = candidates.getRangeEnd(j);
- for (UChar32 ch2 = candidates.getRangeStart(j); ch2 <= end2; ch2++) {
- // Add character that is not uppercase to others.
- if (!u_isupper(ch2)) {
- others.add(ch2);
- }
- }
- }
- }
- }
-
- // Remove all characters which already in the ranges.
+ others.removeAll(RegExpCaseFolding::IgnoreSet());
others.removeAll(already_added);
// Add others to the ranges
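
The net effect is that all the hand-maintained ASCII and special-add bookkeeping collapses into two IgnoreSet filters around a single closeOver call. A minimal sketch, assuming ICU and the generated RegExpCaseFolding::IgnoreSet():

    icu::UnicodeSet others;
    others.add(u's');
    others.removeAll(RegExpCaseFolding::IgnoreSet());  // pre-filter: an input of
                                                       // U+017F would be dropped
                                                       // here, before it adds 's'
    others.closeOver(USET_CASE_INSENSITIVE);           // now {S, s, U+017F}
    others.removeAll(RegExpCaseFolding::IgnoreSet());  // post-filter: strips the
                                                       // U+017F closeOver added

The SpecialAddSet/Canonicalize machinery then handles the remaining characters one at a time in GetCaseIndependentLetters below.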
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index d141f3c490..a6c7cdbe2f 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -8,7 +8,9 @@
#include "src/execution/isolate.h"
#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-macro-assembler-arch.h"
-#include "src/regexp/regexp-macro-assembler-tracer.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/regexp/special-case.h"
+#endif // V8_INTL_SUPPORT
#include "src/strings/unicode-inl.h"
#include "src/zone/zone-list-inl.h"
@@ -242,20 +244,15 @@ RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
RegExpCompiler::CompilationResult RegExpCompiler::Assemble(
Isolate* isolate, RegExpMacroAssembler* macro_assembler, RegExpNode* start,
int capture_count, Handle<String> pattern) {
-#ifdef DEBUG
- if (FLAG_trace_regexp_assembler)
- macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler);
- else
-#endif
- macro_assembler_ = macro_assembler;
+ macro_assembler_ = macro_assembler;
- std::vector<RegExpNode*> work_list;
+ ZoneVector<RegExpNode*> work_list(zone());
work_list_ = &work_list;
Label fail;
macro_assembler_->PushBacktrack(&fail);
Trace new_trace;
start->Emit(this, &new_trace);
- macro_assembler_->Bind(&fail);
+ macro_assembler_->BindJumpTarget(&fail);
macro_assembler_->Fail();
while (!work_list.empty()) {
RegExpNode* node = work_list.back();
@@ -269,14 +266,9 @@ RegExpCompiler::CompilationResult RegExpCompiler::Assemble(
}
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
- isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
+ isolate->IncreaseTotalRegexpCodeGenerated(code);
work_list_ = nullptr;
-#ifdef DEBUG
- if (FLAG_trace_regexp_assembler) {
- delete macro_assembler_;
- }
-#endif
return {*code, next_register_};
}
@@ -562,7 +554,7 @@ void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
}
// On backtrack we need to restore state.
- assembler->Bind(&undo);
+ assembler->BindJumpTarget(&undo);
RestoreAffectedRegisters(assembler, max_register, registers_to_pop,
registers_to_clear);
if (backtrack() == nullptr) {
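
The Bind → BindJumpTarget switch at the fail and undo labels marks them as targets of indirect jumps (they are reached through the backtrack stack, not fall-through), which is what lets the arm64 port emit BTI landing pads there. On ports without branch protection the base class presumably keeps the trivial behavior:

    // assumed default for ports without branch protection:
    virtual void BindJumpTarget(Label* label = nullptr) { Bind(label); }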
@@ -725,32 +717,34 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
unibrow::uchar* letters,
int letter_length) {
#ifdef V8_INTL_SUPPORT
- // Special case for U+017F which has upper case in ASCII range.
- if (character == 0x017f) {
+ if (RegExpCaseFolding::IgnoreSet().contains(character)) {
letters[0] = character;
return 1;
}
+ bool in_special_add_set =
+ RegExpCaseFolding::SpecialAddSet().contains(character);
+
icu::UnicodeSet set;
set.add(character);
set = set.closeOver(USET_CASE_INSENSITIVE);
+
+ UChar32 canon = 0;
+ if (in_special_add_set) {
+ canon = RegExpCaseFolding::Canonicalize(character);
+ }
+
int32_t range_count = set.getRangeCount();
int items = 0;
for (int32_t i = 0; i < range_count; i++) {
UChar32 start = set.getRangeStart(i);
UChar32 end = set.getRangeEnd(i);
CHECK(end - start + items <= letter_length);
- // Only add to the output if character is not in ASCII range
- // or the case equivalent character is in ASCII range.
- // #sec-runtime-semantics-canonicalize-ch
- // 3.g If the numeric value of ch ≥ 128 and the numeric value of cu < 128,
- // return ch.
- if (!((start >= 128) && (character < 128))) {
- // No range have start and end span across code point 128.
- DCHECK((start >= 128) == (end >= 128));
- for (UChar32 cu = start; cu <= end; cu++) {
- if (one_byte_subject && cu > String::kMaxOneByteCharCode) break;
- letters[items++] = (unibrow::uchar)(cu);
+ for (UChar32 cu = start; cu <= end; cu++) {
+ if (one_byte_subject && cu > String::kMaxOneByteCharCode) break;
+ if (in_special_add_set && RegExpCaseFolding::Canonicalize(cu) != canon) {
+ continue;
}
+ letters[items++] = (unibrow::uchar)(cu);
}
}
return items;
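
Worked through for quark 's' (an illustration, not from the patch): closeOver yields {S, s, ſ}; 's' is in SpecialAddSet with canon = Canonicalize('s') = 'S', so the loop keeps S and s and skips ſ, whose canonical value is itself. For an IgnoreSet character such as ſ the function returns early with just the character, generalizing the old U+017F special case that the patch deletes. A hypothetical trace, assuming the elided middle of the signature carries the one_byte_subject flag used in the body:

    unibrow::uchar letters[4];
    int n = GetCaseIndependentLetters(isolate, u's', /*one_byte_subject=*/true,
                                      letters, 4);
    // n == 2: {'S', 's'}; U+017F is filtered out because its canonical value
    // (itself, by step 3.g) differs from Canonicalize('s') == 'S'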
@@ -857,10 +851,6 @@ static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
return false;
}
-using EmitCharacterFunction = bool(Isolate* isolate, RegExpCompiler* compiler,
- uc16 c, Label* on_failure, int cp_offset,
- bool check, bool preloaded);
-
// Only emits letters (things that have case). Only used for case independent
// matches.
static inline bool EmitAtomLetter(Isolate* isolate, RegExpCompiler* compiler,
@@ -1848,13 +1838,13 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
if (elm.text_type() == TextElement::ATOM) {
Vector<const uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
- uint16_t c = quarks[j];
+ uc16 c = quarks[j];
if (elm.atom()->ignore_case()) {
c = unibrow::Latin1::TryConvertToLatin1(c);
}
if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
// Replace quark in case we converted to Latin-1.
- uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.begin());
+ uc16* writable_quarks = const_cast<uc16*>(quarks.begin());
writable_quarks[j] = c;
}
} else {
@@ -2309,7 +2299,6 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
- EmitCharacterFunction* emit_function = nullptr;
uc16 quark = quarks[j];
if (elm.atom()->ignore_case()) {
// Everywhere else we assume that a non-Latin-1 character cannot match
@@ -2317,6 +2306,9 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
// invalid by using the Latin1 equivalent instead.
quark = unibrow::Latin1::TryConvertToLatin1(quark);
}
+ bool needs_bounds_check =
+ *checked_up_to < cp_offset + j || read_backward();
+ bool bounds_checked = false;
switch (pass) {
case NON_LATIN1_MATCH:
DCHECK(one_byte);
@@ -2326,24 +2318,24 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
}
break;
case NON_LETTER_CHARACTER_MATCH:
- emit_function = &EmitAtomNonLetter;
+ bounds_checked =
+ EmitAtomNonLetter(isolate, compiler, quark, backtrack,
+ cp_offset + j, needs_bounds_check, preloaded);
break;
case SIMPLE_CHARACTER_MATCH:
- emit_function = &EmitSimpleCharacter;
+ bounds_checked = EmitSimpleCharacter(isolate, compiler, quark,
+ backtrack, cp_offset + j,
+ needs_bounds_check, preloaded);
break;
case CASE_CHARACTER_MATCH:
- emit_function = &EmitAtomLetter;
+ bounds_checked =
+ EmitAtomLetter(isolate, compiler, quark, backtrack,
+ cp_offset + j, needs_bounds_check, preloaded);
break;
default:
break;
}
- if (emit_function != nullptr) {
- bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
- bool bound_checked =
- emit_function(isolate, compiler, quark, backtrack, cp_offset + j,
- bounds_check, preloaded);
- if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
- }
+ if (bounds_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
}
} else {
DCHECK_EQ(TextElement::CHAR_CLASS, elm.text_type());
@@ -3429,8 +3421,8 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
DCHECK_EQ(start_reg_ + 1, end_reg_);
if (IgnoreCase(flags_)) {
- assembler->CheckNotBackReferenceIgnoreCase(
- start_reg_, read_backward(), IsUnicode(flags_), trace->backtrack());
+ assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
+ trace->backtrack());
} else {
assembler->CheckNotBackReference(start_reg_, read_backward(),
trace->backtrack());
@@ -3602,12 +3594,17 @@ template <typename... Propagators>
class Analysis : public NodeVisitor {
public:
Analysis(Isolate* isolate, bool is_one_byte)
- : isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
+ : isolate_(isolate),
+ is_one_byte_(is_one_byte),
+ error_(RegExpError::kNone) {}
void EnsureAnalyzed(RegExpNode* that) {
StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
- fail("Stack overflow");
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("Analysis: Aborting on stack overflow");
+ }
+ fail(RegExpError::kAnalysisStackOverflow);
return;
}
if (that->info()->been_analyzed || that->info()->being_analyzed) return;
@@ -3617,12 +3614,12 @@ class Analysis : public NodeVisitor {
that->info()->been_analyzed = true;
}
- bool has_failed() { return error_message_ != nullptr; }
- const char* error_message() {
- DCHECK(error_message_ != nullptr);
- return error_message_;
+ bool has_failed() { return error_ != RegExpError::kNone; }
+ RegExpError error() {
+ DCHECK(error_ != RegExpError::kNone);
+ return error_;
}
- void fail(const char* error_message) { error_message_ = error_message; }
+ void fail(RegExpError error) { error_ = error; }
Isolate* isolate() const { return isolate_; }
@@ -3707,19 +3704,19 @@ class Analysis : public NodeVisitor {
private:
Isolate* isolate_;
bool is_one_byte_;
- const char* error_message_;
+ RegExpError error_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
};
-const char* AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
RegExpNode* node) {
Analysis<AssertionPropagator, EatsAtLeastPropagator> analysis(isolate,
is_one_byte);
DCHECK_EQ(node->info()->been_analyzed, false);
analysis.EnsureAnalyzed(node);
- DCHECK_IMPLIES(analysis.has_failed(), analysis.error_message() != nullptr);
- return analysis.has_failed() ? analysis.error_message() : nullptr;
+ DCHECK_IMPLIES(analysis.has_failed(), analysis.error() != RegExpError::kNone);
+ return analysis.has_failed() ? analysis.error() : RegExpError::kNone;
}
void BackReferenceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
index 2de221f35d..d083d5d9dd 100644
--- a/deps/v8/src/regexp/regexp-compiler.h
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -423,10 +423,7 @@ struct PreloadState {
// Analysis performs assertion propagation and computes eats_at_least_ values.
// See the comments on AssertionPropagator and EatsAtLeastPropagator for more
// details.
-//
-// This method returns nullptr on success or a null-terminated failure message
-// on failure.
-const char* AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpNode* node);
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpNode* node);
class FrequencyCollator {
public:
@@ -503,18 +500,17 @@ class RegExpCompiler {
}
struct CompilationResult final {
- explicit CompilationResult(const char* error_message)
- : error_message(error_message) {}
+ explicit CompilationResult(RegExpError err) : error(err) {}
CompilationResult(Object code, int registers)
: code(code), num_registers(registers) {}
static CompilationResult RegExpTooBig() {
- return CompilationResult("RegExp too big");
+ return CompilationResult(RegExpError::kTooLarge);
}
- bool Succeeded() const { return error_message == nullptr; }
+ bool Succeeded() const { return error == RegExpError::kNone; }
- const char* const error_message = nullptr;
+ const RegExpError error = RegExpError::kNone;
Object code;
int num_registers = 0;
};
@@ -576,7 +572,7 @@ class RegExpCompiler {
int next_register_;
int unicode_lookaround_stack_register_;
int unicode_lookaround_position_register_;
- std::vector<RegExpNode*>* work_list_;
+ ZoneVector<RegExpNode*>* work_list_;
int recursion_depth_;
RegExpMacroAssembler* macro_assembler_;
bool one_byte_;
diff --git a/deps/v8/src/regexp/regexp-error.cc b/deps/v8/src/regexp/regexp-error.cc
new file mode 100644
index 0000000000..d7763c64f8
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-error.cc
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-error.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const kRegExpErrorStrings[] = {
+#define TEMPLATE(NAME, STRING) STRING,
+ REGEXP_ERROR_MESSAGES(TEMPLATE)
+#undef TEMPLATE
+};
+
+const char* RegExpErrorString(RegExpError error) {
+ DCHECK_LT(error, RegExpError::NumErrors);
+ return kRegExpErrorStrings[static_cast<int>(error)];
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-error.h b/deps/v8/src/regexp/regexp-error.h
new file mode 100644
index 0000000000..6145b404ab
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-error.h
@@ -0,0 +1,58 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_ERROR_H_
+#define V8_REGEXP_REGEXP_ERROR_H_
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+#define REGEXP_ERROR_MESSAGES(T) \
+ T(None, "") \
+ T(StackOverflow, "Maximum call stack size exceeded") \
+ T(AnalysisStackOverflow, "Stack overflow") \
+ T(TooLarge, "Regular expression too large") \
+ T(UnterminatedGroup, "Unterminated group") \
+ T(UnmatchedParen, "Unmatched ')'") \
+ T(EscapeAtEndOfPattern, "\\ at end of pattern") \
+ T(InvalidPropertyName, "Invalid property name") \
+ T(InvalidEscape, "Invalid escape") \
+ T(InvalidDecimalEscape, "Invalid decimal escape") \
+ T(InvalidUnicodeEscape, "Invalid Unicode escape") \
+ T(NothingToRepeat, "Nothing to repeat") \
+ T(LoneQuantifierBrackets, "Lone quantifier brackets") \
+ T(RangeOutOfOrder, "numbers out of order in {} quantifier") \
+ T(IncompleteQuantifier, "Incomplete quantifier") \
+ T(InvalidQuantifier, "Invalid quantifier") \
+ T(InvalidGroup, "Invalid group") \
+ T(MultipleFlagDashes, "Multiple dashes in flag group") \
+ T(RepeatedFlag, "Repeated flag in flag group") \
+ T(InvalidFlagGroup, "Invalid flag group") \
+ T(TooManyCaptures, "Too many captures") \
+ T(InvalidCaptureGroupName, "Invalid capture group name") \
+ T(DuplicateCaptureGroupName, "Duplicate capture group name") \
+ T(InvalidNamedReference, "Invalid named reference") \
+ T(InvalidNamedCaptureReference, "Invalid named capture referenced") \
+ T(InvalidClassEscape, "Invalid class escape") \
+ T(InvalidClassPropertyName, "Invalid property name in character class") \
+ T(InvalidCharacterClass, "Invalid character class") \
+ T(UnterminatedCharacterClass, "Unterminated character class") \
+ T(OutOfOrderCharacterClass, "Range out of order in character class")
+
+enum class RegExpError : uint32_t {
+#define TEMPLATE(NAME, STRING) k##NAME,
+ REGEXP_ERROR_MESSAGES(TEMPLATE)
+#undef TEMPLATE
+ NumErrors
+};
+
+V8_EXPORT_PRIVATE const char* RegExpErrorString(RegExpError error);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_ERROR_H_
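
With errors now carried as an enum, call sites convert to text only at the boundary; a hypothetical use mirroring the AnalyzeRegExp change above:

    RegExpError err = AnalyzeRegExp(isolate, is_one_byte, node);
    if (err != RegExpError::kNone) {
      const char* message = RegExpErrorString(err);  // e.g. "Stack overflow"
      // ... surface message to the caller ...
    }

This also lets CompilationResult::Succeeded() reduce to an enum compare instead of a null test on a message pointer.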
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index a74df90c1d..d3efa65bf1 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -35,18 +35,18 @@ namespace internal {
namespace {
bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len,
- Vector<const uc16> subject, bool unicode) {
+ Vector<const uc16> subject) {
Address offset_a =
reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(from)));
Address offset_b =
reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(current)));
size_t length = len * kUC16Size;
- return RegExpMacroAssembler::CaseInsensitiveCompareUC16(
- offset_a, offset_b, length, unicode ? nullptr : isolate) == 1;
+ return RegExpMacroAssembler::CaseInsensitiveCompareUC16(offset_a, offset_b,
+ length, isolate) == 1;
}
bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len,
- Vector<const uint8_t> subject, bool unicode) {
+ Vector<const uint8_t> subject) {
// For Latin1 characters the unicode flag makes no difference.
for (int i = 0; i < len; i++) {
unsigned int old_char = subject[from++];
@@ -747,26 +747,14 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
DISPATCH();
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from >= 0 && len > 0) {
- if (current + len > subject.length() ||
- !BackRefMatchesNoCase(isolate, from, current, len, subject, true)) {
- SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
- DISPATCH();
- }
- current += len;
- }
- ADVANCE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE);
- DISPATCH();
+ UNREACHABLE(); // TODO(jgruber): Remove this unused bytecode.
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
if (from >= 0 && len > 0) {
if (current + len > subject.length() ||
- !BackRefMatchesNoCase(isolate, from, current, len, subject,
- false)) {
+ !BackRefMatchesNoCase(isolate, from, current, len, subject)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
DISPATCH();
}
@@ -776,27 +764,14 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
DISPATCH();
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from >= 0 && len > 0) {
- if (current - len < 0 ||
- !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
- true)) {
- SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
- DISPATCH();
- }
- current -= len;
- }
- ADVANCE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD);
- DISPATCH();
+ UNREACHABLE(); // TODO(jgruber): Remove this unused bytecode.
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
if (from >= 0 && len > 0) {
if (current - len < 0 ||
- !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
- false)) {
+ !BackRefMatchesNoCase(isolate, from, current - len, len, subject)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
DISPATCH();
}
@@ -1029,6 +1004,8 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
}
}
+#ifndef COMPILING_IRREGEXP_FOR_EXTERNAL_EMBEDDER
+
// This method is called through an external reference from RegExpExecInternal
// builtin.
IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
@@ -1076,6 +1053,8 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
return result;
}
+#endif // !COMPILING_IRREGEXP_FOR_EXTERNAL_EMBEDDER
+
IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromRuntime(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject_string,
int* registers, int registers_length, int start_position) {
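
The surviving CHECK_NOT_BACK_REF_NO_CASE bytecodes keep the same shape: treat an empty or unset capture as a trivial match, bounds-check the subject, compare case-insensitively, then advance. A rough stand-alone sketch of that control flow, assuming plain ASCII upcasing in place of V8's canonicalization tables; all names are hypothetical.

#include <cctype>
#include <string>

// Hypothetical stand-in for BackRefMatchesNoCase: ASCII-only case folding.
bool BackRefMatchesNoCaseAscii(const std::string& subject, int from,
                               int current, int len) {
  for (int i = 0; i < len; i++) {
    unsigned char a = static_cast<unsigned char>(subject[from + i]);
    unsigned char b = static_cast<unsigned char>(subject[current + i]);
    if (std::toupper(a) != std::toupper(b)) return false;
  }
  return true;
}

// Mirrors the bytecode's control flow: empty or invalid captures match
// trivially; otherwise bounds-check, then compare, then advance.
bool CheckBackRef(const std::string& subject, int from, int to, int* current) {
  int len = to - from;
  if (from < 0 || len <= 0) return true;  // Nothing captured: no-op match.
  if (*current + len > static_cast<int>(subject.size())) return false;
  if (!BackRefMatchesNoCaseAscii(subject, from, *current, len)) return false;
  *current += len;
  return true;
}

int main() {
  std::string s = "abcABC";
  int current = 3;
  return CheckBackRef(s, 0, 3, &current) ? 0 : 1;  // "abc" vs "ABC" matches.
}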
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
index 2dc6739e42..8ec12a0ae6 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-arch.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
@@ -15,7 +15,7 @@
#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index 5dca04a18c..0a12201743 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -351,17 +351,15 @@ void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
assembler_->CheckNotBackReference(start_reg, read_backward, on_no_match);
}
-
void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
- PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s %s, label[%08x]);\n",
+ int start_reg, bool read_backward, Label* on_no_match) {
+ PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s, label[%08x]);\n",
start_reg, read_backward ? "backward" : "forward",
- unicode ? "unicode" : "non-unicode", LabelToInt(on_no_match));
- assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward, unicode,
+ LabelToInt(on_no_match));
+ assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward,
on_no_match);
}
-
void RegExpMacroAssemblerTracer::CheckPosition(int cp_offset,
Label* on_outside_input) {
PrintF(" CheckPosition(cp_offset=%d, label[%08x]);\n", cp_offset,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
index 2a44146e73..b6ad63071f 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -33,7 +33,6 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) override;
void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
- bool unicode,
Label* on_no_match) override;
void CheckNotCharacter(unsigned c, Label* on_not_equal) override;
void CheckNotCharacterAfterAnd(unsigned c, unsigned and_with,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 30a9955dc3..3ac1bb7f57 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/pointer-authentication.h"
#include "src/execution/simulator.h"
#include "src/regexp/regexp-stack.h"
#include "src/strings/unicode-inl.h"
@@ -114,34 +115,7 @@ bool NativeRegExpMacroAssembler::CanReadUnaligned() {
return FLAG_enable_regexp_unaligned_accesses && !slow_safe();
}
-const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
- String subject, int start_index, const DisallowHeapAllocation& no_gc) {
- if (subject.IsConsString()) {
- subject = ConsString::cast(subject).first();
- } else if (subject.IsSlicedString()) {
- start_index += SlicedString::cast(subject).offset();
- subject = SlicedString::cast(subject).parent();
- }
- if (subject.IsThinString()) {
- subject = ThinString::cast(subject).actual();
- }
- DCHECK_LE(0, start_index);
- DCHECK_LE(start_index, subject.length());
- if (subject.IsSeqOneByteString()) {
- return reinterpret_cast<const byte*>(
- SeqOneByteString::cast(subject).GetChars(no_gc) + start_index);
- } else if (subject.IsSeqTwoByteString()) {
- return reinterpret_cast<const byte*>(
- SeqTwoByteString::cast(subject).GetChars(no_gc) + start_index);
- } else if (subject.IsExternalOneByteString()) {
- return reinterpret_cast<const byte*>(
- ExternalOneByteString::cast(subject).GetChars() + start_index);
- } else {
- DCHECK(subject.IsExternalTwoByteString());
- return reinterpret_cast<const byte*>(
- ExternalTwoByteString::cast(subject).GetChars() + start_index);
- }
-}
+#ifndef COMPILING_IRREGEXP_FOR_EXTERNAL_EMBEDDER
// This method may only be called after an interrupt.
int NativeRegExpMacroAssembler::CheckStackGuardState(
@@ -149,9 +123,10 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
Address* return_address, Code re_code, Address* subject,
const byte** input_start, const byte** input_end) {
DisallowHeapAllocation no_gc;
+ Address old_pc = PointerAuthentication::AuthenticatePC(return_address, 0);
+ DCHECK_LE(re_code.raw_instruction_start(), old_pc);
+ DCHECK_LE(old_pc, re_code.raw_instruction_end());
- DCHECK(re_code.raw_instruction_start() <= *return_address);
- DCHECK(*return_address <= re_code.raw_instruction_end());
StackLimitCheck check(isolate);
bool js_has_overflowed = check.JsHasOverflowed();
@@ -193,9 +168,11 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
}
if (*code_handle != re_code) { // Return address no longer valid
- intptr_t delta = code_handle->address() - re_code.address();
// Overwrite the return address on the stack.
- *return_address += delta;
+ intptr_t delta = code_handle->address() - re_code.address();
+ Address new_pc = old_pc + delta;
+ // TODO(v8:10026): avoid replacing a signed pointer.
+ PointerAuthentication::ReplacePC(return_address, new_pc, 0);
}
// If we continue, we need to update the subject string addresses.
@@ -210,8 +187,7 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
} else {
*subject = subject_handle->ptr();
intptr_t byte_length = *input_end - *input_start;
- *input_start =
- StringCharacterPosition(*subject_handle, start_index, no_gc);
+ *input_start = subject_handle->AddressOfCharacterAt(start_index, no_gc);
*input_end = *input_start + byte_length;
}
}
@@ -259,7 +235,7 @@ int NativeRegExpMacroAssembler::Match(Handle<JSRegExp> regexp,
DisallowHeapAllocation no_gc;
const byte* input_start =
- StringCharacterPosition(subject_ptr, start_offset + slice_offset, no_gc);
+ subject_ptr.AddressOfCharacterAt(start_offset + slice_offset, no_gc);
int byte_length = char_length << char_size_shift;
const byte* input_end = input_start + byte_length;
return Execute(*subject, start_offset, input_start, input_end, offsets_vector,
@@ -305,6 +281,8 @@ int NativeRegExpMacroAssembler::Execute(
return result;
}
+#endif // !COMPILING_IRREGEXP_FOR_EXTERNAL_EMBEDDER
+
// clang-format off
const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
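
word_character_map above is a lookup table in which a non-zero byte marks a \w character, so generated code can classify a character with a single indexed load. A hedged sketch of building and querying such a table, assuming C++17; the names and the 128-entry size are illustrative.

#include <array>

// Build a 128-entry ASCII table: non-zero marks [0-9A-Za-z_], as \w does.
constexpr std::array<unsigned char, 128> MakeWordMap() {
  std::array<unsigned char, 128> map{};
  for (int c = '0'; c <= '9'; c++) map[c] = 0xFFu;
  for (int c = 'A'; c <= 'Z'; c++) map[c] = 0xFFu;
  for (int c = 'a'; c <= 'z'; c++) map[c] = 0xFFu;
  map['_'] = 0xFFu;
  return map;
}

constexpr auto kWordMap = MakeWordMap();

// One load and one compare, the same shape the macro assembler emits.
inline bool IsWordChar(unsigned int c) {
  return c < kWordMap.size() && kWordMap[c] != 0;
}

int main() { return IsWordChar('_') && !IsWordChar('-') ? 0 : 1; }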
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index bda7e5cce1..e83446cdc9 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -87,7 +87,7 @@ class RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) = 0;
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match) = 0;
// Check the current character for a match with a literal character. If we
// fail to match then goto the on_failure label. End of input always
@@ -122,6 +122,11 @@ class RegExpMacroAssembler {
// not have custom support.
// May clobber the current loaded character.
virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
+
+ // Control-flow integrity:
+ // Define a jump target and bind a label.
+ virtual void BindJumpTarget(Label* label) { Bind(label); }
+
virtual void Fail() = 0;
virtual Handle<HeapObject> GetCode(Handle<String> source) = 0;
virtual void GoTo(Label* label) = 0;
@@ -246,9 +251,6 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
static Address GrowStack(Address stack_pointer, Address* stack_top,
Isolate* isolate);
- static const byte* StringCharacterPosition(
- String subject, int start_index, const DisallowHeapAllocation& no_gc);
-
static int CheckStackGuardState(Isolate* isolate, int start_index,
RegExp::CallOrigin call_origin,
Address* return_address, Code re_code,
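
The new BindJumpTarget hook defaults to a plain Bind, leaving room for an architecture with control-flow integrity to also emit a landing-pad instruction at every indirect-branch target. A sketch of that default-plus-override shape; all names are illustrative.

#include <cstdio>

struct Label {};

class AssemblerBase {
 public:
  virtual ~AssemblerBase() = default;
  virtual void Bind(Label*) { std::puts("bind label"); }
  // Hook for indirect-branch targets; the default is a plain bind.
  virtual void BindJumpTarget(Label* label) { Bind(label); }
};

class CfiAssembler : public AssemblerBase {
 public:
  void BindJumpTarget(Label* label) override {
    Bind(label);
    std::puts("emit landing-pad instruction");  // e.g. an ARM64 BTI
  }
};

int main() {
  Label l;
  CfiAssembler masm;
  masm.BindJumpTarget(&l);  // Binds the label and emits the landing pad.
  return 0;
}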
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 951f815374..3c1115414f 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -24,11 +24,10 @@
namespace v8 {
namespace internal {
-RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
- JSRegExp::Flags flags, Isolate* isolate, Zone* zone)
+RegExpParser::RegExpParser(FlatStringReader* in, JSRegExp::Flags flags,
+ Isolate* isolate, Zone* zone)
: isolate_(isolate),
zone_(zone),
- error_(error),
captures_(nullptr),
named_captures_(nullptr),
named_back_references_(nullptr),
@@ -81,13 +80,12 @@ void RegExpParser::Advance() {
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on stack overflow");
}
- ReportError(CStrVector(
- MessageFormatter::TemplateString(MessageTemplate::kStackOverflow)));
+ ReportError(RegExpError::kStackOverflow);
} else if (zone()->excess_allocation()) {
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on excess zone allocation");
}
- ReportError(CStrVector("Regular expression too large"));
+ ReportError(RegExpError::kTooLarge);
} else {
current_ = ReadNext<true>();
}
@@ -139,15 +137,12 @@ bool RegExpParser::IsSyntaxCharacterOrSlash(uc32 c) {
return false;
}
-
-RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
+RegExpTree* RegExpParser::ReportError(RegExpError error) {
if (failed_) return nullptr; // Do not overwrite any existing error.
failed_ = true;
- *error_ = isolate()
- ->factory()
- ->NewStringFromOneByte(Vector<const uint8_t>::cast(message))
- .ToHandleChecked();
- // Zip to the end to make sure the no more input is read.
+ error_ = error;
+ error_pos_ = position();
+ // Zip to the end to make sure no more input is read.
current_ = kEndMarker;
next_pos_ = in()->length();
return nullptr;
@@ -194,14 +189,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case kEndMarker:
if (state->IsSubexpression()) {
// Inside a parenthesized group when hitting end of input.
- return ReportError(CStrVector("Unterminated group"));
+ return ReportError(RegExpError::kUnterminatedGroup);
}
DCHECK_EQ(INITIAL, state->group_type());
// Parsing completed successfully.
return builder->ToRegExp();
case ')': {
if (!state->IsSubexpression()) {
- return ReportError(CStrVector("Unmatched ')'"));
+ return ReportError(RegExpError::kUnmatchedParen);
}
DCHECK_NE(INITIAL, state->group_type());
@@ -252,7 +247,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '*':
case '+':
case '?':
- return ReportError(CStrVector("Nothing to repeat"));
+ return ReportError(RegExpError::kNothingToRepeat);
case '^': {
Advance();
if (builder->multiline()) {
@@ -307,7 +302,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '\\':
switch (Next()) {
case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
+ return ReportError(RegExpError::kEscapeAtEndOfPattern);
case 'b':
Advance(2);
builder->AddAssertion(new (zone()) RegExpAssertion(
@@ -347,7 +342,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
if (unicode()) {
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
- std::vector<char> name_1, name_2;
+ ZoneVector<char> name_1(zone());
+ ZoneVector<char> name_2(zone());
if (ParsePropertyClassName(&name_1, &name_2)) {
if (AddPropertyClassRange(ranges, p == 'P', name_1, name_2)) {
RegExpCharacterClass* cc = new (zone())
@@ -363,7 +359,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
}
}
- return ReportError(CStrVector("Invalid property name"));
+ return ReportError(RegExpError::kInvalidPropertyName);
} else {
builder->AddCharacter(p);
}
@@ -399,7 +395,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// With /u, no identity escapes except for syntax characters
// are allowed. Otherwise, all identity escapes are allowed.
if (unicode()) {
- return ReportError(CStrVector("Invalid escape"));
+ return ReportError(RegExpError::kInvalidEscape);
}
uc32 first_digit = Next();
if (first_digit == '8' || first_digit == '9') {
@@ -413,7 +409,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance();
if (unicode() && Next() >= '0' && Next() <= '9') {
// With /u, decimal escape with leading 0 are not parsed as octal.
- return ReportError(CStrVector("Invalid decimal escape"));
+ return ReportError(RegExpError::kInvalidDecimalEscape);
}
uc32 octal = ParseOctalLiteral();
builder->AddCharacter(octal);
@@ -454,7 +450,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// ES#prod-annexB-ExtendedPatternCharacter
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
- return ReportError(CStrVector("Invalid unicode escape"));
+ return ReportError(RegExpError::kInvalidUnicodeEscape);
}
builder->AddCharacter('\\');
} else {
@@ -472,7 +468,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddCharacter('x');
} else {
// With /u, invalid escapes are not treated as identity escapes.
- return ReportError(CStrVector("Invalid escape"));
+ return ReportError(RegExpError::kInvalidEscape);
}
break;
}
@@ -485,7 +481,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddCharacter('u');
} else {
// With /u, invalid escapes are not treated as identity escapes.
- return ReportError(CStrVector("Invalid Unicode escape"));
+ return ReportError(RegExpError::kInvalidUnicodeEscape);
}
break;
}
@@ -509,7 +505,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddCharacter(current());
Advance();
} else {
- return ReportError(CStrVector("Invalid escape"));
+ return ReportError(RegExpError::kInvalidEscape);
}
break;
}
@@ -517,13 +513,13 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '{': {
int dummy;
bool parsed = ParseIntervalQuantifier(&dummy, &dummy CHECK_FAILED);
- if (parsed) return ReportError(CStrVector("Nothing to repeat"));
+ if (parsed) return ReportError(RegExpError::kNothingToRepeat);
V8_FALLTHROUGH;
}
case '}':
case ']':
if (unicode()) {
- return ReportError(CStrVector("Lone quantifier brackets"));
+ return ReportError(RegExpError::kLoneQuantifierBrackets);
}
V8_FALLTHROUGH;
default:
@@ -558,13 +554,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '{':
if (ParseIntervalQuantifier(&min, &max)) {
if (max < min) {
- return ReportError(
- CStrVector("numbers out of order in {} quantifier"));
+ return ReportError(RegExpError::kRangeOutOfOrder);
}
break;
} else if (unicode()) {
// With /u, incomplete quantifiers are not allowed.
- return ReportError(CStrVector("Incomplete quantifier"));
+ return ReportError(RegExpError::kIncompleteQuantifier);
}
continue;
default:
@@ -580,7 +575,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance();
}
if (!builder->AddQuantifierToAtom(min, max, quantifier_type)) {
- return ReportError(CStrVector("Invalid quantifier"));
+ return ReportError(RegExpError::kInvalidQuantifier);
}
}
}
@@ -615,7 +610,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
case 's':
case 'm': {
if (!FLAG_regexp_mode_modifiers) {
- ReportError(CStrVector("Invalid group"));
+ ReportError(RegExpError::kInvalidGroup);
return nullptr;
}
Advance();
@@ -624,7 +619,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
switch (current()) {
case '-':
if (!flags_sense) {
- ReportError(CStrVector("Multiple dashes in flag group"));
+ ReportError(RegExpError::kMultipleFlagDashes);
return nullptr;
}
flags_sense = false;
@@ -638,7 +633,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
if (current() == 'm') bit = JSRegExp::kMultiline;
if (current() == 's') bit = JSRegExp::kDotAll;
if (((switch_on | switch_off) & bit) != 0) {
- ReportError(CStrVector("Repeated flag in flag group"));
+ ReportError(RegExpError::kRepeatedFlag);
return nullptr;
}
if (flags_sense) {
@@ -666,7 +661,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
subexpr_type = GROUPING; // Will break us out of the outer loop.
continue;
default:
- ReportError(CStrVector("Invalid flag group"));
+ ReportError(RegExpError::kInvalidFlagGroup);
return nullptr;
}
}
@@ -690,13 +685,13 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
Advance();
break;
default:
- ReportError(CStrVector("Invalid group"));
+ ReportError(RegExpError::kInvalidGroup);
return nullptr;
}
}
if (subexpr_type == CAPTURE) {
if (captures_started_ >= JSRegExp::kMaxCaptures) {
- ReportError(CStrVector("Too many captures"));
+ ReportError(RegExpError::kTooManyCaptures);
return nullptr;
}
captures_started_++;
@@ -845,20 +840,20 @@ const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
if (c == '\\' && current() == 'u') {
Advance();
if (!ParseUnicodeEscape(&c)) {
- ReportError(CStrVector("Invalid Unicode escape sequence"));
+ ReportError(RegExpError::kInvalidUnicodeEscape);
return nullptr;
}
}
// The backslash char is misclassified as both ID_Start and ID_Continue.
if (c == '\\') {
- ReportError(CStrVector("Invalid capture group name"));
+ ReportError(RegExpError::kInvalidCaptureGroupName);
return nullptr;
}
if (at_start) {
if (!IsIdentifierStart(c)) {
- ReportError(CStrVector("Invalid capture group name"));
+ ReportError(RegExpError::kInvalidCaptureGroupName);
return nullptr;
}
push_code_unit(name, c);
@@ -869,7 +864,7 @@ const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
} else if (IsIdentifierPart(c)) {
push_code_unit(name, c);
} else {
- ReportError(CStrVector("Invalid capture group name"));
+ ReportError(RegExpError::kInvalidCaptureGroupName);
return nullptr;
}
}
@@ -896,7 +891,7 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name,
const auto& named_capture_it = named_captures_->find(capture);
if (named_capture_it != named_captures_->end()) {
- ReportError(CStrVector("Duplicate capture group name"));
+ ReportError(RegExpError::kDuplicateCaptureGroupName);
return false;
}
}
@@ -910,7 +905,7 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
RegExpParserState* state) {
// The parser is assumed to be on the '<' in \k<name>.
if (current() != '<') {
- ReportError(CStrVector("Invalid named reference"));
+ ReportError(RegExpError::kInvalidNamedReference);
return false;
}
@@ -943,7 +938,7 @@ void RegExpParser::PatchNamedBackReferences() {
if (named_back_references_ == nullptr) return;
if (named_captures_ == nullptr) {
- ReportError(CStrVector("Invalid named capture referenced"));
+ ReportError(RegExpError::kInvalidNamedCaptureReference);
return;
}
@@ -964,7 +959,7 @@ void RegExpParser::PatchNamedBackReferences() {
if (capture_it != named_captures_->end()) {
index = (*capture_it)->index();
} else {
- ReportError(CStrVector("Invalid named capture referenced"));
+ ReportError(RegExpError::kInvalidNamedCaptureReference);
return;
}
@@ -1385,8 +1380,8 @@ bool IsUnicodePropertyValueCharacter(char c) {
} // anonymous namespace
-bool RegExpParser::ParsePropertyClassName(std::vector<char>* name_1,
- std::vector<char>* name_2) {
+bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2) {
DCHECK(name_1->empty());
DCHECK(name_2->empty());
// Parse the property class as follows:
@@ -1425,8 +1420,8 @@ bool RegExpParser::ParsePropertyClassName(std::vector<char>* name_1,
bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
bool negate,
- const std::vector<char>& name_1,
- const std::vector<char>& name_2) {
+ const ZoneVector<char>& name_1,
+ const ZoneVector<char>& name_2) {
if (name_2.empty()) {
// First attempt to interpret as general category property value name.
const char* name = name_1.data();
@@ -1463,7 +1458,7 @@ bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
}
}
-RegExpTree* RegExpParser::GetPropertySequence(const std::vector<char>& name_1) {
+RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
if (!FLAG_harmony_regexp_sequence) return nullptr;
const char* name = name_1.data();
const uc32* sequence_list = nullptr;
@@ -1529,19 +1524,19 @@ RegExpTree* RegExpParser::GetPropertySequence(const std::vector<char>& name_1) {
#else // V8_INTL_SUPPORT
-bool RegExpParser::ParsePropertyClassName(std::vector<char>* name_1,
- std::vector<char>* name_2) {
+bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2) {
return false;
}
bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
bool negate,
- const std::vector<char>& name_1,
- const std::vector<char>& name_2) {
+ const ZoneVector<char>& name_1,
+ const ZoneVector<char>& name_2) {
return false;
}
-RegExpTree* RegExpParser::GetPropertySequence(const std::vector<char>& name) {
+RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name) {
return nullptr;
}
@@ -1605,7 +1600,7 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
}
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
- ReportError(CStrVector("Invalid class escape"));
+ ReportError(RegExpError::kInvalidClassEscape);
return 0;
}
if ((controlLetter >= '0' && controlLetter <= '9') ||
@@ -1638,7 +1633,7 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
// ES#prod-annexB-LegacyOctalEscapeSequence
if (unicode()) {
// With /u, decimal escape is not interpreted as octal character code.
- ReportError(CStrVector("Invalid class escape"));
+ ReportError(RegExpError::kInvalidClassEscape);
return 0;
}
return ParseOctalLiteral();
@@ -1648,7 +1643,7 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
if (ParseHexEscape(2, &value)) return value;
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
- ReportError(CStrVector("Invalid escape"));
+ ReportError(RegExpError::kInvalidEscape);
return 0;
}
// If \x is not followed by a two-digit hexadecimal, treat it
@@ -1661,7 +1656,7 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
if (ParseUnicodeEscape(&value)) return value;
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
- ReportError(CStrVector("Invalid unicode escape"));
+ ReportError(RegExpError::kInvalidUnicodeEscape);
return 0;
}
// If \u is not followed by a two-digit hexadecimal, treat it
@@ -1676,11 +1671,11 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
Advance();
return result;
}
- ReportError(CStrVector("Invalid escape"));
+ ReportError(RegExpError::kInvalidEscape);
return 0;
}
}
- return 0;
+ UNREACHABLE();
}
void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
@@ -1703,17 +1698,18 @@ void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
return;
}
case kEndMarker:
- ReportError(CStrVector("\\ at end of pattern"));
+ ReportError(RegExpError::kEscapeAtEndOfPattern);
return;
case 'p':
case 'P':
if (unicode()) {
bool negate = Next() == 'P';
Advance(2);
- std::vector<char> name_1, name_2;
+ ZoneVector<char> name_1(zone);
+ ZoneVector<char> name_2(zone);
if (!ParsePropertyClassName(&name_1, &name_2) ||
!AddPropertyClassRange(ranges, negate, name_1, name_2)) {
- ReportError(CStrVector("Invalid property name in character class"));
+ ReportError(RegExpError::kInvalidClassPropertyName);
}
*is_class_escape = true;
return;
@@ -1732,10 +1728,6 @@ void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
}
RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
- static const char* kUnterminated = "Unterminated character class";
- static const char* kRangeInvalid = "Invalid character class";
- static const char* kRangeOutOfOrder = "Range out of order in character class";
-
DCHECK_EQ(current(), '[');
Advance();
bool is_negated = false;
@@ -1768,7 +1760,7 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
// Either end is an escaped character class. Treat the '-' verbatim.
if (unicode()) {
// ES2015 21.2.2.15.1 step 1.
- return ReportError(CStrVector(kRangeInvalid));
+ return ReportError(RegExpError::kInvalidCharacterClass);
}
if (!is_class_1) ranges->Add(CharacterRange::Singleton(char_1), zone());
ranges->Add(CharacterRange::Singleton('-'), zone());
@@ -1777,7 +1769,7 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
}
// ES2015 21.2.2.15.1 step 6.
if (char_1 > char_2) {
- return ReportError(CStrVector(kRangeOutOfOrder));
+ return ReportError(RegExpError::kOutOfOrderCharacterClass);
}
ranges->Add(CharacterRange::Range(char_1, char_2), zone());
} else {
@@ -1785,7 +1777,7 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
}
}
if (!has_more()) {
- return ReportError(CStrVector(kUnterminated));
+ return ReportError(RegExpError::kUnterminatedCharacterClass);
}
Advance();
RegExpCharacterClass::CharacterClassFlags character_class_flags;
@@ -1802,14 +1794,16 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
FlatStringReader* input, JSRegExp::Flags flags,
RegExpCompileData* result) {
DCHECK(result != nullptr);
- RegExpParser parser(input, &result->error, flags, isolate, zone);
+ RegExpParser parser(input, flags, isolate, zone);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
DCHECK(tree == nullptr);
- DCHECK(!result->error.is_null());
+ DCHECK(parser.error_ != RegExpError::kNone);
+ result->error = parser.error_;
+ result->error_pos = parser.error_pos_;
} else {
DCHECK(tree != nullptr);
- DCHECK(result->error.is_null());
+ DCHECK(parser.error_ == RegExpError::kNone);
if (FLAG_trace_regexp_parser) {
StdoutStream os;
tree->Print(os, zone);
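
ReportError now records only the first error together with its position, then skips to end-of-input so the rest of the parse unwinds without reading further. A compact stand-alone model of that protocol; TinyParser and Err are illustrative, not V8 types.

#include <string_view>

enum class Err { kNone, kUnterminatedGroup };

class TinyParser {
 public:
  explicit TinyParser(std::string_view in) : in_(in) {}

  void* ReportError(Err e) {
    if (failed_) return nullptr;  // Never overwrite the first error.
    failed_ = true;
    error_ = e;
    error_pos_ = pos_;
    pos_ = static_cast<int>(in_.size());  // "Zip to the end": stop reading.
    return nullptr;
  }

  bool failed() const { return failed_; }
  Err error() const { return error_; }
  int error_pos() const { return error_pos_; }

 private:
  std::string_view in_;
  int pos_ = 0;
  bool failed_ = false;
  Err error_ = Err::kNone;
  int error_pos_ = 0;
};

int main() {
  TinyParser p("(ab");
  p.ReportError(Err::kUnterminatedGroup);
  return p.failed() && p.error_pos() == 0 ? 0 : 1;
}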
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index cc1948b101..aff1746bc5 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -8,6 +8,7 @@
#include "src/objects/js-regexp.h"
#include "src/objects/objects.h"
#include "src/regexp/regexp-ast.h"
+#include "src/regexp/regexp-error.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -153,8 +154,8 @@ class RegExpBuilder : public ZoneObject {
class V8_EXPORT_PRIVATE RegExpParser {
public:
- RegExpParser(FlatStringReader* in, Handle<String>* error,
- JSRegExp::Flags flags, Isolate* isolate, Zone* zone);
+ RegExpParser(FlatStringReader* in, JSRegExp::Flags flags, Isolate* isolate,
+ Zone* zone);
static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
JSRegExp::Flags flags, RegExpCompileData* result);
@@ -177,13 +178,13 @@ class V8_EXPORT_PRIVATE RegExpParser {
bool ParseUnicodeEscape(uc32* value);
bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
- bool ParsePropertyClassName(std::vector<char>* name_1,
- std::vector<char>* name_2);
+ bool ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2);
bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
- const std::vector<char>& name_1,
- const std::vector<char>& name_2);
+ const ZoneVector<char>& name_1,
+ const ZoneVector<char>& name_2);
- RegExpTree* GetPropertySequence(const std::vector<char>& name_1);
+ RegExpTree* GetPropertySequence(const ZoneVector<char>& name_1);
RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
uc32 ParseOctalLiteral();
@@ -202,7 +203,7 @@ class V8_EXPORT_PRIVATE RegExpParser {
char ParseClassEscape();
- RegExpTree* ReportError(Vector<const char> message);
+ RegExpTree* ReportError(RegExpError error);
void Advance();
void Advance(int dist);
void Reset(int pos);
@@ -335,7 +336,8 @@ class V8_EXPORT_PRIVATE RegExpParser {
Isolate* isolate_;
Zone* zone_;
- Handle<String>* error_;
+ RegExpError error_ = RegExpError::kNone;
+ int error_pos_ = 0;
ZoneList<RegExpCapture*>* captures_;
ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
ZoneList<RegExpBackReference*>* named_back_references_;
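
Switching the property-name buffers from std::vector to ZoneVector moves their storage into the parse Zone, which is released wholesale when parsing finishes instead of element by element. A stand-alone sketch of that arena-allocation idea using a toy allocator; Arena and ArenaAllocator are illustrative, not V8's Zone.

#include <cstddef>
#include <new>
#include <vector>

// Toy bump arena: one big buffer, freed all at once when it goes away.
class Arena {
 public:
  void* Allocate(std::size_t n) {
    if (used_ + n > kSize) throw std::bad_alloc();
    void* p = buf_ + used_;
    used_ += (n + 7) & ~std::size_t{7};  // Keep the next slot 8-byte aligned.
    return p;
  }

 private:
  static constexpr std::size_t kSize = 1 << 16;
  alignas(16) unsigned char buf_[kSize];
  std::size_t used_ = 0;
};

// Minimal std allocator routing all storage into an Arena.
template <class T>
struct ArenaAllocator {
  using value_type = T;
  explicit ArenaAllocator(Arena* a) : arena(a) {}
  template <class U>
  ArenaAllocator(const ArenaAllocator<U>& o) : arena(o.arena) {}
  T* allocate(std::size_t n) {
    return static_cast<T*>(arena->Allocate(n * sizeof(T)));
  }
  void deallocate(T*, std::size_t) {}  // No-op: freed with the whole arena.
  Arena* arena;
};

template <class T, class U>
bool operator==(const ArenaAllocator<T>& a, const ArenaAllocator<U>& b) {
  return a.arena == b.arena;
}
template <class T, class U>
bool operator!=(const ArenaAllocator<T>& a, const ArenaAllocator<U>& b) {
  return !(a == b);
}

int main() {
  Arena arena;
  ArenaAllocator<char> alloc(&arena);
  std::vector<char, ArenaAllocator<char>> name(alloc);
  name.push_back('L');  // Element storage lives in the arena.
  return name.size() == 1 ? 0 : 1;
}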
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index cd199adfb2..9394398fcc 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -38,6 +38,9 @@ class RegExpStackScope {
class RegExpStack {
public:
+ RegExpStack();
+ ~RegExpStack();
+
// Number of allocated locations on the stack below the limit.
// No sequence of pushes must be longer that this without doing a stack-limit
// check.
@@ -77,9 +80,6 @@ class RegExpStack {
static constexpr size_t kMaximumStackSize = 64 * MB;
private:
- RegExpStack();
- ~RegExpStack();
-
// Artificial limit used when the thread-local state has been destroyed.
static const Address kMemoryTop =
static_cast<Address>(static_cast<uintptr_t>(-1));
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 3632deaeb8..4319990a39 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -14,6 +14,7 @@
#include "src/regexp/regexp-dotprinter.h"
#include "src/regexp/regexp-interpreter.h"
#include "src/regexp/regexp-macro-assembler-arch.h"
+#include "src/regexp/regexp-macro-assembler-tracer.h"
#include "src/regexp/regexp-parser.h"
#include "src/strings/string-search.h"
#include "src/utils/ostreams.h"
@@ -91,9 +92,15 @@ class RegExpImpl final : public AllStatic {
};
V8_WARN_UNUSED_RESULT
-static inline MaybeHandle<Object> ThrowRegExpException(
- Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
- Handle<String> error_text) {
+static inline MaybeHandle<Object> ThrowRegExpException(Isolate* isolate,
+ Handle<JSRegExp> re,
+ Handle<String> pattern,
+ RegExpError error) {
+ Vector<const char> error_data = CStrVector(RegExpErrorString(error));
+ Handle<String> error_text =
+ isolate->factory()
+ ->NewStringFromOneByte(Vector<const uint8_t>::cast(error_data))
+ .ToHandleChecked();
THROW_NEW_ERROR(
isolate,
NewSyntaxError(MessageTemplate::kMalformedRegExp, pattern, error_text),
@@ -101,7 +108,7 @@ static inline MaybeHandle<Object> ThrowRegExpException(
}
inline void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> error_text) {
+ RegExpError error_text) {
USE(ThrowRegExpException(isolate, re, Handle<String>(re->Pattern(), isolate),
error_text));
}
@@ -407,7 +414,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Compile(isolate, &zone, &compile_data, flags, pattern, sample_subject,
is_one_byte, re->BacktrackLimit());
if (!compilation_succeeded) {
- DCHECK(!compile_data.error.is_null());
+ DCHECK(compile_data.error != RegExpError::kNone);
ThrowRegExpException(isolate, re, compile_data.error);
return false;
}
@@ -740,8 +747,7 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
Handle<String> sample_subject, bool is_one_byte,
uint32_t backtrack_limit) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- data->error =
- isolate->factory()->NewStringFromAsciiChecked("RegExp too big");
+ data->error = RegExpError::kTooLarge;
return false;
}
@@ -809,8 +815,8 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
if (node == nullptr) node = new (zone) EndNode(EndNode::BACKTRACK, zone);
data->node = node;
- if (const char* error_message = AnalyzeRegExp(isolate, is_one_byte, node)) {
- data->error = isolate->factory()->NewStringFromAsciiChecked(error_message);
+ data->error = AnalyzeRegExp(isolate, is_one_byte, node);
+ if (data->error != RegExpError::kNone) {
return false;
}
@@ -839,7 +845,7 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
#elif V8_TARGET_ARCH_S390
macro_assembler.reset(new RegExpMacroAssemblerS390(
isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
macro_assembler.reset(new RegExpMacroAssemblerPPC(
isolate, zone, mode, (data->capture_count + 1) * 2));
#elif V8_TARGET_ARCH_MIPS
@@ -878,8 +884,18 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
macro_assembler->set_global_mode(mode);
}
+ RegExpMacroAssembler* macro_assembler_ptr = macro_assembler.get();
+#ifdef DEBUG
+ std::unique_ptr<RegExpMacroAssembler> tracer_macro_assembler;
+ if (FLAG_trace_regexp_assembler) {
+ tracer_macro_assembler.reset(
+ new RegExpMacroAssemblerTracer(isolate, macro_assembler_ptr));
+ macro_assembler_ptr = tracer_macro_assembler.get();
+ }
+#endif
+
RegExpCompiler::CompilationResult result = compiler.Assemble(
- isolate, macro_assembler.get(), node, data->capture_count, pattern);
+ isolate, macro_assembler_ptr, node, data->capture_count, pattern);
// Code / bytecode printing.
{
@@ -902,13 +918,12 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
}
}
- if (result.error_message != nullptr) {
+ if (result.error != RegExpError::kNone) {
if (FLAG_correctness_fuzzer_suppressions &&
- strncmp(result.error_message, "Stack overflow", 15) == 0) {
+ result.error == RegExpError::kStackOverflow) {
FATAL("Aborting on stack overflow");
}
- data->error =
- isolate->factory()->NewStringFromAsciiChecked(result.error_message);
+ data->error = result.error;
}
data->code = result.code;
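
The DEBUG-only tracer wiring above is a plain decorator: when FLAG_trace_regexp_assembler is set, a RegExpMacroAssemblerTracer wraps the real assembler, logs each call, and forwards it. A sketch of the same wiring with illustrative names.

#include <cstdio>
#include <memory>

class MacroAsm {
 public:
  virtual ~MacroAsm() = default;
  virtual void GoTo(int label) = 0;
};

class RealAsm : public MacroAsm {
 public:
  void GoTo(int) override { /* would emit a jump here */ }
};

// Decorator: log the call, then forward it unchanged.
class TracerAsm : public MacroAsm {
 public:
  explicit TracerAsm(MacroAsm* inner) : inner_(inner) {}
  void GoTo(int label) override {
    std::printf(" GoTo(label[%d]);\n", label);
    inner_->GoTo(label);
  }

 private:
  MacroAsm* inner_;
};

int main() {
  RealAsm real;
  MacroAsm* masm = &real;
  bool trace = true;  // Stands in for FLAG_trace_regexp_assembler.
  std::unique_ptr<TracerAsm> tracer;
  if (trace) {
    tracer = std::make_unique<TracerAsm>(masm);
    masm = tracer.get();  // Callers now go through the tracer.
  }
  masm->GoTo(42);
  return 0;
}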
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index 9f3581d18e..27ccbb47ba 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -6,6 +6,7 @@
#define V8_REGEXP_REGEXP_H_
#include "src/objects/js-regexp.h"
+#include "src/regexp/regexp-error.h"
namespace v8 {
namespace internal {
@@ -42,7 +43,11 @@ struct RegExpCompileData {
// The error message. Only used if an error occurred during parsing or
// compilation.
- Handle<String> error;
+ RegExpError error = RegExpError::kNone;
+
+ // The position at which the error was detected. Only used if an
+ // error occurred.
+ int error_pos = 0;
// The number of capture groups, without the global capture \0.
int capture_count = 0;
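
With RegExpCompileData now carrying an error code and a position instead of a ready-made string, callers build the user-facing message at throw time. A hedged sketch of consuming that pair; the formatting and all names here are illustrative, not V8's.

#include <cstdio>

enum class RegExpError { kNone, kUnterminatedGroup };

const char* ErrorString(RegExpError e) {
  switch (e) {
    case RegExpError::kUnterminatedGroup:
      return "Unterminated group";
    default:
      return "";
  }
}

// Builds a diagnostic from the (error, error_pos) pair that
// RegExpCompileData now carries.
void Report(const char* pattern, RegExpError error, int error_pos) {
  std::fprintf(stderr, "/%s/: %s at position %d\n", pattern,
               ErrorString(error), error_pos);
}

int main() {
  Report("(ab", RegExpError::kUnterminatedGroup, 3);
  return 0;
}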
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index bcef02369f..be4b85df4f 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -113,6 +113,8 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
+ masm_->set_root_array_available(false);
+
DCHECK_EQ(0, registers_to_save % 2);
__ b(&entry_label_); // We'll write the entry code later.
@@ -228,7 +230,7 @@ void RegExpMacroAssemblerS390::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ LoadP(r2, register_location(start_reg)); // Index of start of
// capture
@@ -325,7 +327,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
// r2: Address byte_offset1 - Address captured substring's start.
// r3: Address byte_offset2 - Address of current character position.
// r4: size_t byte_length - length of capture in bytes(!)
- // r5: Isolate* isolate or 0 if unicode flag.
+ // r5: Isolate* isolate.
// Address of start of capture.
__ AddP(r2, end_of_input_address());
@@ -339,14 +341,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
__ SubP(r3, r3, r6);
}
// Isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ LoadImmP(r5, Operand::Zero());
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
- }
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm_);
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 4f79296d78..eced564d7f 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -36,7 +36,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
diff --git a/deps/v8/src/regexp/special-case.h b/deps/v8/src/regexp/special-case.h
index 1ccec5d31a..753c9231ed 100644
--- a/deps/v8/src/regexp/special-case.h
+++ b/deps/v8/src/regexp/special-case.h
@@ -6,70 +6,109 @@
#define V8_REGEXP_SPECIAL_CASE_H_
#ifdef V8_INTL_SUPPORT
-#include "unicode/uversion.h"
-namespace U_ICU_NAMESPACE {
-class UnicodeSet;
-} // namespace U_ICU_NAMESPACE
+#include "src/base/logging.h"
+#include "src/common/globals.h"
+
+#include "unicode/uchar.h"
+#include "unicode/uniset.h"
+#include "unicode/unistr.h"
namespace v8 {
namespace internal {
-// Functions to build special sets of Unicode characters that need special
-// handling under "i" mode that cannot use closeOver(USET_CASE_INSENSITIVE).
+// Sets of Unicode characters that need special handling under "i" mode
+
+// For non-unicode ignoreCase matches (aka "i", not "iu"), ECMA 262
+// defines slightly different case-folding rules than Unicode. An
+// input character should match a pattern character if the result of
+// the Canonicalize algorithm is the same for both characters.
//
-// For the characters in the "ignore set", the process should not treat other
-// characters in the result of closeOver(USET_CASE_INSENSITIVE) as case
-// equivlant under the ECMA262 RegExp "i" mode because these characters are
-// uppercase themselves that no other characters in the set uppercase to.
+// Roughly speaking, for "i" regexps, Canonicalize(c) is the same as
+// c.toUpperCase(), unless a) c.toUpperCase() is a multi-character
+// string, or b) c is non-ASCII, and c.toUpperCase() is ASCII. See
+// https://tc39.es/ecma262/#sec-runtime-semantics-canonicalize-ch for
+// the precise definition.
//
-// For the characters in the "special add set", the proecess should add only
-// those characters in the result of closeOver(USET_CASE_INSENSITIVE) which is
-// not uppercase characters as case equivlant under the ECMA262 RegExp "i" mode
-// and also that ONE uppercase character that other non uppercase character
-// uppercase into to the set. Other uppercase characters in the result of
-// closeOver(USET_CASE_INSENSITIVE) should not be considered because ECMA262
-// RegExp "i" mode consider two characters as "case equivlant" if both
-// characters uppercase to the same character.
+// While compiling such regular expressions, we need to compute the
+// set of characters that should match a given input character. (See
+// GetCaseIndependentLetters and CharacterRange::AddCaseEquivalents.)
+// For almost all characters, this can be efficiently computed using
+// UnicodeSet::closeOver(USET_CASE_INSENSITIVE). These sets represent
+// the remaining special cases.
//
-// For example, consider the following case equivalent set defined by Unicode
-// standard. Notice there are more than one uppercase characters in this set:
-// U+212B Å Angstrom Sign - an uppercase character.
-// U+00C5 Å Latin Capital Letter A with Ring Above - an uppercase character.
-// U+00E5 å Latin Small Letter A with Ring Above - a lowercase character which
-// uppercase to U+00C5.
-// In this case equivlant set is a special set and need special handling while
-// considering "case equivlant" under the ECMA262 RegExp "i" mode which is
-// different than Unicode Standard:
-// * U+212B should be included into the "ignore" set because there are no other
-// characters, under the ECMA262 "i" mode, are considered as "case equivlant"
-// to it because U+212B is itself an uppercase but neither U+00C5 nor U+00E5
-// uppercase to U+212B.
-// * U+00C5 and U+00E5 will both be included into the "special add" set. While
-// calculate the "equivlant set" under ECMA262 "i" mode, the process will
-// add U+00E5, because it is not an uppercase character in the set. The
-// process will also add U+00C5, because it is the uppercase character which
-// other non uppercase character, U+00C5, uppercase into.
+// For a character c, the rules are as follows:
//
-// For characters not included in "ignore set" and "special add set", the
-// process will just use closeOver(USET_CASE_INSENSITIVE) to calcualte, which is
-// much faster.
+// 1. If c is in neither IgnoreSet nor SpecialAddSet, then calling
+// UnicodeSet::closeOver(USET_CASE_INSENSITIVE) on a UnicodeSet
+// containing c will produce the set of characters that should
+// match /c/i (or /[c]/i), and only those characters.
//
-// Under Unicode 12.0, there are only 7 characters in the "special add set" and
-// 4 characters in "ignore set" so even the special add process is slower, it is
-// limited to a small set of cases only.
+// 2. If c is in IgnoreSet, then the only character it should match is
+// itself. However, closeOver will add additional incorrect
+// matches. For example, consider SHARP S: 'ß' (U+00DF) and 'ẞ'
+// (U+1E9E). Although closeOver('ß') = "ßẞ", uppercase('ß') is
+// "SS". Step 3.e therefore requires that 'ß' canonicalizes to
+// itself, and should not match 'ẞ'. In these cases, we can skip
+// the closeOver entirely, because it will never add an equivalent
+// character.
//
-// The implementation of these two function will be generated by calling ICU
-// icu::UnicodeSet during the build time into gen/src/regexp/special-case.cc by
-// the code in src/regexp/gen-regexp-special-case.cc.
+// 3. If c is in SpecialAddSet, then it should match at least one
+// character other than itself. However, closeOver will add at
+// least one additional incorrect match. For example, consider the
+// letter 'k'. Closing over 'k' gives "kKK" (lowercase k, uppercase
+// K, U+212A KELVIN SIGN). However, because of step 3.g, KELVIN
+// SIGN should not match either of the other two characters. As a
+// result, "k" and "K" are in SpecialAddSet (and KELVIN SIGN is in
+// IgnoreSet). To find the correct matches for characters in
+// SpecialAddSet, we closeOver the original character, but filter
+// out the results that do not have the same canonical value.
//
-// These two function will be used with LazyInstance<> template to generate
-// global sharable set to reduce memory usage and speed up performance.
+// The contents of these sets are calculated at build time by
+// src/regexp/gen-regexp-special-case.cc, which generates
+// gen/src/regexp/special-case.cc. This is done by iterating over the
+// result of closeOver for each BMP character, and finding sets for
+// which at least one character has a different canonical value than
+// another character. Characters that match no other characters in
+// their equivalence class are added to IgnoreSet. Characters that
+// match at least one other character are added to SpecialAddSet.
+
+class RegExpCaseFolding final : public AllStatic {
+ public:
+ static const icu::UnicodeSet& IgnoreSet();
+ static const icu::UnicodeSet& SpecialAddSet();
+
+ // This implements ECMAScript 2020 21.2.2.8.2 (Runtime Semantics:
+ // Canonicalize) step 3, which is used to determine whether
+ // characters match when ignoreCase is true and unicode is false.
+ static UChar32 Canonicalize(UChar32 ch) {
+ // a. Assert: ch is a UTF-16 code unit.
+ CHECK_LE(ch, 0xffff);
+
+ // b. Let s be the String value consisting of the single code unit ch.
+ icu::UnicodeString s(ch);
+
+ // c. Let u be the same result produced as if by performing the algorithm
+ // for String.prototype.toUpperCase using s as the this value.
+ // d. Assert: Type(u) is String.
+ icu::UnicodeString& u = s.toUpper();
+
+ // e. If u does not consist of a single code unit, return ch.
+ if (u.length() != 1) {
+ return ch;
+ }
+
+ // f. Let cu be u's single code unit element.
+ UChar32 cu = u.char32At(0);
-// Function to build and return the Ignore set.
-icu::UnicodeSet BuildIgnoreSet();
+ // g. If the value of ch >= 128 and the value of cu < 128, return ch.
+ if (ch >= 128 && cu < 128) {
+ return ch;
+ }
-// Function to build and return the Special Add set.
-icu::UnicodeSet BuildSpecialAddSet();
+ // h. Return cu.
+ return cu;
+ }
+};
} // namespace internal
} // namespace v8
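
Rule 3 above says characters in SpecialAddSet close over to too many matches, and the fix is to filter the closed-over set by canonical value. A stand-alone sketch of that filtering, assuming ICU is available; Canonicalize reproduces the steps of the class above so the example builds on its own, but it is not the V8 source.

#include <unicode/uniset.h>
#include <unicode/unistr.h>

// Same steps as RegExpCaseFolding::Canonicalize above, inlined for a
// standalone build.
UChar32 Canonicalize(UChar32 ch) {
  if (ch > 0xffff) return ch;
  icu::UnicodeString s(ch);
  icu::UnicodeString& u = s.toUpper();
  if (u.length() != 1) return ch;
  UChar32 cu = u.char32At(0);
  if (ch >= 128 && cu < 128) return ch;
  return cu;
}

// closeOver, then keep only characters with the same canonical value,
// exactly the filtering rule 3 describes for SpecialAddSet members.
icu::UnicodeSet CaseEquivalents(UChar32 ch) {
  icu::UnicodeSet set(ch, ch);
  set.closeOver(USET_CASE_INSENSITIVE);
  icu::UnicodeSet result;
  const UChar32 canon = Canonicalize(ch);
  for (int32_t i = 0; i < set.size(); i++) {
    UChar32 c = set.charAt(i);
    if (Canonicalize(c) == canon) result.add(c);
  }
  return result;
}

int main() {
  // closeOver('k') yields {k, K, U+212A KELVIN SIGN}; the filter drops
  // KELVIN SIGN because Canonicalize(U+212A) differs from Canonicalize('k').
  icu::UnicodeSet eq = CaseEquivalents('k');
  return eq.contains(0x212A) ? 1 : 0;
}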
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 5620c6b9ce..5edbf5e579 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -214,9 +214,8 @@ void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
@@ -321,7 +320,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
-// Isolate* isolate or 0 if unicode flag.
+ // Isolate* isolate.
#ifdef V8_TARGET_OS_WIN
DCHECK(rcx == arg_reg_1);
DCHECK(rdx == arg_reg_2);
@@ -349,14 +348,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Set byte_length.
__ movq(arg_reg_3, rbx);
// Isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ movq(arg_reg_4, Immediate(0));
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ LoadAddress(arg_reg_4, ExternalReference::isolate_address(isolate()));
- }
+ __ LoadAddress(arg_reg_4, ExternalReference::isolate_address(isolate()));
{ // NOLINT: Can't find a way to open this scope without confusing the
// linter.
@@ -388,7 +380,6 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
bool read_backward,
Label* on_no_match) {
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 0bf1c2e150..64614e228a 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -37,7 +37,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) override;
void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
- bool unicode,
Label* on_no_match) override;
void CheckNotCharacter(uint32_t c, Label* on_not_equal) override;
void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
diff --git a/deps/v8/src/roots/roots-inl.h b/deps/v8/src/roots/roots-inl.h
index 65c0c4a324..2a5fabd216 100644
--- a/deps/v8/src/roots/roots-inl.h
+++ b/deps/v8/src/roots/roots-inl.h
@@ -8,8 +8,8 @@
#include "src/roots/roots.h"
#include "src/execution/isolate.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles.h"
-#include "src/heap/off-thread-factory.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
@@ -66,8 +66,8 @@ ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
: read_only_roots_(reinterpret_cast<Address*>(
isolate->roots_table().read_only_roots_begin().address())) {}
-ReadOnlyRoots::ReadOnlyRoots(OffThreadFactory* factory)
- : ReadOnlyRoots(factory->read_only_roots()) {}
+ReadOnlyRoots::ReadOnlyRoots(OffThreadIsolate* isolate)
+ : ReadOnlyRoots(isolate->factory()->read_only_roots()) {}
ReadOnlyRoots::ReadOnlyRoots(Address* ro_roots) : read_only_roots_(ro_roots) {}
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 391c1facea..cf84ebf40b 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -7,6 +7,7 @@
#include "src/builtins/accessors.h"
#include "src/common/globals.h"
+#include "src/handles/handles.h"
#include "src/init/heap-symbols.h"
#include "src/objects/objects-definitions.h"
#include "src/objects/objects.h"
@@ -17,7 +18,7 @@ namespace internal {
// Forward declarations.
enum ElementsKind : uint8_t;
-class OffThreadFactory;
+class OffThreadIsolate;
template <typename T>
class Handle;
class Heap;
@@ -83,6 +84,7 @@ class Symbol;
V(Map, object_boilerplate_description_map, ObjectBoilerplateDescriptionMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(Map, code_data_container_map, CodeDataContainerMap) \
+ V(Map, coverage_info_map, CoverageInfoMap) \
V(Map, descriptor_array_map, DescriptorArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, global_dictionary_map, GlobalDictionaryMap) \
@@ -250,8 +252,6 @@ class Symbol;
V(TemplateList, message_listeners, MessageListeners) \
/* Support for async stack traces */ \
V(HeapObject, current_microtask, CurrentMicrotask) \
- /* JSFinalizationGroup objects which need cleanup */ \
- V(Object, dirty_js_finalization_groups, DirtyJSFinalizationGroups) \
/* KeepDuringJob set for JS WeakRefs */ \
V(HeapObject, weak_refs_keep_during_job, WeakRefsKeepDuringJob) \
V(HeapObject, interpreter_entry_trampoline_for_profiling, \
@@ -302,14 +302,15 @@ class Symbol;
#define ACCESSOR_INFO_ROOT_LIST(V) \
ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_INFO_ROOT_LIST_ADAPTER, V)
-#define READ_ONLY_ROOT_LIST(V) \
- STRONG_READ_ONLY_ROOT_LIST(V) \
- INTERNALIZED_STRING_ROOT_LIST(V) \
- PRIVATE_SYMBOL_ROOT_LIST(V) \
- PUBLIC_SYMBOL_ROOT_LIST(V) \
- WELL_KNOWN_SYMBOL_ROOT_LIST(V) \
- STRUCT_MAPS_LIST(V) \
- ALLOCATION_SITE_MAPS_LIST(V) \
+#define READ_ONLY_ROOT_LIST(V) \
+ STRONG_READ_ONLY_ROOT_LIST(V) \
+ INTERNALIZED_STRING_ROOT_LIST(V) \
+ PRIVATE_SYMBOL_ROOT_LIST(V) \
+ PUBLIC_SYMBOL_ROOT_LIST(V) \
+ WELL_KNOWN_SYMBOL_ROOT_LIST(V) \
+ STRUCT_MAPS_LIST(V) \
+ TORQUE_INTERNAL_CLASS_MAPS_LIST(V) \
+ ALLOCATION_SITE_MAPS_LIST(V) \
DATA_HANDLER_MAPS_LIST(V)
#define MUTABLE_ROOT_LIST(V) \
@@ -480,7 +481,7 @@ class ReadOnlyRoots {
V8_INLINE explicit ReadOnlyRoots(Heap* heap);
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
- V8_INLINE explicit ReadOnlyRoots(OffThreadFactory* factory);
+ V8_INLINE explicit ReadOnlyRoots(OffThreadIsolate* isolate);
#define ROOT_ACCESSOR(Type, name, CamelName) \
V8_INLINE class Type name() const; \
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 52c9c3098b..3e72d5e816 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -46,9 +46,14 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
HandleScope scope(isolate);
DCHECK_LE(3, args.length());
int const argc = args.length() - 3;
- // TODO(bmeurer): Remove this Arguments nonsense.
- Arguments argv(argc, args.address_of_arg_at(1));
+ // argv points to the arguments constructed by the JavaScript call.
+#ifdef V8_REVERSE_JSARGS
+ JavaScriptArguments argv(argc, args.address_of_arg_at(0));
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, argc);
+#else
+ JavaScriptArguments argv(argc, args.address_of_arg_at(1));
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
+#endif
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
// TODO(bmeurer): Use MaybeHandle to pass around the AllocationSite.
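
Under V8_REVERSE_JSARGS the constructor moves from slot 0 to slot argc, while new.target stays at argc + 1. A toy model of just that index arithmetic; the real frame holds tagged values and differs in every other respect.

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Logical call: new F(arg0, arg1, arg2), so argc = 3.
  const int argc = 3;
  // Old layout: constructor at slot 0, JS arguments after it.
  std::vector<std::string> old_frame = {"F", "arg", "arg", "arg", "new.target"};
  // V8_REVERSE_JSARGS layout: JS arguments first, constructor at slot argc.
  std::vector<std::string> new_frame = {"arg", "arg", "arg", "F", "new.target"};
  assert(old_frame[0] == "F");     // CONVERT_ARG_HANDLE_CHECKED(..., 0)
  assert(new_frame[argc] == "F");  // CONVERT_ARG_HANDLE_CHECKED(..., argc)
  // new.target stays at argc + 1 in both layouts, matching the diff.
  assert(old_frame[argc + 1] == "new.target");
  assert(new_frame[argc + 1] == "new.target");
  return 0;
}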
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 7c7a8b6207..34259c6e67 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -345,7 +345,7 @@ struct Xor {
// but also includes the ToInteger/ToBigInt conversion that's part of
// https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite
template <template <typename> class Op>
-Object GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
+Object GetModifySetValueInBuffer(RuntimeArguments args, Isolate* isolate) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 2aa1dafd0d..b6e0ebab96 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -157,7 +157,8 @@ inline void SetHomeObject(Isolate* isolate, JSFunction method,
// shared name.
template <typename Dictionary>
MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
- Isolate* isolate, Arguments& args, // NOLINT(runtime/references)
+ Isolate* isolate,
+ RuntimeArguments& args, // NOLINT(runtime/references)
Smi index, Handle<JSObject> home_object, Handle<String> name_prefix,
Handle<Object> key) {
int int_index = index.value();
@@ -195,7 +196,7 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
// shared name.
Object GetMethodWithSharedNameAndSetHomeObject(
Isolate* isolate,
- Arguments& args, // NOLINT(runtime/references)
+ RuntimeArguments& args, // NOLINT(runtime/references)
Object index, JSObject home_object) {
DisallowHeapAllocation no_gc;
int int_index = Smi::ToInt(index);
@@ -235,7 +236,7 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(
template <typename Dictionary>
bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
Handle<JSObject> receiver,
- Arguments& args, // NOLINT(runtime/references)
+ RuntimeArguments& args, // NOLINT(runtime/references)
bool* install_name_accessor = nullptr) {
Handle<Name> name_string = isolate->factory()->name_string();
@@ -312,7 +313,8 @@ bool AddDescriptorsByTemplate(
Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors_template,
Handle<NumberDictionary> elements_dictionary_template,
- Handle<JSObject> receiver, Arguments& args) { // NOLINT(runtime/references)
+ Handle<JSObject> receiver,
+ RuntimeArguments& args) { // NOLINT(runtime/references)
int nof_descriptors = descriptors_template->number_of_descriptors();
Handle<DescriptorArray> descriptors =
@@ -423,7 +425,7 @@ bool AddDescriptorsByTemplate(
Handle<NumberDictionary> elements_dictionary_template,
Handle<FixedArray> computed_properties, Handle<JSObject> receiver,
bool install_name_accessor,
- Arguments& args) { // NOLINT(runtime/references)
+ RuntimeArguments& args) { // NOLINT(runtime/references)
int computed_properties_length = computed_properties->length();
// Shallow-copy properties template.
@@ -511,7 +513,7 @@ bool InitClassPrototype(Isolate* isolate,
Handle<JSObject> prototype,
Handle<HeapObject> prototype_parent,
Handle<JSFunction> constructor,
- Arguments& args) { // NOLINT(runtime/references)
+ RuntimeArguments& args) { // NOLINT(runtime/references)
Handle<Map> map(prototype->map(), isolate);
map = Map::CopyDropDescriptors(isolate, map);
map->set_is_prototype_map(true);
@@ -555,11 +557,10 @@ bool InitClassPrototype(Isolate* isolate,
}
}
-bool InitClassConstructor(Isolate* isolate,
- Handle<ClassBoilerplate> class_boilerplate,
- Handle<HeapObject> constructor_parent,
- Handle<JSFunction> constructor,
- Arguments& args) { // NOLINT(runtime/references)
+bool InitClassConstructor(
+ Isolate* isolate, Handle<ClassBoilerplate> class_boilerplate,
+ Handle<HeapObject> constructor_parent, Handle<JSFunction> constructor,
+ RuntimeArguments& args) { // NOLINT(runtime/references)
Handle<Map> map(constructor->map(), isolate);
map = Map::CopyDropDescriptors(isolate, map);
DCHECK(map->is_prototype_map());
@@ -611,7 +612,7 @@ bool InitClassConstructor(Isolate* isolate,
MaybeHandle<Object> DefineClass(
Isolate* isolate, Handle<ClassBoilerplate> class_boilerplate,
Handle<Object> super_class, Handle<JSFunction> constructor,
- Arguments& args) { // NOLINT(runtime/references)
+ RuntimeArguments& args) { // NOLINT(runtime/references)
Handle<Object> prototype_parent;
Handle<HeapObject> constructor_parent;
@@ -661,11 +662,14 @@ MaybeHandle<Object> DefineClass(
return MaybeHandle<Object>();
}
if (FLAG_trace_maps) {
+ Handle<Map> empty_map;
+ LOG(isolate,
+ MapEvent("InitialMap", empty_map, handle(constructor->map(), isolate),
+ "init class constructor",
+ handle(constructor->shared().DebugName(), isolate)));
LOG(isolate,
- MapEvent("InitialMap", Map(), constructor->map(),
- "init class constructor", constructor->shared().DebugName()));
- LOG(isolate, MapEvent("InitialMap", Map(), prototype->map(),
- "init class prototype"));
+ MapEvent("InitialMap", empty_map, handle(prototype->map(), isolate),
+ "init class prototype"));
}
return prototype;
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 29ad7755b9..93733fe90f 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -142,6 +142,7 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
DCHECK(function->code() ==
isolate->builtins()->builtin(Builtins::kInstantiateAsmJs));
function->set_code(isolate->builtins()->builtin(Builtins::kCompileLazy));
+ DCHECK(!isolate->has_pending_exception());
return Smi::zero();
}
@@ -252,9 +253,10 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
Handle<JSFunction> function(frame->function(), isolate);
if (IsSuitableForOnStackReplacement(isolate, function)) {
if (FLAG_trace_osr) {
- PrintF("[OSR - Compiling: ");
- function->PrintName();
- PrintF(" at AST id %d]\n", ast_id.ToInt());
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[OSR - Compiling: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(), " at AST id %d]\n", ast_id.ToInt());
}
maybe_result = Compiler::GetOptimizedCodeForOSR(function, ast_id, frame);
}
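// OSR tracing in this file now goes through the isolate's CodeTracer rather
// than raw stdout. A minimal sketch of the idiom used in these hunks (the
// redirection flag named below is an assumption, not part of this patch):
//
//   if (FLAG_trace_osr) {
//     CodeTracer::Scope scope(isolate->GetCodeTracer());
//     PrintF(scope.file(), "[OSR - ...]\n");  // honors e.g. --redirect-code-traces
//   }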
@@ -269,7 +271,9 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
if (data.OsrPcOffset().value() >= 0) {
DCHECK(BailoutId(data.OsrBytecodeOffset().value()) == ast_id);
if (FLAG_trace_osr) {
- PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - Entry at AST id %d, offset %d in optimized code]\n",
ast_id.ToInt(), data.OsrPcOffset().value());
}
@@ -298,9 +302,10 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
// the next call, otherwise we'd run unoptimized once more and
// potentially compile for OSR again.
if (FLAG_trace_osr) {
- PrintF("[OSR - Re-marking ");
- function->PrintName();
- PrintF(" for non-concurrent optimization]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[OSR - Re-marking ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(), " for non-concurrent optimization]\n");
}
function->SetOptimizationMarker(OptimizationMarker::kCompileOptimized);
}
@@ -310,9 +315,10 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
// Failed.
if (FLAG_trace_osr) {
- PrintF("[OSR - Failed: ");
- function->PrintName();
- PrintF(" at AST id %d]\n", ast_id.ToInt());
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[OSR - Failed: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(), " at AST id %d]\n", ast_id.ToInt());
}
if (!function->IsOptimized()) {
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index a7e6229ff9..0e1b8fd8fa 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -497,7 +497,7 @@ int ScriptLinePosition(Handle<Script> script, int line) {
return GetWasmFunctionOffset(script->wasm_native_module()->module(), line);
}
- Script::InitLineEnds(script);
+ Script::InitLineEnds(script->GetIsolate(), script);
FixedArray line_ends_array = FixedArray::cast(script->line_ends());
const int line_count = line_ends_array.length();
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 18e6f67c2f..b3d9f26ee5 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -198,6 +198,15 @@ RUNTIME_FUNCTION(Runtime_ThrowAccessedUninitializedVariable) {
NewReferenceError(MessageTemplate::kAccessedUninitializedVariable, name));
}
+RUNTIME_FUNCTION(Runtime_NewError) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_INT32_ARG_CHECKED(template_index, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
+ MessageTemplate message_template = MessageTemplateFromInt(template_index);
+ return *isolate->factory()->NewError(message_template, arg0);
+}
+
RUNTIME_FUNCTION(Runtime_NewTypeError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -400,6 +409,13 @@ RUNTIME_FUNCTION(Runtime_ThrowIteratorError) {
return isolate->Throw(*ErrorUtils::NewIteratorError(isolate, object));
}
+RUNTIME_FUNCTION(Runtime_ThrowSpreadArgIsNullOrUndefined) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ return ErrorUtils::ThrowSpreadArgIsNullOrUndefinedError(isolate, object);
+}
+
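// For context, this runtime function backs the TypeError raised when an
// iterable spread sees a nullish value (illustrative JavaScript, not part of
// this patch):
//
//   [...null];      // TypeError: lands on this runtime function
//   ({ ...null });  // OK: object spread treats null/undefined as empty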
RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index a69f903c67..c38f6e1e4c 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -589,7 +589,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
+ CONVERT_TAGGED_INDEX_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<FeedbackVector> vector;
@@ -627,7 +627,7 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
+ CONVERT_TAGGED_INDEX_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<FeedbackVector> vector;
@@ -645,7 +645,7 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
+ CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 1);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 20f562defd..b93cdf349b 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -41,8 +41,19 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
if (!it.IsFound() && key->IsSymbol() &&
Symbol::cast(*key).is_private_name()) {
- Handle<Object> name_string(Symbol::cast(*key).description(), isolate);
- DCHECK(name_string->IsString());
+ Handle<Symbol> sym = Handle<Symbol>::cast(key);
+ Handle<Object> name(sym->description(), isolate);
+ DCHECK(name->IsString());
+ Handle<String> name_string = Handle<String>::cast(name);
+ if (sym->IsPrivateBrand()) {
+ Handle<String> class_name = (name_string->length() == 0)
+ ? isolate->factory()->anonymous_string()
+ : name_string;
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateBrand,
+ class_name, object),
+ Object);
+ }
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kInvalidPrivateMemberRead,
name_string, object),
@@ -885,7 +896,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_SMI_ARG_CHECKED(flag, 3);
CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 4);
- CONVERT_SMI_ARG_CHECKED(index, 5);
+ CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 5);
if (!maybe_vector->IsUndefined()) {
DCHECK(maybe_vector->IsFeedbackVector());
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index 272502b69f..fd5298077e 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -56,6 +56,14 @@ RUNTIME_FUNCTION(Runtime_StrictNotEqual) {
return isolate->heap()->ToBoolean(!x.StrictEquals(y));
}
+RUNTIME_FUNCTION(Runtime_ReferenceEqual) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ CONVERT_ARG_CHECKED(Object, y, 1);
+ return isolate->heap()->ToBoolean(x == y);
+}
+
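// Note that x == y on two Object values compares the raw tagged words, so
// ReferenceEqual is identity, not ===. A sketch of the difference (assuming
// the two strings are separately allocated heap objects):
//
//   Smi::FromInt(1) vs Smi::FromInt(1)    -> true (same tagged value)
//   two distinct "ab" String objects      -> false, although "ab" === "ab"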
RUNTIME_FUNCTION(Runtime_LessThan) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index afdfa98b0c..038aeb4369 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -15,6 +15,26 @@
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_IsJSProxy) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj.IsJSProxy());
+}
+
+RUNTIME_FUNCTION(Runtime_JSProxyGetHandler) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
+ return proxy.handler();
+}
+
+RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
+ return proxy.target();
+}
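// These are introspection intrinsics; a plausible way to exercise them from
// JS tests (hypothetical snippet, requires --allow-natives-syntax):
//
//   const p = new Proxy({}, { get() { return 1; } });
//   %IsJSProxy(p);          // true
//   %JSProxyGetTarget(p);   // the target object
//   %JSProxyGetHandler(p);  // the handler object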
RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 5a588cc917..52abeef583 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -45,11 +45,10 @@ Object ThrowRedeclarationError(Isolate* isolate, Handle<String> name,
}
// May throw a RedeclarationError.
-Object DeclareGlobal(
- Isolate* isolate, Handle<JSGlobalObject> global, Handle<String> name,
- Handle<Object> value, PropertyAttributes attr, bool is_var,
- RedeclarationType redeclaration_type,
- Handle<FeedbackVector> feedback_vector = Handle<FeedbackVector>()) {
+Object DeclareGlobal(Isolate* isolate, Handle<JSGlobalObject> global,
+ Handle<String> name, Handle<Object> value,
+ PropertyAttributes attr, bool is_var,
+ RedeclarationType redeclaration_type) {
Handle<ScriptContextTable> script_contexts(
global->native_context().script_context_table(), isolate);
ScriptContextTable::LookupResult lookup;
@@ -118,20 +117,70 @@ Object DeclareGlobal(
return ReadOnlyRoots(isolate).undefined_value();
}
-Object DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
- Handle<JSFunction> closure) {
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_DeclareModuleExports) {
HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 1);
+
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
+ Handle<ClosureFeedbackCellArray>::null();
+ if (closure->has_feedback_vector()) {
+ closure_feedback_cell_array = Handle<ClosureFeedbackCellArray>(
+ closure->feedback_vector().closure_feedback_cell_array(), isolate);
+ } else {
+ closure_feedback_cell_array = Handle<ClosureFeedbackCellArray>(
+ closure->closure_feedback_cell_array(), isolate);
+ }
+
+ Handle<Context> context(isolate->context(), isolate);
+ DCHECK(context->IsModuleContext());
+ Handle<FixedArray> exports(
+ SourceTextModule::cast(context->extension()).regular_exports(), isolate);
+
+ int length = declarations->length();
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i++, {
+ Object decl = declarations->get(i);
+ int index;
+ Object value;
+ if (decl.IsSmi()) {
+ index = Smi::ToInt(decl);
+ value = ReadOnlyRoots(isolate).the_hole_value();
+ } else {
+ Handle<SharedFunctionInfo> sfi(
+ SharedFunctionInfo::cast(declarations->get(i)), isolate);
+ int feedback_index = Smi::ToInt(declarations->get(++i));
+ index = Smi::ToInt(declarations->get(++i));
+ Handle<FeedbackCell> feedback_cell =
+ closure_feedback_cell_array->GetFeedbackCell(feedback_index);
+ value = *isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ sfi, context, feedback_cell, AllocationType::kOld);
+ }
+
+ Cell::cast(exports->get(index - 1)).set_value(value);
+ });
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
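// The declarations array decoded above uses a mixed encoding, reconstructed
// here from the loop logic (sketch only):
//
//   [ Smi(index),                            // plain export -> the_hole
//     SFI, Smi(feedback_index), Smi(index),  // function export, 3 entries
//     ... ]
//
// where index is 1-based into the module's regular_exports cell array, hence
// the exports->get(index - 1) above.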
+
+RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 1);
+
Handle<JSGlobalObject> global(isolate->global_object());
Handle<Context> context(isolate->context(), isolate);
- Handle<FeedbackVector> feedback_vector = Handle<FeedbackVector>::null();
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
Handle<ClosureFeedbackCellArray>::null();
if (closure->has_feedback_vector()) {
- feedback_vector =
- Handle<FeedbackVector>(closure->feedback_vector(), isolate);
closure_feedback_cell_array = Handle<ClosureFeedbackCellArray>(
- feedback_vector->closure_feedback_cell_array(), isolate);
+ closure->feedback_vector().closure_feedback_cell_array(), isolate);
} else {
closure_feedback_cell_array = Handle<ClosureFeedbackCellArray>(
closure->closure_feedback_cell_array(), isolate);
@@ -168,27 +217,14 @@ Object DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
// ES#sec-globaldeclarationinstantiation 5.d:
// If hasRestrictedGlobal is true, throw a SyntaxError exception.
- Object result =
- DeclareGlobal(isolate, global, name, value, attr, is_var,
- RedeclarationType::kSyntaxError, feedback_vector);
+ Object result = DeclareGlobal(isolate, global, name, value, attr, is_var,
+ RedeclarationType::kSyntaxError);
if (isolate->has_pending_exception()) return result;
});
return ReadOnlyRoots(isolate).undefined_value();
}
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 1);
-
- return DeclareGlobals(isolate, declarations, closure);
-}
-
namespace {
Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
@@ -619,19 +655,6 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
return *context;
}
-RUNTIME_FUNCTION(Runtime_PushModuleContext) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(SourceTextModule, module, 0);
- CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
-
- Handle<NativeContext> outer(NativeContext::cast(isolate->context()), isolate);
- Handle<Context> context =
- isolate->factory()->NewModuleContext(module, outer, scope_info);
- isolate->set_context(*context);
- return *context;
-}
-
RUNTIME_FUNCTION(Runtime_PushCatchContext) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index fb06a8b8f9..220a4a473c 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -91,8 +91,7 @@ v8::Local<v8::Value> NewRangeException(v8::Isolate* isolate,
const char* message) {
return v8::Exception::RangeError(
v8::String::NewFromOneByte(isolate,
- reinterpret_cast<const uint8_t*>(message),
- v8::NewStringType::kNormal)
+ reinterpret_cast<const uint8_t*>(message))
.ToLocalChecked());
}
@@ -449,9 +448,10 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// Ensure that the function is marked for non-concurrent optimization, so that
// subsequent runs don't also optimize.
if (FLAG_trace_osr) {
- PrintF("[OSR - OptimizeOsr marking ");
- function->ShortPrint();
- PrintF(" for non-concurrent optimization]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[OSR - OptimizeOsr marking ");
+ function->ShortPrint(scope.file());
+ PrintF(scope.file(), " for non-concurrent optimization]\n");
}
JSFunction::EnsureFeedbackVector(function);
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
@@ -730,7 +730,7 @@ RUNTIME_FUNCTION(Runtime_SimulateNewspaceFull) {
HandleScope scope(isolate);
Heap* heap = isolate->heap();
NewSpace* space = heap->new_space();
- AlwaysAllocateScope always_allocate(heap);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
do {
FillUpOneNewSpacePage(isolate, heap);
} while (space->AddFreshPage());
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 2d6fbc585f..170c0bcdbc 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -36,9 +36,19 @@ namespace internal {
// Cast the given argument to a Smi and store its value in an int variable
// with the given name. If the argument is not a Smi we crash safely.
-#define CONVERT_SMI_ARG_CHECKED(name, index) \
- CHECK(args[index].IsSmi()); \
- int name = args.smi_at(index);
+#define CONVERT_SMI_ARG_CHECKED(name, index) \
+ CHECK(args[index].IsSmi()); \
+ int name = args.smi_at(index); \
+  /* A TaggedIndex also passes IsSmi; verify it decodes to the same value */ \
+ DCHECK_IMPLIES(args[index].IsTaggedIndex(), \
+ name == TaggedIndex(args[index].ptr()).value());
+
+// Cast the given argument to a TaggedIndex and store its value in an int
+// variable with the given name. If the argument is not a TaggedIndex we crash
+// safely.
+#define CONVERT_TAGGED_INDEX_ARG_CHECKED(name, index) \
+ CHECK(args[index].IsTaggedIndex()); \
+ int name = args.tagged_index_at(index);
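// Usage sketch for the new macro, mirroring CONVERT_SMI_ARG_CHECKED (the
// runtime function shown is hypothetical):
//
//   RUNTIME_FUNCTION(Runtime_Example) {
//     HandleScope scope(isolate);
//     DCHECK_EQ(1, args.length());
//     CONVERT_TAGGED_INDEX_ARG_CHECKED(slot, 0);  // CHECK-fails otherwise
//     return Smi::FromInt(slot);
//   }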
// Cast the given argument to a double and store it in a variable with
// the given name. If the argument is not a number (as opposed to
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 3e0ab34591..f65922064f 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -19,6 +19,7 @@
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-value.h"
@@ -28,15 +29,33 @@ namespace internal {
namespace {
+template <typename FrameType, StackFrame::Type... skipped_frame_types>
+class FrameFinder {
+ static_assert(sizeof...(skipped_frame_types) > 0,
+ "Specify at least one frame to skip");
+
+ public:
+ explicit FrameFinder(Isolate* isolate)
+ : frame_iterator_(isolate, isolate->thread_local_top()) {
+ for (auto type : {skipped_frame_types...}) {
+ DCHECK_EQ(type, frame_iterator_.frame()->type());
+ USE(type);
+ frame_iterator_.Advance();
+ }
+    // Now type-check the frame the iterator stopped at.
+ DCHECK_NOT_NULL(frame());
+ }
+
+ FrameType* frame() { return FrameType::cast(frame_iterator_.frame()); }
+
+ private:
+ StackFrameIterator frame_iterator_;
+};
+
WasmInstanceObject GetWasmInstanceOnStackTop(Isolate* isolate) {
- StackFrameIterator it(isolate, isolate->thread_local_top());
- // On top: C entry stub.
- DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
- it.Advance();
- // Next: the wasm compiled frame.
- DCHECK(it.frame()->is_wasm_compiled());
- WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
- return frame->wasm_instance();
+ return FrameFinder<WasmCompiledFrame, StackFrame::EXIT>(isolate)
+ .frame()
+ ->wasm_instance();
}
Context GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
@@ -58,7 +77,10 @@ class ClearThreadInWasmScope {
Object ThrowWasmError(Isolate* isolate, MessageTemplate message) {
HandleScope scope(isolate);
- Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(message);
+ Handle<JSObject> error_obj = isolate->factory()->NewWasmRuntimeError(message);
+ JSObject::AddProperty(isolate, error_obj,
+ isolate->factory()->wasm_uncatchable_symbol(),
+ isolate->factory()->true_value(), NONE);
return isolate->Throw(*error_obj);
}
} // namespace
@@ -194,20 +216,15 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
Handle<WasmInstanceObject> instance;
Address frame_pointer = 0;
{
- StackFrameIterator it(isolate, isolate->thread_local_top());
- // On top: C entry stub.
- DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
- it.Advance();
- // Next: the wasm interpreter entry.
- DCHECK_EQ(StackFrame::WASM_INTERPRETER_ENTRY, it.frame()->type());
- instance = handle(
- WasmInterpreterEntryFrame::cast(it.frame())->wasm_instance(), isolate);
- frame_pointer = it.frame()->fp();
+ FrameFinder<WasmInterpreterEntryFrame, StackFrame::EXIT> frame_finder(
+ isolate);
+ instance = handle(frame_finder.frame()->wasm_instance(), isolate);
+ frame_pointer = frame_finder.frame()->fp();
}
// Reserve buffers for argument and return values.
DCHECK_GE(instance->module()->functions.size(), func_index);
- wasm::FunctionSig* sig = instance->module()->functions[func_index].sig;
+ const wasm::FunctionSig* sig = instance->module()->functions[func_index].sig;
DCHECK_GE(kMaxInt, sig->parameter_count());
int num_params = static_cast<int>(sig->parameter_count());
ScopedVector<wasm::WasmValue> wasm_args(num_params);
@@ -221,33 +238,33 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
Address arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_params; ++i) {
#define CASE_ARG_TYPE(type, ctype) \
- case wasm::type: \
- DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)), \
- sizeof(ctype)); \
+ case wasm::ValueType::type: \
+ DCHECK_EQ(sig->GetParam(i).element_size_bytes(), sizeof(ctype)); \
wasm_args[i] = \
wasm::WasmValue(base::ReadUnalignedValue<ctype>(arg_buf_ptr)); \
arg_buf_ptr += sizeof(ctype); \
break;
- switch (sig->GetParam(i)) {
- CASE_ARG_TYPE(kWasmI32, uint32_t)
- CASE_ARG_TYPE(kWasmI64, uint64_t)
- CASE_ARG_TYPE(kWasmF32, float)
- CASE_ARG_TYPE(kWasmF64, double)
+ switch (sig->GetParam(i).kind()) {
+ CASE_ARG_TYPE(kI32, uint32_t)
+ CASE_ARG_TYPE(kI64, uint64_t)
+ CASE_ARG_TYPE(kF32, float)
+ CASE_ARG_TYPE(kF64, double)
#undef CASE_ARG_TYPE
- case wasm::kWasmAnyRef:
- case wasm::kWasmFuncRef:
- case wasm::kWasmNullRef:
- case wasm::kWasmExnRef: {
- DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)),
- kSystemPointerSize);
- Handle<Object> ref(base::ReadUnalignedValue<Object>(arg_buf_ptr),
- isolate);
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kFuncRef:
+ case wasm::ValueType::kNullRef:
+ case wasm::ValueType::kExnRef: {
+ DCHECK_EQ(sig->GetParam(i).element_size_bytes(), kSystemPointerSize);
+ Handle<Object> ref(
+ Object(base::ReadUnalignedValue<Address>(arg_buf_ptr)), isolate);
DCHECK_IMPLIES(sig->GetParam(i) == wasm::kWasmNullRef, ref->IsNull());
wasm_args[i] = wasm::WasmValue(ref);
arg_buf_ptr += kSystemPointerSize;
break;
}
- default:
+ case wasm::ValueType::kStmt:
+ case wasm::ValueType::kS128:
+ case wasm::ValueType::kBottom:
UNREACHABLE();
}
}
@@ -275,24 +292,22 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_returns; ++i) {
#define CASE_RET_TYPE(type, ctype) \
- case wasm::type: \
- DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetReturn(i)), \
- sizeof(ctype)); \
+ case wasm::ValueType::type: \
+ DCHECK_EQ(sig->GetReturn(i).element_size_bytes(), sizeof(ctype)); \
base::WriteUnalignedValue<ctype>(arg_buf_ptr, wasm_rets[i].to<ctype>()); \
arg_buf_ptr += sizeof(ctype); \
break;
- switch (sig->GetReturn(i)) {
- CASE_RET_TYPE(kWasmI32, uint32_t)
- CASE_RET_TYPE(kWasmI64, uint64_t)
- CASE_RET_TYPE(kWasmF32, float)
- CASE_RET_TYPE(kWasmF64, double)
+ switch (sig->GetReturn(i).kind()) {
+ CASE_RET_TYPE(kI32, uint32_t)
+ CASE_RET_TYPE(kI64, uint64_t)
+ CASE_RET_TYPE(kF32, float)
+ CASE_RET_TYPE(kF64, double)
#undef CASE_RET_TYPE
- case wasm::kWasmAnyRef:
- case wasm::kWasmFuncRef:
- case wasm::kWasmNullRef:
- case wasm::kWasmExnRef: {
- DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetReturn(i)),
- kSystemPointerSize);
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kFuncRef:
+ case wasm::ValueType::kNullRef:
+ case wasm::ValueType::kExnRef: {
+ DCHECK_EQ(sig->GetReturn(i).element_size_bytes(), kSystemPointerSize);
DCHECK_IMPLIES(sig->GetReturn(i) == wasm::kWasmNullRef,
wasm_rets[i].to_anyref()->IsNull());
base::WriteUnalignedValue<Object>(arg_buf_ptr,
@@ -333,13 +348,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
ClearThreadInWasmScope flag_scope;
#ifdef DEBUG
- StackFrameIterator it(isolate, isolate->thread_local_top());
- // On top: C entry stub.
- DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
- it.Advance();
- // Next: the wasm lazy compile frame.
- DCHECK_EQ(StackFrame::WASM_COMPILE_LAZY, it.frame()->type());
- DCHECK_EQ(*instance, WasmCompileLazyFrame::cast(it.frame())->wasm_instance());
+ FrameFinder<WasmCompileLazyFrame, StackFrame::EXIT> frame_finder(isolate);
+ DCHECK_EQ(*instance, frame_finder.frame()->wasm_instance());
#endif
DCHECK(isolate->context().is_null());
@@ -372,6 +382,7 @@ Handle<JSArrayBuffer> getSharedArrayBuffer(Handle<WasmInstanceObject> instance,
}
RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
+ ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -382,42 +393,35 @@ RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
return FutexEmulation::Wake(array_buffer, address, count);
}
-double WaitTimeoutInMs(double timeout_ns) {
- return timeout_ns < 0
- ? V8_INFINITY
- : timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
- base::Time::kMicrosecondsPerMillisecond);
-}
-
RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
+ ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
CONVERT_NUMBER_CHECKED(int32_t, expected_value, Int32, args[2]);
- CONVERT_DOUBLE_ARG_CHECKED(timeout_ns, 3);
- double timeout_ms = WaitTimeoutInMs(timeout_ns);
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
+
Handle<JSArrayBuffer> array_buffer =
getSharedArrayBuffer(instance, isolate, address);
- return FutexEmulation::Wait32(isolate, array_buffer, address, expected_value,
- timeout_ms);
+ return FutexEmulation::WaitWasm32(isolate, array_buffer, address,
+ expected_value, timeout_ns->AsInt64());
}
RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
+ ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
- CONVERT_NUMBER_CHECKED(uint32_t, expected_value_high, Uint32, args[2]);
- CONVERT_NUMBER_CHECKED(uint32_t, expected_value_low, Uint32, args[3]);
- CONVERT_DOUBLE_ARG_CHECKED(timeout_ns, 4);
- int64_t expected_value = (static_cast<uint64_t>(expected_value_high) << 32) |
- static_cast<uint64_t>(expected_value_low);
- double timeout_ms = WaitTimeoutInMs(timeout_ns);
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, expected_value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
+
Handle<JSArrayBuffer> array_buffer =
getSharedArrayBuffer(instance, isolate, address);
- return FutexEmulation::Wait64(isolate, array_buffer, address, expected_value,
- timeout_ms);
+ return FutexEmulation::WaitWasm64(isolate, array_buffer, address,
+ expected_value->AsInt64(),
+ timeout_ns->AsInt64());
}
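// The BigInt arguments keep the i64 values exact. The removed helper lowered
// nanoseconds to a double of milliseconds, roughly:
//
//   timeout_ms = timeout_ns < 0 ? V8_INFINITY : timeout_ns / 1e6;
//
// whereas the new code passes timeout_ns->AsInt64() straight through to
// FutexEmulation::WaitWasm32/64, presumably treating negative values as
// "wait forever" (an assumption inferred from the removed helper).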
namespace {
@@ -497,17 +501,15 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
RUNTIME_FUNCTION(Runtime_WasmTableInit) {
HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_index, 0);
- CONVERT_UINT32_ARG_CHECKED(elem_segment_index, 1);
- CONVERT_UINT32_ARG_CHECKED(dst, 2);
- CONVERT_UINT32_ARG_CHECKED(src, 3);
- CONVERT_UINT32_ARG_CHECKED(count, 4);
+ DCHECK_EQ(6, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_UINT32_ARG_CHECKED(table_index, 1);
+ CONVERT_UINT32_ARG_CHECKED(elem_segment_index, 2);
+ CONVERT_UINT32_ARG_CHECKED(dst, 3);
+ CONVERT_UINT32_ARG_CHECKED(src, 4);
+ CONVERT_UINT32_ARG_CHECKED(count, 5);
- DCHECK(isolate->context().is_null());
- isolate->set_context(instance->native_context());
+ DCHECK(!isolate->context().is_null());
bool oob = !WasmInstanceObject::InitTableEntries(
isolate, instance, table_index, elem_segment_index, dst, src, count);
@@ -517,16 +519,15 @@ RUNTIME_FUNCTION(Runtime_WasmTableInit) {
RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
- DCHECK(isolate->context().is_null());
- isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_dst_index, 0);
- CONVERT_UINT32_ARG_CHECKED(table_src_index, 1);
- CONVERT_UINT32_ARG_CHECKED(dst, 2);
- CONVERT_UINT32_ARG_CHECKED(src, 3);
- CONVERT_UINT32_ARG_CHECKED(count, 4);
+ DCHECK_EQ(6, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_UINT32_ARG_CHECKED(table_dst_index, 1);
+ CONVERT_UINT32_ARG_CHECKED(table_src_index, 2);
+ CONVERT_UINT32_ARG_CHECKED(dst, 3);
+ CONVERT_UINT32_ARG_CHECKED(src, 4);
+ CONVERT_UINT32_ARG_CHECKED(count, 5);
+
+ DCHECK(!isolate->context().is_null());
bool oob = !WasmInstanceObject::CopyTableEntries(
isolate, instance, table_dst_index, table_src_index, dst, src, count);
@@ -576,11 +577,11 @@ RUNTIME_FUNCTION(Runtime_WasmTableFill) {
  // When table.fill would go out-of-bounds, we trap before writing any
  // entries; the bounds check below precedes the fill.
uint32_t fill_count = std::min(count, table_size - start);
- WasmTableObject::Fill(isolate, table, start, value, fill_count);
-
if (fill_count < count) {
return ThrowTableOutOfBounds(isolate, instance);
}
+ WasmTableObject::Fill(isolate, table, start, value, fill_count);
+
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -602,5 +603,55 @@ RUNTIME_FUNCTION(Runtime_WasmNewMultiReturnJSArray) {
fixed_array_handle, PACKED_ELEMENTS);
return *array;
}
+
+RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ FrameFinder<WasmCompiledFrame, StackFrame::EXIT, StackFrame::WASM_DEBUG_BREAK>
+ frame_finder(isolate);
+ auto instance = handle(frame_finder.frame()->wasm_instance(), isolate);
+ int position = frame_finder.frame()->position();
+ isolate->set_context(instance->native_context());
+
+ // Enter the debugger.
+ DebugScope debug_scope(isolate->debug());
+
+ const auto undefined = ReadOnlyRoots(isolate).undefined_value();
+ WasmCompiledFrame* frame = frame_finder.frame();
+ auto* debug_info = frame->native_module()->GetDebugInfo();
+ if (debug_info->IsStepping(frame)) {
+ debug_info->ClearStepping();
+ isolate->debug()->ClearStepping();
+ isolate->debug()->OnDebugBreak(isolate->factory()->empty_fixed_array());
+ return undefined;
+ }
+
+ // Check whether we hit a breakpoint.
+ Handle<Script> script(instance->module_object().script(), isolate);
+ Handle<FixedArray> breakpoints;
+ if (WasmScript::CheckBreakPoints(isolate, script, position)
+ .ToHandle(&breakpoints)) {
+ debug_info->ClearStepping();
+ isolate->debug()->ClearStepping();
+ if (isolate->debug()->break_points_active()) {
+ // We hit one or several breakpoints. Notify the debug listeners.
+ isolate->debug()->OnDebugBreak(breakpoints);
+ }
+ } else {
+ // Unused breakpoint. Possible scenarios:
+ // 1. We hit a breakpoint that was already removed,
+ // 2. We hit a stepping breakpoint after resuming,
+    // 3. We hit a stepping breakpoint during a stepOver on a recursive call,
+ // 4. The breakpoint was set in a different isolate.
+ // We can handle the first three cases by simply removing the breakpoint (if
+ // it exists), since this will also recompile the function without the
+ // stepping breakpoints.
+ // TODO(thibaudm/clemensb): handle case 4.
+ debug_info->RemoveBreakpoint(frame->function_index(), position, isolate);
+ }
+
+ return undefined;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 2bba57f910..e51338ba6d 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -192,6 +192,34 @@ bool Runtime::MayAllocate(FunctionId id) {
}
}
+bool Runtime::IsWhitelistedForFuzzing(FunctionId id) {
+ CHECK(FLAG_allow_natives_for_fuzzing);
+ switch (id) {
+ // Runtime functions whitelisted for all fuzzers. Only add functions that
+ // help increase coverage.
+ case Runtime::kArrayBufferDetach:
+ case Runtime::kDeoptimizeFunction:
+ case Runtime::kDeoptimizeNow:
+ case Runtime::kEnableCodeLoggingForTesting:
+ case Runtime::kGetUndetectable:
+ case Runtime::kNeverOptimizeFunction:
+ case Runtime::kOptimizeFunctionOnNextCall:
+ case Runtime::kOptimizeOsr:
+ case Runtime::kPrepareFunctionForOptimization:
+ case Runtime::kSetAllocationTimeout:
+ case Runtime::kSimulateNewspaceFull:
+ return true;
+ // Runtime functions only permitted for non-differential fuzzers.
+ // This list may contain functions performing extra checks or returning
+ // different values in the context of different flags passed to V8.
+ case Runtime::kHeapObjectVerify:
+ case Runtime::kIsBeingInterpreted:
+ return !FLAG_allow_natives_for_differential_fuzzing;
+ default:
+ return false;
+ }
+}
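// A hypothetical sketch of a fuzzer-facing call site for this predicate (the
// code below is an assumption for illustration, not part of this patch):
//
//   if (FLAG_allow_natives_for_fuzzing &&
//       !Runtime::IsWhitelistedForFuzzing(function->function_id)) {
//     // Replace the non-whitelisted %Intrinsic(...) with an inert value.
//     return factory->undefined_value();
//   }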
+
const Runtime::Function* Runtime::FunctionForName(const unsigned char* name,
int length) {
base::CallOnce(&initialize_function_name_map_once,
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index c9c4dd5931..57500be510 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -219,6 +219,7 @@ namespace internal {
F(GetTemplateObject, 3, 1) \
F(IncrementUseCounter, 1, 1) \
F(BytecodeBudgetInterrupt, 1, 1) \
+ F(NewError, 2, 1) \
F(NewReferenceError, 2, 1) \
F(NewSyntaxError, 2, 1) \
F(NewTypeError, 2, 1) \
@@ -238,6 +239,7 @@ namespace internal {
F(ThrowInvalidStringLength, 0, 1) \
F(ThrowInvalidTypedArrayAlignment, 2, 1) \
F(ThrowIteratorError, 1, 1) \
+ F(ThrowSpreadArgIsNullOrUndefined, 1, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
F(ThrowNotConstructor, 1, 1) \
F(ThrowPatternAssignmentNonCoercible, 1, 1) \
@@ -348,7 +350,8 @@ namespace internal {
F(LessThanOrEqual, 2, 1) \
F(NotEqual, 2, 1) \
F(StrictEqual, 2, 1) \
- F(StrictNotEqual, 2, 1)
+ F(StrictNotEqual, 2, 1) \
+ F(ReferenceEqual, 2, 1)
#define FOR_EACH_INTRINSIC_PROMISE(F, I) \
F(EnqueueMicrotask, 1, 1) \
@@ -371,6 +374,9 @@ namespace internal {
F(CheckProxyHasTrapResult, 2, 1) \
F(CheckProxyDeleteTrapResult, 2, 1) \
F(GetPropertyWithReceiver, 3, 1) \
+ F(IsJSProxy, 1, 1) \
+ F(JSProxyGetHandler, 1, 1) \
+ F(JSProxyGetTarget, 1, 1) \
F(SetPropertyWithReceiver, 4, 1)
#define FOR_EACH_INTRINSIC_REGEXP(F, I) \
@@ -387,6 +393,7 @@ namespace internal {
F(DeclareEvalFunction, 2, 1) \
F(DeclareEvalVar, 1, 1) \
F(DeclareGlobals, 2, 1) \
+ F(DeclareModuleExports, 2, 1) \
F(DeleteLookupSlot, 1, 1) \
F(LoadLookupSlot, 1, 1) \
F(LoadLookupSlotInsideTypeof, 1, 1) \
@@ -401,7 +408,6 @@ namespace internal {
F(NewStrictArguments, 1, 1) \
F(PushBlockContext, 1, 1) \
F(PushCatchContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
F(PushWithContext, 2, 1) \
F(StoreGlobalNoHoleCheckForReplLet, 2, 1) \
F(StoreLookupSlot_Sloppy, 2, 1) \
@@ -563,14 +569,15 @@ namespace internal {
F(WasmRefFunc, 1, 1) \
F(WasmFunctionTableGet, 3, 1) \
F(WasmFunctionTableSet, 4, 1) \
- F(WasmTableInit, 5, 1) \
- F(WasmTableCopy, 5, 1) \
+ F(WasmTableInit, 6, 1) \
+ F(WasmTableCopy, 6, 1) \
F(WasmTableGrow, 3, 1) \
F(WasmTableFill, 4, 1) \
F(WasmIsValidFuncRefValue, 1, 1) \
F(WasmCompileLazy, 2, 1) \
F(WasmNewMultiReturnFixedArray, 1, 1) \
- F(WasmNewMultiReturnJSArray, 1, 1)
+ F(WasmNewMultiReturnJSArray, 1, 1) \
+ F(WasmDebugBreak, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, I) \
F(DebugBreakOnBytecode, 1, 2) \
@@ -709,10 +716,14 @@ class Runtime : public AllStatic {
// sentinel, always.
static bool IsNonReturning(FunctionId id);
- // Check if a runtime function with the given {id} may trigger a heap
+ // Check if a runtime function with the given {id} may trigger a heap
// allocation.
static bool MayAllocate(FunctionId id);
+ // Check if a runtime function with the given {id} is whitelisted for
+  // use by fuzzers.
+ static bool IsWhitelistedForFuzzing(FunctionId id);
+
// Get the intrinsic function with the given name.
static const Function* FunctionForName(const unsigned char* name, int length);
diff --git a/deps/v8/src/snapshot/DEPS b/deps/v8/src/snapshot/DEPS
index 70ef55e340..93f17c9286 100644
--- a/deps/v8/src/snapshot/DEPS
+++ b/deps/v8/src/snapshot/DEPS
@@ -2,6 +2,9 @@ specific_include_rules = {
"mksnapshot\.cc": [
"+include/libplatform/libplatform.h",
],
+ "snapshot-compression.cc": [
+ "+third_party/zlib",
+ ],
"serializer-common.cc": [
"+third_party/zlib",
],
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 30098bab46..d155313872 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -190,12 +190,12 @@ void CodeSerializer::SerializeObject(HeapObject obj) {
// bytecode array stored within the InterpreterData, which is the important
// information. On deserialization we'll create our code objects again, if
// --interpreted-frames-native-stack is on. See v8:9122 for more context
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
if (V8_UNLIKELY(FLAG_interpreted_frames_native_stack) &&
obj.IsInterpreterData()) {
obj = InterpreterData::cast(obj).bytecode_array();
}
-#endif // V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_S390X
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj.IsMap());
@@ -215,7 +215,7 @@ void CodeSerializer::SerializeGeneric(HeapObject heap_object) {
serializer.Serialize();
}
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
// NOTE(mmarchini): when FLAG_interpreted_frames_native_stack is on, we want to
// create duplicates of InterpreterEntryTrampoline for the deserialized
// functions, otherwise we'll call the builtin IET for those functions (which
@@ -255,7 +255,7 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
column_num));
}
}
-#endif // V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_S390X
MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
Isolate* isolate, ScriptData* cached_data, Handle<String> source,
@@ -301,11 +301,11 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
isolate->is_profiling() ||
isolate->code_event_dispatcher()->IsListeningToCodeEvents();
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
if (V8_UNLIKELY(FLAG_interpreted_frames_native_stack))
CreateInterpreterDataForDeserializedCode(isolate, result,
log_code_creation);
-#endif // V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_S390X
bool needs_source_positions = isolate->NeedsSourcePositionsForProfiling();
@@ -323,7 +323,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
result->StartPosition(), result->EndPosition(), *name));
}
if (log_code_creation) {
- Script::InitLineEnds(script);
+ Script::InitLineEnds(isolate, script);
SharedFunctionInfo::ScriptIterator iter(isolate, *script);
for (SharedFunctionInfo info = iter.Next(); !info.is_null();
@@ -350,7 +350,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
if (needs_source_positions) {
Handle<Script> script(Script::cast(result->script()), isolate);
- Script::InitLineEnds(script);
+ Script::InitLineEnds(isolate, script);
}
return scope.CloseAndEscape(result);
}
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
index ed179f6842..e58e7b66ac 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/deserializer-allocator.cc
@@ -45,10 +45,12 @@ Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
int chunk_index = current_chunk_[space_number];
DCHECK_LE(high_water_[space_number], reservation[chunk_index].end);
#endif
+#ifndef V8_ENABLE_THIRD_PARTY_HEAP
if (space == SnapshotSpace::kCode)
MemoryChunk::FromAddress(address)
->GetCodeObjectRegistry()
->RegisterNewlyAllocatedCodeObject(address);
+#endif
return address;
}
}
@@ -56,16 +58,22 @@ Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
Address address;
HeapObject obj;
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- AllocationType type = (space == SnapshotSpace::kCode)
- ? AllocationType::kCode
- : AllocationType::kYoung;
- return heap_->DeserializerAllocate(type, size);
- }
- // TODO(steveblackburn) The following logic and AllocateRaw() above should
- // be lifted into Heap, pushing the logic to heap_->DeserializerAllocate().
- // The implementation below and AllocateRaw() above leak heap abstractions
- // such as particular structure of heap spaces.
+ // TODO(steveblackburn) Note that the third party heap allocates objects
+ // at reservation time, which means alignment must be acted on at
+ // reservation time, not here. Since the current encoding does not
+ // inform the reservation of the alignment, it must be conservatively
+ // aligned.
+ //
+ // A more general approach will be to avoid reservation altogether, and
+ // instead of chunk index/offset encoding, simply encode backreferences
+ // by index (this can be optimized by applying something like register
+ // allocation to keep the metadata needed to record the in-flight
+ // backreferences minimal). This has the significant advantage of
+ // abstracting away the details of the memory allocator from this code.
+ // At each allocation, the regular allocator performs allocation,
+ // and a fixed-sized table is used to track and fix all back references.
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return AllocateRaw(space, size);
+
if (next_alignment_ != kWordAligned) {
const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
address = AllocateRaw(space, reserved);
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 127cfae29e..fdb4babe07 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -301,8 +301,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
}
} else if (obj.IsJSArrayBuffer()) {
JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
- buffer.set_extension(nullptr);
- // Only fixup for the off-heap case. This may trigger GC.
+ // Postpone allocation of backing store to avoid triggering the GC.
if (buffer.backing_store() != nullptr) {
new_off_heap_array_buffers_.push_back(handle(buffer, isolate_));
}
@@ -668,6 +667,7 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
}
case kOffHeapBackingStore: {
+ AlwaysAllocateScope scope(isolate->heap());
int byte_length = source_.GetInt();
std::unique_ptr<BackingStore> backing_store =
BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared,
@@ -755,8 +755,36 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
STATIC_ASSERT(kNumberOfFixedRawData == 32);
SIXTEEN_CASES(kFixedRawData)
SIXTEEN_CASES(kFixedRawData + 16) {
- int size_in_tagged = data - kFixedRawDataStart;
- source_.CopyRaw(current.ToVoidPtr(), size_in_tagged * kTaggedSize);
+ // This bytecode has become very confusing with recent changes due to
+ // pointer compression. From comments and variable names it implies that
+      // pointer compression. Its comments and variable names imply that
+ // kTaggedSize since https://chromium-review.googlesource.com/c/1388529.
+ //
+ // Also, contents can be (tagged) Smis or just a raw byte sequence. In
+ // the case of Smis we must be careful when deserializing into full
+ // object slots. It is not valid to deserialize a sequence of >1 Smis
+ // into full object slots in compressed pointer builds.
+ //
+ // Likewise one must pay attention to endianness when deserializing a
+ // smi into a full object slot. That is what the code below is trying to
+ // address.
+ //
+ // The solution below works because we currently never deserialize >1
+ // Smi into full object slots, or raw byte sequences into full object
+ // slots. But those assumptions are fragile.
+ //
+ const int size_in_tagged = data - kFixedRawDataStart;
+ const int size_in_bytes = size_in_tagged * kTaggedSize;
+ Address addr = current.address();
+ DCHECK_IMPLIES(kTaggedSize != TSlot::kSlotDataSize,
+ size_in_tagged == 1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ if (kTaggedSize != TSlot::kSlotDataSize) {
+ // Should only be reached when deserializing a Smi root.
+ addr += kTaggedSize;
+ }
+#endif
+ source_.CopyRaw(reinterpret_cast<void*>(addr), size_in_bytes);
current += size_in_tagged;
break;
}
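// Worked example of the big-endian fixup above, assuming 8-byte slots with
// 4-byte tagged values (pointer compression):
//
//   full slot at addr: [ b0 b1 b2 b3 | b4 b5 b6 b7 ]
//   little-endian: the 4-byte Smi payload starts at addr + 0
//   big-endian:    the low-order half lives at addr + kTaggedSize (addr + 4),
//                  so addr is bumped before CopyRaw writes the 4 bytes.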
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 871aa6b3a7..62814a881a 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -29,7 +29,7 @@ class Object;
// of objects found in code.
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \
- V8_EMBEDDED_CONSTANT_POOL
+ defined(V8_TARGET_ARCH_PPC64) || V8_EMBEDDED_CONSTANT_POOL
#define V8_CODE_EMBEDS_OBJECT_POINTER 1
#else
#define V8_CODE_EMBEDS_OBJECT_POINTER 0
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index c5e131e3fe..f1d6efc767 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -118,7 +118,9 @@ int PlatformEmbeddedFileWriterGeneric::HexLiteral(uint64_t value) {
return fprintf(fp_, "0x%" PRIx64, value);
}
-void PlatformEmbeddedFileWriterGeneric::FilePrologue() {}
+void PlatformEmbeddedFileWriterGeneric::FilePrologue() {
+ // TODO(v8:10026): Add ELF note required for BTI.
+}
void PlatformEmbeddedFileWriterGeneric::DeclareExternalFilename(
int fileid, const char* filename) {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index 86ac01d6fb..f722ffe4ae 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -35,15 +35,6 @@ namespace internal {
// http://infocenter.arm.com/help/topic/com.arm.doc.dui0802b/index.html
// Microsoft ARM assembler and assembly language docs:
// https://docs.microsoft.com/en-us/cpp/assembler/arm/arm-assembler-reference
-#if defined(V8_COMPILER_IS_MSVC)
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_ARM)
-#define V8_ASSEMBLER_IS_MARMASM
-#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
-#define V8_ASSEMBLER_IS_MASM
-#else
-#error Unknown Windows assembler target architecture.
-#endif
-#endif
// Name mangling.
// Symbols are prefixed with an underscore on 32-bit architectures.
@@ -64,44 +55,6 @@ namespace internal {
namespace {
-const char* DirectiveAsString(DataDirective directive) {
-#if defined(V8_ASSEMBLER_IS_MASM)
- switch (directive) {
- case kByte:
- return "BYTE";
- case kLong:
- return "DWORD";
- case kQuad:
- return "QWORD";
- default:
- UNREACHABLE();
- }
-#elif defined(V8_ASSEMBLER_IS_MARMASM)
- switch (directive) {
- case kByte:
- return "DCB";
- case kLong:
- return "DCDU";
- case kQuad:
- return "DCQU";
- default:
- UNREACHABLE();
- }
-#else
- switch (directive) {
- case kByte:
- return ".byte";
- case kLong:
- return ".long";
- case kQuad:
- return ".quad";
- case kOcta:
- return ".octa";
- }
- UNREACHABLE();
-#endif
-}
-
#if defined(V8_OS_WIN_X64)
void WriteUnwindInfoEntry(PlatformEmbeddedFileWriterWin* w,
@@ -308,6 +261,47 @@ void EmitUnwindData(PlatformEmbeddedFileWriterWin* w,
} // namespace
+const char* PlatformEmbeddedFileWriterWin::DirectiveAsString(
+ DataDirective directive) {
+#if defined(V8_COMPILER_IS_MSVC)
+ if (target_arch_ != EmbeddedTargetArch::kArm64) {
+ switch (directive) {
+ case kByte:
+ return "BYTE";
+ case kLong:
+ return "DWORD";
+ case kQuad:
+ return "QWORD";
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (directive) {
+ case kByte:
+ return "DCB";
+ case kLong:
+ return "DCDU";
+ case kQuad:
+ return "DCQU";
+ default:
+ UNREACHABLE();
+ }
+ }
+#else
+ switch (directive) {
+ case kByte:
+ return ".byte";
+ case kLong:
+ return ".long";
+ case kQuad:
+ return ".quad";
+ case kOcta:
+ return ".octa";
+ }
+ UNREACHABLE();
+#endif
+}
+
void PlatformEmbeddedFileWriterWin::MaybeEmitUnwindData(
const char* unwind_info_symbol, const char* embedded_blob_data_symbol,
const EmbeddedData* blob, const void* unwind_infos) {
@@ -328,20 +322,57 @@ void PlatformEmbeddedFileWriterWin::MaybeEmitUnwindData(
#endif // V8_OS_WIN64
}
-// Windows, MSVC, not arm/arm64.
+// Windows, MSVC
// -----------------------------------------------------------------------------
-#if defined(V8_ASSEMBLER_IS_MASM)
+#if defined(V8_COMPILER_IS_MSVC)
-// For MSVC builds we emit assembly in MASM syntax.
+// For x64 MSVC builds we emit assembly in MASM syntax.
// See https://docs.microsoft.com/en-us/cpp/assembler/masm/directives-reference.
+// For Arm64 builds, we emit assembly in MARMASM syntax.
+// Note that the same mksnapshot has to be used to compile the host and target.
-void PlatformEmbeddedFileWriterWin::SectionText() { fprintf(fp_, ".CODE\n"); }
+// The AARCH64 ABI requires instructions be 4-byte-aligned and Windows does
+// not have a stricter alignment requirement (see the TEXTAREA macro of
+// kxarm64.h in the Windows SDK), so code is 4-byte-aligned.
+// The data fields in the emitted assembly tend to be accessed with 8-byte
+// LDR instructions, so data is 8-byte-aligned.
+//
+// armasm64's warning A4228 states
+// Alignment value exceeds AREA alignment; alignment not guaranteed
+// To ensure that ALIGN directives are honored, their values are defined as
+// equal to their corresponding AREA's ALIGN attributes.
-void PlatformEmbeddedFileWriterWin::SectionData() { fprintf(fp_, ".DATA\n"); }
+#define ARM64_DATA_ALIGNMENT_POWER (3)
+#define ARM64_DATA_ALIGNMENT (1 << ARM64_DATA_ALIGNMENT_POWER)
+#define ARM64_CODE_ALIGNMENT_POWER (2)
+#define ARM64_CODE_ALIGNMENT (1 << ARM64_CODE_ALIGNMENT_POWER)
+
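// Concretely: an AREA's ALIGN attribute is an exponent, so code sections use
// ALIGN=2 (1 << 2 = 4 bytes, the AArch64 instruction size) and data sections
// use ALIGN=3 (1 << 3 = 8 bytes, suiting the 8-byte LDRs mentioned above).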
+void PlatformEmbeddedFileWriterWin::SectionText() {
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " AREA |.text|, CODE, ALIGN=%d, READONLY\n",
+ ARM64_CODE_ALIGNMENT_POWER);
+ } else {
+ fprintf(fp_, ".CODE\n");
+ }
+}
+
+void PlatformEmbeddedFileWriterWin::SectionData() {
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " AREA |.data|, DATA, ALIGN=%d, READWRITE\n",
+ ARM64_DATA_ALIGNMENT_POWER);
+ } else {
+ fprintf(fp_, ".DATA\n");
+ }
+}
void PlatformEmbeddedFileWriterWin::SectionRoData() {
- fprintf(fp_, ".CONST\n");
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " AREA |.rodata|, DATA, ALIGN=%d, READONLY\n",
+ ARM64_DATA_ALIGNMENT_POWER);
+ } else {
+ fprintf(fp_, ".CONST\n");
+ }
}
void PlatformEmbeddedFileWriterWin::DeclareUint32(const char* name,
@@ -359,48 +390,93 @@ void PlatformEmbeddedFileWriterWin::DeclarePointerToSymbol(const char* name,
}
void PlatformEmbeddedFileWriterWin::StartPdataSection() {
- fprintf(fp_, "OPTION DOTNAME\n");
- fprintf(fp_, ".pdata SEGMENT DWORD READ ''\n");
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " AREA |.pdata|, DATA, ALIGN=%d, READONLY\n",
+ ARM64_DATA_ALIGNMENT_POWER);
+ } else {
+ fprintf(fp_, "OPTION DOTNAME\n");
+ fprintf(fp_, ".pdata SEGMENT DWORD READ ''\n");
+ }
}
void PlatformEmbeddedFileWriterWin::EndPdataSection() {
- fprintf(fp_, ".pdata ENDS\n");
+ if (target_arch_ != EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, ".pdata ENDS\n");
+ }
}
void PlatformEmbeddedFileWriterWin::StartXdataSection() {
- fprintf(fp_, "OPTION DOTNAME\n");
- fprintf(fp_, ".xdata SEGMENT DWORD READ ''\n");
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " AREA |.xdata|, DATA, ALIGN=%d, READONLY\n",
+ ARM64_DATA_ALIGNMENT_POWER);
+ } else {
+ fprintf(fp_, "OPTION DOTNAME\n");
+ fprintf(fp_, ".xdata SEGMENT DWORD READ ''\n");
+ }
}
void PlatformEmbeddedFileWriterWin::EndXdataSection() {
- fprintf(fp_, ".xdata ENDS\n");
+ if (target_arch_ != EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, ".xdata ENDS\n");
+ }
}
void PlatformEmbeddedFileWriterWin::DeclareExternalFunction(const char* name) {
- fprintf(fp_, "EXTERN %s : PROC\n", name);
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " EXTERN %s \n", name);
+ } else {
+ fprintf(fp_, "EXTERN %s : PROC\n", name);
+ }
}
void PlatformEmbeddedFileWriterWin::DeclareRvaToSymbol(const char* name,
uint64_t offset) {
- if (offset > 0) {
- fprintf(fp_, "DD IMAGEREL %s+%llu\n", name, offset);
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ if (offset > 0) {
+ fprintf(fp_, " DCD %s + %llu\n", name, offset);
+ } else {
+ fprintf(fp_, " DCD %s\n", name);
+ }
+    // The default relocation entry generated by MSVC armasm64.exe for the
+    // DCD directive is IMAGE_REL_ARM64_ADDR64, which relocates a 64-bit
+    // pointer rather than a 32-bit RVA. Append RELOC with
+    // IMAGE_REL_ARM64_ADDR32NB(2) to generate the correct relocation entry
+    // for a 32-bit RVA.
+ fprintf(fp_, " RELOC 2\n");
} else {
- fprintf(fp_, "DD IMAGEREL %s\n", name);
+ if (offset > 0) {
+ fprintf(fp_, "DD IMAGEREL %s+%llu\n", name, offset);
+ } else {
+ fprintf(fp_, "DD IMAGEREL %s\n", name);
+ }
}
}
void PlatformEmbeddedFileWriterWin::DeclareSymbolGlobal(const char* name) {
- fprintf(fp_, "PUBLIC %s%s\n", SYMBOL_PREFIX, name);
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " EXPORT %s%s\n", SYMBOL_PREFIX, name);
+ } else {
+ fprintf(fp_, "PUBLIC %s%s\n", SYMBOL_PREFIX, name);
+ }
}
void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
- // Diverges from other platforms due to compile error
- // 'invalid combination with segment alignment'.
- fprintf(fp_, "ALIGN 4\n");
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " ALIGN %d\n", ARM64_CODE_ALIGNMENT);
+ } else {
+ // Diverges from other platforms due to compile error
+ // 'invalid combination with segment alignment'.
+ fprintf(fp_, "ALIGN 4\n");
+ }
}
void PlatformEmbeddedFileWriterWin::AlignToDataAlignment() {
- fprintf(fp_, "ALIGN 4\n");
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " ALIGN %d\n", ARM64_DATA_ALIGNMENT);
+ } else {
+ fprintf(fp_, "ALIGN 4\n");
+ }
}
void PlatformEmbeddedFileWriterWin::Comment(const char* string) {
@@ -408,8 +484,13 @@ void PlatformEmbeddedFileWriterWin::Comment(const char* string) {
}
void PlatformEmbeddedFileWriterWin::DeclareLabel(const char* name) {
- fprintf(fp_, "%s%s LABEL %s\n", SYMBOL_PREFIX, name,
- DirectiveAsString(kByte));
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, "%s%s\t", SYMBOL_PREFIX, name);
+ } else {
+ fprintf(fp_, "%s%s LABEL %s\n", SYMBOL_PREFIX, name,
+ DirectiveAsString(kByte));
+ }
}
void PlatformEmbeddedFileWriterWin::SourceInfo(int fileid, const char* filename,
@@ -421,19 +502,36 @@ void PlatformEmbeddedFileWriterWin::SourceInfo(int fileid, const char* filename,
// TODO(mmarchini): investigate emitting size annotations for Windows
void PlatformEmbeddedFileWriterWin::DeclareFunctionBegin(const char* name,
uint32_t size) {
- fprintf(fp_, "%s%s PROC\n", SYMBOL_PREFIX, name);
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, "%s%s FUNCTION\n", SYMBOL_PREFIX, name);
+ } else {
+ fprintf(fp_, "%s%s PROC\n", SYMBOL_PREFIX, name);
+ }
}
void PlatformEmbeddedFileWriterWin::DeclareFunctionEnd(const char* name) {
- fprintf(fp_, "%s%s ENDP\n", SYMBOL_PREFIX, name);
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " ENDFUNC\n");
+ } else {
+ fprintf(fp_, "%s%s ENDP\n", SYMBOL_PREFIX, name);
+ }
}
int PlatformEmbeddedFileWriterWin::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0%" PRIx64 "h", value);
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+ } else {
+ return fprintf(fp_, "0%" PRIx64 "h", value);
+ }
}
void PlatformEmbeddedFileWriterWin::FilePrologue() {
- if (target_arch_ != EmbeddedTargetArch::kX64) {
+ if (target_arch_ != EmbeddedTargetArch::kArm64 &&
+ target_arch_ != EmbeddedTargetArch::kX64) {
+ // x86 falls into this case
fprintf(fp_, ".MODEL FLAT\n");
}
}
@@ -441,110 +539,14 @@ void PlatformEmbeddedFileWriterWin::FilePrologue() {
void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
int fileid, const char* filename) {}
-void PlatformEmbeddedFileWriterWin::FileEpilogue() { fprintf(fp_, "END\n"); }
-
-int PlatformEmbeddedFileWriterWin::IndentedDataDirective(
- DataDirective directive) {
- return fprintf(fp_, " %s ", DirectiveAsString(directive));
-}
-
-// Windows, MSVC, arm/arm64.
-// -----------------------------------------------------------------------------
-
-#elif defined(V8_ASSEMBLER_IS_MARMASM)
-
-// The AARCH64 ABI requires instructions be 4-byte-aligned and Windows does
-// not have a stricter alignment requirement (see the TEXTAREA macro of
-// kxarm64.h in the Windows SDK), so code is 4-byte-aligned.
-// The data fields in the emitted assembly tend to be accessed with 8-byte
-// LDR instructions, so data is 8-byte-aligned.
-//
-// armasm64's warning A4228 states
-// Alignment value exceeds AREA alignment; alignment not guaranteed
-// To ensure that ALIGN directives are honored, their values are defined as
-// equal to their corresponding AREA's ALIGN attributes.
-
-#define ARM64_DATA_ALIGNMENT_POWER (3)
-#define ARM64_DATA_ALIGNMENT (1 << ARM64_DATA_ALIGNMENT_POWER)
-#define ARM64_CODE_ALIGNMENT_POWER (2)
-#define ARM64_CODE_ALIGNMENT (1 << ARM64_CODE_ALIGNMENT_POWER)
-
-void PlatformEmbeddedFileWriterWin::SectionText() {
- fprintf(fp_, " AREA |.text|, CODE, ALIGN=%d, READONLY\n",
- ARM64_CODE_ALIGNMENT_POWER);
-}
-
-void PlatformEmbeddedFileWriterWin::SectionData() {
- fprintf(fp_, " AREA |.data|, DATA, ALIGN=%d, READWRITE\n",
- ARM64_DATA_ALIGNMENT_POWER);
-}
-
-void PlatformEmbeddedFileWriterWin::SectionRoData() {
- fprintf(fp_, " AREA |.rodata|, DATA, ALIGN=%d, READONLY\n",
- ARM64_DATA_ALIGNMENT_POWER);
-}
-
-void PlatformEmbeddedFileWriterWin::DeclareUint32(const char* name,
- uint32_t value) {
- DeclareSymbolGlobal(name);
- fprintf(fp_, "%s%s %s %d\n", SYMBOL_PREFIX, name, DirectiveAsString(kLong),
- value);
-}
-
-void PlatformEmbeddedFileWriterWin::DeclarePointerToSymbol(const char* name,
- const char* target) {
- DeclareSymbolGlobal(name);
- fprintf(fp_, "%s%s %s %s%s\n", SYMBOL_PREFIX, name,
- DirectiveAsString(PointerSizeDirective()), SYMBOL_PREFIX, target);
-}
-
-void PlatformEmbeddedFileWriterWin::DeclareSymbolGlobal(const char* name) {
- fprintf(fp_, " EXPORT %s%s\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
- fprintf(fp_, " ALIGN %d\n", ARM64_CODE_ALIGNMENT);
-}
-
-void PlatformEmbeddedFileWriterWin::AlignToDataAlignment() {
- fprintf(fp_, " ALIGN %d\n", ARM64_DATA_ALIGNMENT);
-}
-
-void PlatformEmbeddedFileWriterWin::Comment(const char* string) {
- fprintf(fp_, "; %s\n", string);
-}
-
-void PlatformEmbeddedFileWriterWin::DeclareLabel(const char* name) {
- fprintf(fp_, "%s%s\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformEmbeddedFileWriterWin::SourceInfo(int fileid, const char* filename,
- int line) {
- // TODO(mvstanton): output source information for MSVC.
- // Its syntax is #line <line> "<filename>"
-}
-
-// TODO(mmarchini): investigate emitting size annotations for Windows
-void PlatformEmbeddedFileWriterWin::DeclareFunctionBegin(const char* name,
- uint32_t size) {
- fprintf(fp_, "%s%s FUNCTION\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformEmbeddedFileWriterWin::DeclareFunctionEnd(const char* name) {
- fprintf(fp_, " ENDFUNC\n");
-}
-
-int PlatformEmbeddedFileWriterWin::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0x%" PRIx64, value);
+void PlatformEmbeddedFileWriterWin::FileEpilogue() {
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ fprintf(fp_, " END\n");
+ } else {
+ fprintf(fp_, "END\n");
+ }
}
-void PlatformEmbeddedFileWriterWin::FilePrologue() {}
-
-void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
- int fileid, const char* filename) {}
-
-void PlatformEmbeddedFileWriterWin::FileEpilogue() { fprintf(fp_, " END\n"); }
-
int PlatformEmbeddedFileWriterWin::IndentedDataDirective(
DataDirective directive) {
return fprintf(fp_, " %s ", DirectiveAsString(directive));
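As a concrete illustration of the divergence handled above (symbol name and offset are hypothetical), DeclareRvaToSymbol("foo", 8) emits the following under MASM on x86/x64:

    DD IMAGEREL foo+8

whereas the armasm64 path emits a data word plus an explicit relocation override:

      DCD foo + 8
      RELOC 2

so the word is relocated as a 32-bit RVA (IMAGE_REL_ARM64_ADDR32NB) instead of armasm64's default 64-bit IMAGE_REL_ARM64_ADDR64.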
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h
index 907cb84ac4..0fcb48dc30 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h
@@ -66,6 +66,7 @@ class PlatformEmbeddedFileWriterWin : public PlatformEmbeddedFileWriterBase {
private:
void DeclareSymbolGlobal(const char* name);
+ const char* DirectiveAsString(DataDirective directive);
private:
const EmbeddedTargetArch target_arch_;
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index ab6ddf1d0c..cca8d42240 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -33,7 +33,6 @@ ObjectDeserializer::DeserializeSharedFunctionInfo(
MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
Initialize(isolate);
-
if (!allocator()->ReserveSpace()) return MaybeHandle<HeapObject>();
DCHECK(deserializing_user_code());
@@ -82,7 +81,7 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
Factory* factory = isolate()->factory();
for (Handle<Script> script : new_scripts()) {
// Assign a new script id to avoid collision.
- script->set_id(isolate()->heap()->NextScriptId());
+ script->set_id(isolate()->GetNextScriptId());
LogScriptEvents(*script);
// Add script to list.
Handle<WeakArrayList> list = factory->script_list();
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index c362fdb0ce..56b29a2ac7 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -103,8 +103,7 @@ void PartialSerializer::SerializeObject(HeapObject obj) {
// Clear InterruptBudget when serializing FeedbackCell.
if (obj.IsFeedbackCell()) {
- FeedbackCell::cast(obj).set_interrupt_budget(
- FeedbackCell::GetInitialInterruptBudget());
+ FeedbackCell::cast(obj).SetInitialInterruptBudget();
}
if (SerializeJSObjectWithEmbedderFields(obj)) {
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index 4ddaf37773..41e6188154 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -19,6 +19,7 @@ namespace internal {
ReadOnlySerializer::ReadOnlySerializer(Isolate* isolate)
: RootsSerializer(isolate, RootIndex::kFirstReadOnlyRoot) {
STATIC_ASSERT(RootIndex::kFirstReadOnlyRoot == RootIndex::kFirstRoot);
+ allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
ReadOnlySerializer::~ReadOnlySerializer() {
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index 1703af7717..9218d4eaa9 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -100,7 +100,6 @@ void SerializedData::AllocateData(uint32_t size) {
data_ = NewArray<byte>(size);
size_ = size;
owns_data_ = true;
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
}
// static
@@ -146,7 +145,7 @@ void SerializerDeserializer::RestoreExternalReferenceRedirectors(
}
}
-V8_EXPORT_PRIVATE extern uint32_t Checksum(Vector<const byte> payload) {
+uint32_t Checksum(Vector<const byte> payload) {
#ifdef MEMORY_SANITIZER
// Computing the checksum includes padding bytes for objects like strings.
// Mark every object as initialized in the code serializer.
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index fe7feb0bf0..9ad8d091cd 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -397,6 +397,7 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
// We cannot store byte_length larger than Smi range in the snapshot.
CHECK_LE(buffer.byte_length(), Smi::kMaxValue);
int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
+ ArrayBufferExtension* extension = buffer.extension();
// The embedder-allocated backing store only exists for the off-heap case.
if (backing_store != nullptr) {
@@ -405,9 +406,16 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
// a backing store address. On deserialization we re-set data pointer
// to proper value.
buffer.set_backing_store(reinterpret_cast<void*>(static_cast<size_t>(ref)));
+
+ // Ensure deterministic output by setting extension to null during
+ // serialization.
+ buffer.set_extension(nullptr);
}
+
SerializeObject();
+
buffer.set_backing_store(backing_store);
+ buffer.set_extension(extension);
}
void Serializer::ObjectSerializer::SerializeExternalString() {
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 8e0c28c6d6..8e80b0b0b0 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -14,9 +14,21 @@
#include "src/utils/memcopy.h"
#include "src/utils/version.h"
+#ifdef V8_SNAPSHOT_COMPRESSION
+#include "src/snapshot/snapshot-compression.h"
+#endif
+
namespace v8 {
namespace internal {
+SnapshotData MaybeDecompress(const Vector<const byte>& snapshot_data) {
+#ifdef V8_SNAPSHOT_COMPRESSION
+ return SnapshotCompression::Decompress(snapshot_data);
+#else
+ return SnapshotData(snapshot_data);
+#endif
+}
+
#ifdef DEBUG
bool Snapshot::SnapshotIsValid(const v8::StartupData* snapshot_blob) {
return Snapshot::ExtractNumContexts(snapshot_blob) > 0;
@@ -43,9 +55,11 @@ bool Snapshot::Initialize(Isolate* isolate) {
CheckVersion(blob);
CHECK(VerifyChecksum(blob));
Vector<const byte> startup_data = ExtractStartupData(blob);
- SnapshotData startup_snapshot_data(startup_data);
Vector<const byte> read_only_data = ExtractReadOnlyData(blob);
- SnapshotData read_only_snapshot_data(read_only_data);
+
+ SnapshotData startup_snapshot_data(MaybeDecompress(startup_data));
+ SnapshotData read_only_snapshot_data(MaybeDecompress(read_only_data));
+
StartupDeserializer startup_deserializer(&startup_snapshot_data);
ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot_data);
startup_deserializer.SetRehashability(ExtractRehashability(blob));
@@ -73,7 +87,7 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
bool can_rehash = ExtractRehashability(blob);
Vector<const byte> context_data =
ExtractContextData(blob, static_cast<uint32_t>(context_index));
- SnapshotData snapshot_data(context_data);
+ SnapshotData snapshot_data(MaybeDecompress(context_data));
MaybeHandle<Context> maybe_result = PartialDeserializer::DeserializeContext(
isolate, &snapshot_data, can_rehash, global_proxy,
@@ -116,24 +130,49 @@ void ProfileDeserialization(
}
v8::StartupData Snapshot::CreateSnapshotBlob(
- const SnapshotData* startup_snapshot,
- const SnapshotData* read_only_snapshot,
- const std::vector<SnapshotData*>& context_snapshots, bool can_be_rehashed) {
- uint32_t num_contexts = static_cast<uint32_t>(context_snapshots.size());
+ const SnapshotData* startup_snapshot_in,
+ const SnapshotData* read_only_snapshot_in,
+ const std::vector<SnapshotData*>& context_snapshots_in,
+ bool can_be_rehashed) {
+ // Have these separate from snapshot_in for compression, since we need to
+ // access the compressed data as well as the uncompressed reservations.
+ const SnapshotData* startup_snapshot;
+ const SnapshotData* read_only_snapshot;
+ const std::vector<SnapshotData*>* context_snapshots;
+#ifdef V8_SNAPSHOT_COMPRESSION
+ SnapshotData startup_compressed(
+ SnapshotCompression::Compress(startup_snapshot_in));
+ SnapshotData read_only_compressed(
+ SnapshotCompression::Compress(read_only_snapshot_in));
+ startup_snapshot = &startup_compressed;
+ read_only_snapshot = &read_only_compressed;
+ std::vector<SnapshotData> context_snapshots_compressed;
+ context_snapshots_compressed.reserve(context_snapshots_in.size());
+ std::vector<SnapshotData*> context_snapshots_compressed_ptrs;
+ for (unsigned int i = 0; i < context_snapshots_in.size(); ++i) {
+ context_snapshots_compressed.push_back(
+ SnapshotCompression::Compress(context_snapshots_in[i]));
+ context_snapshots_compressed_ptrs.push_back(
+ &context_snapshots_compressed[i]);
+ }
+ context_snapshots = &context_snapshots_compressed_ptrs;
+#else
+ startup_snapshot = startup_snapshot_in;
+ read_only_snapshot = read_only_snapshot_in;
+ context_snapshots = &context_snapshots_in;
+#endif
+
+ uint32_t num_contexts = static_cast<uint32_t>(context_snapshots->size());
uint32_t startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
uint32_t total_length = startup_snapshot_offset;
- DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(startup_snapshot->RawData().length());
- DCHECK(IsAligned(total_length, kPointerAlignment));
total_length += static_cast<uint32_t>(read_only_snapshot->RawData().length());
- DCHECK(IsAligned(total_length, kPointerAlignment));
- for (const auto context_snapshot : context_snapshots) {
+ for (const auto context_snapshot : *context_snapshots) {
total_length += static_cast<uint32_t>(context_snapshot->RawData().length());
- DCHECK(IsAligned(total_length, kPointerAlignment));
}
- ProfileDeserialization(read_only_snapshot, startup_snapshot,
- context_snapshots);
+ ProfileDeserialization(read_only_snapshot_in, startup_snapshot_in,
+ context_snapshots_in);
char* data = new char[total_length];
// Zero out pre-payload data. Part of that is only used for padding.
@@ -157,7 +196,7 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
if (FLAG_profile_deserialization) {
PrintF("Snapshot blob consists of:\n%10d bytes in %d chunks for startup\n",
payload_length,
- static_cast<uint32_t>(startup_snapshot->Reservations().size()));
+ static_cast<uint32_t>(startup_snapshot_in->Reservations().size()));
}
payload_offset += payload_length;
@@ -176,15 +215,17 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
// Partial snapshots (context-specific data).
for (uint32_t i = 0; i < num_contexts; i++) {
SetHeaderValue(data, ContextSnapshotOffsetOffset(i), payload_offset);
- SnapshotData* context_snapshot = context_snapshots[i];
+ SnapshotData* context_snapshot = (*context_snapshots)[i];
payload_length = context_snapshot->RawData().length();
CopyBytes(
data + payload_offset,
reinterpret_cast<const char*>(context_snapshot->RawData().begin()),
payload_length);
if (FLAG_profile_deserialization) {
- PrintF("%10d bytes in %d chunks for context #%d\n", payload_length,
- static_cast<uint32_t>(context_snapshot->Reservations().size()), i);
+ PrintF(
+ "%10d bytes in %d chunks for context #%d\n", payload_length,
+ static_cast<uint32_t>(context_snapshots_in[i]->Reservations().size()),
+ i);
}
payload_offset += payload_length;
}
@@ -310,7 +351,6 @@ SnapshotData::SnapshotData(const Serializer* serializer) {
uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
uint32_t size =
padded_payload_offset + static_cast<uint32_t>(payload->size());
- DCHECK(IsAligned(size, kPointerAlignment));
// Allocate backing store and create result data.
AllocateData(size);
@@ -358,13 +398,11 @@ bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
v8::Context::Scope context_scope(context);
v8::TryCatch try_catch(isolate);
v8::Local<v8::String> source_string;
- if (!v8::String::NewFromUtf8(isolate, utf8_source, v8::NewStringType::kNormal)
- .ToLocal(&source_string)) {
+ if (!v8::String::NewFromUtf8(isolate, utf8_source).ToLocal(&source_string)) {
return false;
}
v8::Local<v8::String> resource_name =
- v8::String::NewFromUtf8(isolate, name, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8(isolate, name).ToLocalChecked();
v8::ScriptOrigin origin(resource_name);
v8::ScriptCompiler::Source source(source_string, origin);
v8::Local<v8::Script> script;
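Tying the pieces in this file together: when V8_SNAPSHOT_COMPRESSION is defined, every payload that CreateSnapshotBlob writes has been through SnapshotCompression::Compress, and every reader wraps extraction in MaybeDecompress; the profiling statistics keep using the uncompressed *_in inputs because only those carry reservations. A minimal reader sketch following the calls above (context index 0 is an arbitrary example):

    // Sketch: deserializing one context payload from a snapshot blob.
    Vector<const byte> context_data =
        ExtractContextData(blob, /*context_index=*/0);
    // MaybeDecompress is an identity wrapper unless V8_SNAPSHOT_COMPRESSION
    // is defined, in which case it inflates the payload first.
    SnapshotData snapshot_data(MaybeDecompress(context_data));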
diff --git a/deps/v8/src/snapshot/snapshot-compression.cc b/deps/v8/src/snapshot/snapshot-compression.cc
new file mode 100644
index 0000000000..dea16bfa34
--- /dev/null
+++ b/deps/v8/src/snapshot/snapshot-compression.cc
@@ -0,0 +1,95 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/snapshot-compression.h"
+
+#include "src/utils/memcopy.h"
+#include "third_party/zlib/google/compression_utils_portable.h"
+
+namespace v8 {
+namespace internal {
+
+uint32_t GetUncompressedSize(const Bytef* compressed_data) {
+ uint32_t size;
+ MemCopy(&size, compressed_data, sizeof(size));
+ return size;
+}
+
+SnapshotData SnapshotCompression::Compress(
+ const SnapshotData* uncompressed_data) {
+ SnapshotData snapshot_data;
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+
+ static_assert(sizeof(Bytef) == 1, "");
+ const uLongf input_size =
+ static_cast<uLongf>(uncompressed_data->RawData().size());
+ uint32_t payload_length =
+ static_cast<uint32_t>(uncompressed_data->RawData().size());
+
+ uLongf compressed_data_size = compressBound(input_size);
+
+ // Allocate an upper bound; the result is shrunk to the exact size below.
+ snapshot_data.AllocateData(
+ static_cast<uint32_t>(sizeof(payload_length) + compressed_data_size));
+
+ byte* compressed_data = const_cast<byte*>(snapshot_data.RawData().begin());
+ // Since we are doing raw compression (no zlib or gzip headers), we need to
+ // manually store the uncompressed size.
+ MemCopy(compressed_data, &payload_length, sizeof(payload_length));
+
+ CHECK_EQ(zlib_internal::CompressHelper(
+ zlib_internal::ZRAW, compressed_data + sizeof(payload_length),
+ &compressed_data_size,
+ bit_cast<const Bytef*>(uncompressed_data->RawData().begin()),
+ input_size, Z_DEFAULT_COMPRESSION, nullptr, nullptr),
+ Z_OK);
+
+ // Shrink the result to exactly the size we need.
+ snapshot_data.Resize(static_cast<uint32_t>(compressed_data_size) +
+ sizeof(payload_length));
+ DCHECK_EQ(payload_length,
+ GetUncompressedSize(snapshot_data.RawData().begin()));
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Compressing %d bytes took %0.3f ms]\n", payload_length, ms);
+ }
+ return snapshot_data;
+}
+
+SnapshotData SnapshotCompression::Decompress(
+ Vector<const byte> compressed_data) {
+ SnapshotData snapshot_data;
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
+
+ const Bytef* input_bytef = bit_cast<const Bytef*>(compressed_data.begin());
+
+ // Since we are doing raw compression (no zlib or gzip headers), we need to
+ // manually retrieve the uncompressed size.
+ uint32_t uncompressed_payload_length = GetUncompressedSize(input_bytef);
+ input_bytef += sizeof(uncompressed_payload_length);
+
+ snapshot_data.AllocateData(uncompressed_payload_length);
+
+ uLongf uncompressed_size = uncompressed_payload_length;
+ CHECK_EQ(zlib_internal::UncompressHelper(
+ zlib_internal::ZRAW,
+ bit_cast<Bytef*>(snapshot_data.RawData().begin()),
+ &uncompressed_size, input_bytef,
+ static_cast<uLong>(compressed_data.size() -
+ sizeof(uncompressed_payload_length))),
+ Z_OK);
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Decompressing %d bytes took %0.3f ms]\n",
+ uncompressed_payload_length, ms);
+ }
+ return snapshot_data;
+}
+
+} // namespace internal
+} // namespace v8
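Read together, Compress and Decompress define a simple framing: because the stream is raw deflate (ZRAW, with no zlib or gzip header that could carry a length), the uncompressed size is stored manually in front of it. The resulting byte layout is:

    [0..3]   uint32_t uncompressed payload length (host byte order, via MemCopy)
    [4.. ]   raw deflate stream that inflates to exactly that many bytes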
diff --git a/deps/v8/src/snapshot/snapshot-compression.h b/deps/v8/src/snapshot/snapshot-compression.h
new file mode 100644
index 0000000000..59c21feb74
--- /dev/null
+++ b/deps/v8/src/snapshot/snapshot-compression.h
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_SNAPSHOT_COMPRESSION_H_
+#define V8_SNAPSHOT_SNAPSHOT_COMPRESSION_H_
+
+#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/serializer.h"
+#include "src/snapshot/snapshot.h"
+#include "src/utils/vector.h"
+
+namespace v8 {
+namespace internal {
+
+class SnapshotCompression : public AllStatic {
+ public:
+ V8_EXPORT_PRIVATE static SnapshotData Compress(
+ const SnapshotData* uncompressed_data);
+ V8_EXPORT_PRIVATE static SnapshotData Decompress(
+ Vector<const byte> compressed_data);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_SNAPSHOT_COMPRESSION_H_
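A minimal round-trip sketch against this header, assuming an already-populated SnapshotData named `data` (hypothetical variable; failures surface through the CHECKs inside the helpers):

    // Sketch: compress a serialized payload and restore it.
    SnapshotData compressed = SnapshotCompression::Compress(&data);
    SnapshotData restored =
        SnapshotCompression::Decompress(compressed.RawData());
    // restored.RawData() now matches data.RawData() byte for byte.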
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 156d873019..d9f05c59a8 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -16,6 +16,7 @@ namespace internal {
// Forward declarations.
class Isolate;
class PartialSerializer;
+class SnapshotCompression;
class StartupSerializer;
// Wrapper around reservation sizes and the serialization payload.
@@ -37,6 +38,15 @@ class V8_EXPORT_PRIVATE SnapshotData : public SerializedData {
}
protected:
+ // Empty constructor used by SnapshotCompression so it can manually allocate
+ // memory.
+ SnapshotData() : SerializedData() {}
+ friend class SnapshotCompression;
+
+ // Resize() is used by SnapshotCompression so it can shrink the compressed
+ // SnapshotData.
+ void Resize(uint32_t size) { size_ = size; }
+
// The data header consists of uint32_t-sized entries:
// [0] magic number and (internal) external reference count
// [1] number of reservation size entries
@@ -77,9 +87,9 @@ class Snapshot : public AllStatic {
// ---------------- Serialization ----------------
static v8::StartupData CreateSnapshotBlob(
- const SnapshotData* startup_snapshot,
- const SnapshotData* read_only_snapshot,
- const std::vector<SnapshotData*>& context_snapshots,
+ const SnapshotData* startup_snapshot_in,
+ const SnapshotData* read_only_snapshot_in,
+ const std::vector<SnapshotData*>& context_snapshots_in,
bool can_be_rehashed);
#ifdef DEBUG
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 8d99238d1e..7ef6ac6168 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -54,6 +54,10 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
isolate->heap()->set_allocation_sites_list(
ReadOnlyRoots(isolate).undefined_value());
}
+ isolate->heap()->set_dirty_js_finalization_registries_list(
+ ReadOnlyRoots(isolate).undefined_value());
+ isolate->heap()->set_dirty_js_finalization_registries_list_tail(
+ ReadOnlyRoots(isolate).undefined_value());
isolate->builtins()->MarkInitialized();
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 4d6ce78b59..8b95ca6b6a 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -23,6 +23,7 @@ StartupSerializer::StartupSerializer(Isolate* isolate,
ReadOnlySerializer* read_only_serializer)
: RootsSerializer(isolate, RootIndex::kFirstStrongRoot),
read_only_serializer_(read_only_serializer) {
+ allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
InitializeCodeAddressMap();
}
@@ -169,6 +170,15 @@ void StartupSerializer::SerializeUsingPartialSnapshotCache(
sink->PutInt(cache_index, "partial_snapshot_cache_index");
}
+void StartupSerializer::CheckNoDirtyFinalizationRegistries() {
+ Isolate* isolate = this->isolate();
+ CHECK(isolate->heap()->dirty_js_finalization_registries_list().IsUndefined(
+ isolate));
+ CHECK(
+ isolate->heap()->dirty_js_finalization_registries_list_tail().IsUndefined(
+ isolate));
+}
+
void SerializedHandleChecker::AddToSet(FixedArray serialized) {
int length = serialized.length();
for (int i = 0; i < length; i++) serialized_.insert(serialized.get(i));
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 5ab98ed8ba..50c023852f 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -41,6 +41,10 @@ class V8_EXPORT_PRIVATE StartupSerializer : public RootsSerializer {
void SerializeUsingPartialSnapshotCache(SnapshotByteSink* sink,
HeapObject obj);
+ // The per-heap dirty FinalizationRegistry list is weak and not serialized.
+ // No JSFinalizationRegistries should be used during startup.
+ void CheckNoDirtyFinalizationRegistries();
+
private:
void SerializeObject(HeapObject o) override;
diff --git a/deps/v8/src/strings/string-hasher-inl.h b/deps/v8/src/strings/string-hasher-inl.h
index 2857b27731..cac574637b 100644
--- a/deps/v8/src/strings/string-hasher-inl.h
+++ b/deps/v8/src/strings/string-hasher-inl.h
@@ -35,18 +35,13 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
uint32_t StringHasher::GetTrivialHash(int length) {
DCHECK_GT(length, String::kMaxHashCalcLength);
- // The hash of a large string is simply computed from the length. We don't
- // have quite enough bits, so we drop the least significant bit.
- // TODO(9904): Free up one bit, so we don't have to drop anything here.
- constexpr int kDroppedBits = 1;
- // Ensure that the max length after dropping bits is small enough to be
- // shifted without losing information.
- STATIC_ASSERT(base::bits::CountLeadingZeros32(String::kMaxLength) +
- kDroppedBits >=
+ // The hash of a large string is simply computed from the length.
+ // Ensure that the max length is small enough to be shifted without losing
+ // information.
+ STATIC_ASSERT(base::bits::CountLeadingZeros32(String::kMaxLength) >=
String::kHashShift);
- uint32_t hash = static_cast<uint32_t>(length) >> kDroppedBits;
- return (hash << String::kHashShift) | String::kIsNotArrayIndexMask |
- String::kIsNotIntegerIndexMask;
+ uint32_t hash = static_cast<uint32_t>(length);
+ return (hash << String::kHashShift) | String::kIsNotIntegerIndexMask;
}
template <typename char_t>
@@ -93,8 +88,17 @@ uint32_t StringHasher::HashSequentialString(const char_t* chars_raw, int length,
}
running_hash = AddCharacterCore(running_hash, *chars++);
}
- return (GetHashCore(running_hash) << String::kHashShift) |
- String::kIsNotArrayIndexMask | is_integer_index;
+ uint32_t hash = (GetHashCore(running_hash) << String::kHashShift) |
+ is_integer_index;
+ if (Name::ContainsCachedArrayIndex(hash)) {
+ // The hash accidentally looks like a cached index. Fix that by
+ // setting a bit that looks like a longer-than-cacheable string
+ // length.
+ hash |= (String::kMaxCachedArrayIndexLength + 1)
+ << String::ArrayIndexLengthBits::kShift;
+ }
+ DCHECK(!Name::ContainsCachedArrayIndex(hash));
+ return hash;
}
#endif
}
@@ -113,7 +117,7 @@ uint32_t StringHasher::HashSequentialString(const char_t* chars_raw, int length,
}
return (GetHashCore(running_hash) << String::kHashShift) |
- String::kIsNotArrayIndexMask | String::kIsNotIntegerIndexMask;
+ String::kIsNotIntegerIndexMask;
}
std::size_t SeededStringHasher::operator()(const char* name) const {
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 0b7cf7af9b..93e0622aee 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -629,6 +629,8 @@ struct BasicTypeExpression : TypeExpression {
is_constexpr(IsConstexprName(name)),
name(std::move(name)),
generic_arguments(std::move(generic_arguments)) {}
+ BasicTypeExpression(SourcePosition pos, std::string name)
+ : BasicTypeExpression(pos, {}, std::move(name), {}) {}
std::vector<std::string> namespace_qualification;
bool is_constexpr;
std::string name;
@@ -838,7 +840,7 @@ struct InstanceTypeConstraints {
struct AbstractTypeDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(AbstractTypeDeclaration)
AbstractTypeDeclaration(SourcePosition pos, Identifier* name, bool transient,
- base::Optional<Identifier*> extends,
+ base::Optional<TypeExpression*> extends,
base::Optional<std::string> generates)
: TypeDeclaration(kKind, pos, name),
is_constexpr(IsConstexprName(name->value)),
@@ -847,7 +849,7 @@ struct AbstractTypeDeclaration : TypeDeclaration {
generates(std::move(generates)) {}
bool is_constexpr;
bool transient;
- base::Optional<Identifier*> extends;
+ base::Optional<TypeExpression*> extends;
base::Optional<std::string> generates;
};
diff --git a/deps/v8/src/torque/cfg.cc b/deps/v8/src/torque/cfg.cc
index 9726cbfcb3..7ac1662cd4 100644
--- a/deps/v8/src/torque/cfg.cc
+++ b/deps/v8/src/torque/cfg.cc
@@ -209,6 +209,34 @@ void CfgAssembler::OptimizeCfg() {
[&](Block* b) { return predecessor_count[b->id()] == 0; });
}
+void CfgAssembler::ComputeInputDefinitions() {
+ Worklist<Block*> worklist;
+
+ // Setup start block.
+ Stack<DefinitionLocation> parameter_defs;
+ for (std::size_t i = 0; i < cfg_.ParameterCount(); ++i) {
+ parameter_defs.Push(DefinitionLocation::Parameter(i));
+ }
+ cfg_.start()->MergeInputDefinitions(parameter_defs, &worklist);
+
+ // Run fixpoint algorithm.
+ while (!worklist.IsEmpty()) {
+ Block* block = worklist.Dequeue();
+ Stack<DefinitionLocation> definitions = block->InputDefinitions();
+
+ // Propagate through block's instructions.
+ for (const auto& instruction : block->instructions()) {
+ instruction.RecomputeDefinitionLocations(&definitions, &worklist);
+ }
+ }
+
+ for (Block* block : cfg_.blocks()) {
+ DCHECK_IMPLIES(!block->IsDead(), block->InputDefinitions().Size() ==
+ block->InputTypes().Size());
+ USE(block);
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
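To make the fixpoint concrete, here is a small hypothetical trace of the merge rule in Block::MergeInputDefinitions (declared in cfg.h below) for one stack slot i of a block B with several predecessors:

    // first incoming edge, definition d1:
    //   input_definitions_[i] = d1                               (B enqueued)
    // second incoming edge, definition d2 != d1:
    //   d1 != Phi(B, i)  =>  input_definitions_[i] = Phi(B, i)   (B re-enqueued)
    // any later edge with definition d:
    //   Phi(B, i) is already recorded  =>  no change, no re-enqueue

Slots that receive the same definition along every edge keep it and need no phi; blocks that are never merged into end up with no input definitions at all, which is exactly what IsDead() reports.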
diff --git a/deps/v8/src/torque/cfg.h b/deps/v8/src/torque/cfg.h
index e75172c1e0..e45cb75073 100644
--- a/deps/v8/src/torque/cfg.h
+++ b/deps/v8/src/torque/cfg.h
@@ -53,10 +53,42 @@ class Block {
size_t id() const { return id_; }
bool IsDeferred() const { return is_deferred_; }
+ void MergeInputDefinitions(const Stack<DefinitionLocation>& input_definitions,
+ Worklist<Block*>* worklist) {
+ if (!input_definitions_) {
+ input_definitions_ = input_definitions;
+ if (worklist) worklist->Enqueue(this);
+ return;
+ }
+
+ DCHECK_EQ(input_definitions_->Size(), input_definitions.Size());
+ bool changed = false;
+ for (BottomOffset i = 0; i < input_definitions.AboveTop(); ++i) {
+ auto& current = input_definitions_->Peek(i);
+ auto& input = input_definitions.Peek(i);
+ if (current == input) continue;
+ if (current == DefinitionLocation::Phi(this, i.offset)) continue;
+ input_definitions_->Poke(i, DefinitionLocation::Phi(this, i.offset));
+ changed = true;
+ }
+
+ if (changed && worklist) worklist->Enqueue(this);
+ }
+ bool HasInputDefinitions() const {
+ return input_definitions_ != base::nullopt;
+ }
+ const Stack<DefinitionLocation>& InputDefinitions() const {
+ DCHECK(HasInputDefinitions());
+ return *input_definitions_;
+ }
+
+ bool IsDead() const { return !HasInputDefinitions(); }
+
private:
ControlFlowGraph* cfg_;
std::vector<Instruction> instructions_;
base::Optional<Stack<const Type*>> input_types_;
+ base::Optional<Stack<DefinitionLocation>> input_definitions_;
const size_t id_;
bool is_deferred_;
};
@@ -95,6 +127,9 @@ class ControlFlowGraph {
}
const std::vector<Block*>& blocks() const { return placed_blocks_; }
size_t NumberOfBlockIds() const { return next_block_id_; }
+ std::size_t ParameterCount() const {
+ return start_ ? start_->InputTypes().Size() : 0;
+ }
private:
std::list<Block> blocks_;
@@ -116,6 +151,7 @@ class CfgAssembler {
}
OptimizeCfg();
DCHECK(CfgIsComplete());
+ ComputeInputDefinitions();
return cfg_;
}
@@ -167,6 +203,7 @@ class CfgAssembler {
void PrintCurrentStack(std::ostream& s) { s << "stack: " << current_stack_; }
void OptimizeCfg();
+ void ComputeInputDefinitions();
private:
friend class CfgAssemblerScopedTemporaryBlock;
diff --git a/deps/v8/src/torque/class-debug-reader-generator.cc b/deps/v8/src/torque/class-debug-reader-generator.cc
index e4f9ddcd60..52646dff26 100644
--- a/deps/v8/src/torque/class-debug-reader-generator.cc
+++ b/deps/v8/src/torque/class-debug-reader-generator.cc
@@ -44,14 +44,22 @@ class ValueTypeFieldIterator {
int shift_bits;
};
const Result operator*() const {
- if (const StructType* struct_type = StructType::DynamicCast(type_)) {
- const auto& field = struct_type->fields()[index_];
+ if (auto struct_type = type_->StructSupertype()) {
+ const auto& field = (*struct_type)->fields()[index_];
return {field.name_and_type, field.pos, *field.offset, 0, 0};
}
+ const Type* type = type_;
+ int bitfield_start_offset = 0;
+ if (const auto type_wrapped_in_smi =
+ Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
+ type = *type_wrapped_in_smi;
+ bitfield_start_offset = kSmiTagSize + kSmiShiftSize;
+ }
if (const BitFieldStructType* bit_field_struct_type =
- BitFieldStructType::DynamicCast(type_)) {
+ BitFieldStructType::DynamicCast(type)) {
const auto& field = bit_field_struct_type->fields()[index_];
- return {field.name_and_type, field.pos, 0, field.num_bits, field.offset};
+ return {field.name_and_type, field.pos, 0, field.num_bits,
+ field.offset + bitfield_start_offset};
}
UNREACHABLE();
}
@@ -79,12 +87,17 @@ class ValueTypeFieldsRange {
ValueTypeFieldIterator begin() { return {type_, 0}; }
ValueTypeFieldIterator end() {
size_t index = 0;
- const StructType* struct_type = StructType::DynamicCast(type_);
- if (struct_type && struct_type != TypeOracle::GetFloat64OrHoleType()) {
- index = struct_type->fields().size();
+ base::Optional<const StructType*> struct_type = type_->StructSupertype();
+ if (struct_type && *struct_type != TypeOracle::GetFloat64OrHoleType()) {
+ index = (*struct_type)->fields().size();
+ }
+ const Type* type = type_;
+ if (const auto type_wrapped_in_smi =
+ Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
+ type = *type_wrapped_in_smi;
}
if (const BitFieldStructType* bit_field_struct_type =
- BitFieldStructType::DynamicCast(type_)) {
+ BitFieldStructType::DynamicCast(type)) {
index = bit_field_struct_type->fields().size();
}
return {type_, index};
@@ -115,23 +128,21 @@ class DebugFieldType {
if (IsTagged()) {
return storage == kAsStoredInHeap ? "i::Tagged_t" : "uintptr_t";
}
- // Note that we need constexpr names to resolve correctly in the global
- // namespace, because we're passing them as strings to a debugging
- // extension. We can verify this during build of the debug helper, because
- // we use this type for a local variable below, and generate this code in
- // a disjoint namespace. However, we can't emit a useful error at this
- // point. Instead we'll emit a comment that might be helpful.
+
+ // We can't emit a useful error at this point if the constexpr type name is
+ // wrong, but we can include a comment that might be helpful.
return GetOriginalType(storage) +
- " /*Failing? Ensure constexpr type name is fully qualified and "
- "necessary #includes are in debug-helper-internal.h*/";
+ " /*Failing? Ensure constexpr type name is correct, and the "
+ "necessary #include is in any .tq file*/";
}
// Returns the type that should be used to represent a field's type to
// debugging tools that have full V8 symbols. The types returned from this
- // method are fully qualified and may refer to object types that are not
- // included in the compilation of the debug helper library.
+ // method are resolvable in the v8::internal namespace and may refer to
+ // object types that are not included in the compilation of the debug helper
+ // library.
std::string GetOriginalType(TypeStorage storage) const {
- if (name_and_type_.type->IsStructType()) {
+ if (name_and_type_.type->StructSupertype()) {
// There's no meaningful type we could use here, because the V8 symbols
// don't have any definition of a C++ struct matching this struct type.
return "";
@@ -151,6 +162,26 @@ class DebugFieldType {
return name_and_type_.type->GetConstexprGeneratedTypeName();
}
+ // Returns a C++ expression that evaluates to a string (type `const char*`)
+ // containing the name of the field's type. The types returned from this
+ // method are resolvable in the v8::internal namespace and may refer to
+ // object types that are not included in the compilation of the debug helper
+ // library.
+ std::string GetTypeString(TypeStorage storage) const {
+ if (IsTagged() || name_and_type_.type->IsStructType()) {
+ // Wrap up the original type in a string literal.
+ return "\"" + GetOriginalType(storage) + "\"";
+ }
+
+ // We require constexpr type names to be resolvable in the v8::internal
+ // namespace, according to the contract in debug-helper.h. In order to
+ // verify at compile time that constexpr type names are resolvable, we use
+ // the type name as a dummy template parameter to a function that just
+ // returns its parameter.
+ return "CheckTypeName<" + GetValueType(storage) + ">(\"" +
+ GetOriginalType(storage) + "\")";
+ }
+
// Returns the field's size in bytes.
size_t GetSize() const {
auto opt_size = SizeOf(name_and_type_.type);
@@ -228,7 +259,7 @@ void GenerateFieldValueAccessor(const Field& field,
std::ostream& h_contents,
std::ostream& cc_contents) {
// Currently not implemented for struct fields.
- if (field.name_and_type.type->IsStructType()) return;
+ if (field.name_and_type.type->StructSupertype()) return;
DebugFieldType debug_field_type(field);
@@ -243,25 +274,23 @@ void GenerateFieldValueAccessor(const Field& field,
index_offset = " + offset * sizeof(value)";
}
- if (!field.name_and_type.type->IsStructType()) {
- std::string field_value_type = debug_field_type.GetValueType(kUncompressed);
- h_contents << " Value<" << field_value_type << "> " << field_getter
- << "(d::MemoryAccessor accessor " << index_param << ") const;\n";
- cc_contents << "\nValue<" << field_value_type << "> Tq" << class_name
- << "::" << field_getter << "(d::MemoryAccessor accessor"
- << index_param << ") const {\n";
- cc_contents << " " << debug_field_type.GetValueType(kAsStoredInHeap)
- << " value{};\n";
- cc_contents << " d::MemoryAccessResult validity = accessor("
- << address_getter << "()" << index_offset
- << ", reinterpret_cast<uint8_t*>(&value), sizeof(value));\n";
- cc_contents << " return {validity, "
- << (debug_field_type.IsTagged()
- ? "EnsureDecompressed(value, address_)"
- : "value")
- << "};\n";
- cc_contents << "}\n";
- }
+ std::string field_value_type = debug_field_type.GetValueType(kUncompressed);
+ h_contents << " Value<" << field_value_type << "> " << field_getter
+ << "(d::MemoryAccessor accessor " << index_param << ") const;\n";
+ cc_contents << "\nValue<" << field_value_type << "> Tq" << class_name
+ << "::" << field_getter << "(d::MemoryAccessor accessor"
+ << index_param << ") const {\n";
+ cc_contents << " " << debug_field_type.GetValueType(kAsStoredInHeap)
+ << " value{};\n";
+ cc_contents << " d::MemoryAccessResult validity = accessor("
+ << address_getter << "()" << index_offset
+ << ", reinterpret_cast<uint8_t*>(&value), sizeof(value));\n";
+ cc_contents << " return {validity, "
+ << (debug_field_type.IsTagged()
+ ? "EnsureDecompressed(value, address_)"
+ : "value")
+ << "};\n";
+ cc_contents << "}\n";
}
// Emits a portion of the member function GetProperties that is responsible for
@@ -333,10 +362,9 @@ void GenerateGetPropsChunkForField(const Field& field,
struct_field.pos);
get_props_impl << " " << struct_field_list
<< ".push_back(std::make_unique<StructProperty>(\""
- << struct_field.name_and_type.name << "\", \""
- << struct_field_type.GetOriginalType(kAsStoredInHeap)
- << "\", \""
- << struct_field_type.GetOriginalType(kUncompressed) << "\", "
+ << struct_field.name_and_type.name << "\", "
+ << struct_field_type.GetTypeString(kAsStoredInHeap) << ", "
+ << struct_field_type.GetTypeString(kUncompressed) << ", "
<< struct_field.offset_bytes << ", " << struct_field.num_bits
<< ", " << struct_field.shift_bits << "));\n";
}
@@ -370,11 +398,11 @@ void GenerateGetPropsChunkForField(const Field& field,
}
get_props_impl << " result.push_back(std::make_unique<ObjectProperty>(\""
- << field.name_and_type.name << "\", \""
- << debug_field_type.GetOriginalType(kAsStoredInHeap)
- << "\", \"" << debug_field_type.GetOriginalType(kUncompressed)
- << "\", " << debug_field_type.GetAddressGetter() << "(), "
- << count_value << ", " << debug_field_type.GetSize() << ", "
+ << field.name_and_type.name << "\", "
+ << debug_field_type.GetTypeString(kAsStoredInHeap) << ", "
+ << debug_field_type.GetTypeString(kUncompressed) << ", "
+ << debug_field_type.GetAddressGetter() << "(), " << count_value
+ << ", " << debug_field_type.GetSize() << ", "
<< struct_field_list << ", " << property_kind << "));\n";
}
@@ -524,12 +552,17 @@ void ImplementationVisitor::GenerateClassDebugReaders(
h_contents << "#undef GetBValue\n";
h_contents << "#endif\n\n";
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
+ }
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
cc_contents << "#include \"include/v8-internal.h\"\n\n";
cc_contents << "namespace i = v8::internal;\n\n";
- NamespaceScope h_namespaces(h_contents, {"v8_debug_helper_internal"});
- NamespaceScope cc_namespaces(cc_contents, {"v8_debug_helper_internal"});
+ NamespaceScope h_namespaces(h_contents,
+ {"v8", "internal", "debug_helper_internal"});
+ NamespaceScope cc_namespaces(cc_contents,
+ {"v8", "internal", "debug_helper_internal"});
std::stringstream visitor;
visitor << "\nclass TqObjectVisitor {\n";
@@ -539,8 +572,7 @@ void ImplementationVisitor::GenerateClassDebugReaders(
std::stringstream class_names;
std::unordered_set<const ClassType*> done;
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
+ for (const ClassType* type : TypeOracle::GetClasses()) {
GenerateClassDebugReader(*type, h_contents, cc_contents, visitor,
class_names, &done);
}
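The CheckTypeName helper that GetTypeString emits calls to is not part of this diff; going only by the comment above (a dummy template parameter on a function that simply returns its argument), a plausible sketch of its shape would be:

    // Sketch only, not the actual V8 definition: instantiating
    // CheckTypeName<T> forces the compiler to resolve T, so an unresolvable
    // constexpr type name becomes a compile-time error in the generated code.
    template <typename T>
    const char* CheckTypeName(const char* name) {
      return name;
    }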
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 3559daaccd..9eba568ac9 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -63,13 +63,16 @@ static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
static const char* const TORQUE_INTERNAL_NAMESPACE_STRING = "torque_internal";
-static const char* const REFERENCE_TYPE_STRING = "Reference";
+static const char* const MUTABLE_REFERENCE_TYPE_STRING = "MutableReference";
+static const char* const CONST_REFERENCE_TYPE_STRING = "ConstReference";
static const char* const SLICE_TYPE_STRING = "Slice";
static const char* const WEAK_TYPE_STRING = "Weak";
+static const char* const SMI_TAGGED_TYPE_STRING = "SmiTagged";
static const char* const UNINITIALIZED_ITERATOR_TYPE_STRING =
"UninitializedIterator";
static const char* const GENERIC_TYPE_INSTANTIATION_NAMESPACE_STRING =
"_generic_type_instantiation_namespace";
+static const char* const FIXED_ARRAY_BASE_TYPE_STRING = "FixedArrayBase";
static const char* const ANNOTATION_GENERATE_PRINT = "@generatePrint";
static const char* const ANNOTATION_NO_VERIFIER = "@noVerifier";
@@ -87,6 +90,9 @@ static const char* const ANNOTATION_INSTANCE_TYPE_VALUE =
"@apiExposedInstanceTypeValue";
static const char* const ANNOTATION_IF = "@if";
static const char* const ANNOTATION_IFNOT = "@ifnot";
+static const char* const ANNOTATION_GENERATE_BODY_DESCRIPTOR =
+ "@generateBodyDescriptor";
+static const char* const ANNOTATION_EXPORT_CPP_CLASS = "@export";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
@@ -124,6 +130,8 @@ enum class ClassFlag {
kHighestInstanceTypeWithinParent = 1 << 9,
kLowestInstanceTypeWithinParent = 1 << 10,
kUndefinedLayout = 1 << 11,
+ kGenerateBodyDescriptor = 1 << 12,
+ kExport = 1 << 13,
};
using ClassFlags = base::Flags<ClassFlag>;
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index 29e8e1dea2..9716ccbad4 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -6,6 +6,7 @@
#include "src/common/globals.h"
#include "src/torque/type-oracle.h"
+#include "src/torque/types.h"
#include "src/torque/utils.h"
namespace v8 {
@@ -14,24 +15,51 @@ namespace torque {
base::Optional<Stack<std::string>> CSAGenerator::EmitGraph(
Stack<std::string> parameters) {
+ for (BottomOffset i = 0; i < parameters.AboveTop(); ++i) {
+ SetDefinitionVariable(DefinitionLocation::Parameter(i.offset),
+ parameters.Peek(i));
+ }
+
for (Block* block : cfg_.blocks()) {
- out_ << " compiler::CodeAssemblerParameterizedLabel<";
- PrintCommaSeparatedList(out_, block->InputTypes(), [](const Type* t) {
- return t->GetGeneratedTNodeTypeName();
- });
- out_ << "> " << BlockName(block) << "(&ca_, compiler::CodeAssemblerLabel::"
- << (block->IsDeferred() ? "kDeferred" : "kNonDeferred") << ");\n";
+ if (block->IsDead()) continue;
+
+ out() << " compiler::CodeAssemblerParameterizedLabel<";
+ bool first = true;
+ DCHECK_EQ(block->InputTypes().Size(), block->InputDefinitions().Size());
+ for (BottomOffset i = 0; i < block->InputTypes().AboveTop(); ++i) {
+ if (block->InputDefinitions().Peek(i).IsPhiFromBlock(block)) {
+ if (!first) out() << ", ";
+ out() << block->InputTypes().Peek(i)->GetGeneratedTNodeTypeName();
+ first = false;
+ }
+ }
+ out() << "> " << BlockName(block) << "(&ca_, compiler::CodeAssemblerLabel::"
+ << (block->IsDeferred() ? "kDeferred" : "kNonDeferred") << ");\n";
}
EmitInstruction(GotoInstruction{cfg_.start()}, &parameters);
for (Block* block : cfg_.blocks()) {
if (cfg_.end() && *cfg_.end() == block) continue;
- out_ << "\n if (" << BlockName(block) << ".is_used()) {\n";
+ if (block->IsDead()) continue;
+ out() << "\n";
+
+ // Redirect the output of non-declarations into a buffer and only output
+ // declarations right away.
+ std::stringstream out_buffer;
+ std::ostream* old_out = out_;
+ out_ = &out_buffer;
+
+ out() << " if (" << BlockName(block) << ".is_used()) {\n";
EmitBlock(block);
- out_ << " }\n";
+ out() << " }\n";
+
+ // All declarations have been printed now, so we can append the buffered
+ // output and redirect back to the original output stream.
+ out_ = old_out;
+ out() << out_buffer.str();
}
if (cfg_.end()) {
- out_ << "\n";
+ out() << "\n";
return EmitBlock(*cfg_.end());
}
return base::nullopt;
@@ -39,16 +67,20 @@ base::Optional<Stack<std::string>> CSAGenerator::EmitGraph(
Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
Stack<std::string> stack;
- for (const Type* t : block->InputTypes()) {
- stack.Push(FreshNodeName());
- out_ << " TNode<" << t->GetGeneratedTNodeTypeName() << "> "
- << stack.Top() << ";\n";
- }
- out_ << " ca_.Bind(&" << BlockName(block);
- for (const std::string& name : stack) {
- out_ << ", &" << name;
+ std::stringstream phi_names;
+
+ for (BottomOffset i = 0; i < block->InputTypes().AboveTop(); ++i) {
+ const auto& def = block->InputDefinitions().Peek(i);
+ stack.Push(DefinitionToVariable(def));
+ if (def.IsPhiFromBlock(block)) {
+ decls() << " TNode<"
+ << block->InputTypes().Peek(i)->GetGeneratedTNodeTypeName()
+ << "> " << stack.Top() << ";\n";
+ phi_names << ", &" << stack.Top();
+ }
}
- out_ << ");\n";
+ out() << " ca_.Bind(&" << BlockName(block) << phi_names.str() << ");\n";
+
for (const Instruction& instruction : block->instructions()) {
EmitInstruction(instruction, &stack);
}
@@ -60,16 +92,32 @@ void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
// Lines in Torque SourcePositions are zero-based, while the
// CodeStubAssembler and downwind systems are one-based.
- out_ << " ca_.SetSourcePosition(\"" << file << "\", "
- << (pos.start.line + 1) << ");\n";
+ out() << " ca_.SetSourcePosition(\"" << file << "\", "
+ << (pos.start.line + 1) << ");\n";
previous_position_ = pos;
}
}
+bool CSAGenerator::IsEmptyInstruction(const Instruction& instruction) {
+ switch (instruction.kind()) {
+ case InstructionKind::kPeekInstruction:
+ case InstructionKind::kPokeInstruction:
+ case InstructionKind::kDeleteRangeInstruction:
+ case InstructionKind::kPushUninitializedInstruction:
+ case InstructionKind::kPushBuiltinPointerInstruction:
+ case InstructionKind::kUnsafeCastInstruction:
+ return true;
+ default:
+ return false;
+ }
+}
+
void CSAGenerator::EmitInstruction(const Instruction& instruction,
Stack<std::string>* stack) {
#ifdef DEBUG
- EmitSourcePosition(instruction->pos);
+ if (!IsEmptyInstruction(instruction)) {
+ EmitSourcePosition(instruction->pos);
+ }
#endif
switch (instruction.kind()) {
@@ -103,15 +151,20 @@ void CSAGenerator::EmitInstruction(
// TODO(tebbi): This can trigger an error in CSA if it is used. Instead, we
// should prevent usage of uninitialized values in the type system. This
// requires "if constexpr" being evaluated at Torque time.
- stack->Push("ca_.Uninitialized<" +
- instruction.type->GetGeneratedTNodeTypeName() + ">()");
+ const std::string str = "ca_.Uninitialized<" +
+ instruction.type->GetGeneratedTNodeTypeName() + ">()";
+ stack->Push(str);
+ SetDefinitionVariable(instruction.GetValueDefinition(), str);
}
void CSAGenerator::EmitInstruction(
const PushBuiltinPointerInstruction& instruction,
Stack<std::string>* stack) {
- stack->Push("ca_.UncheckedCast<BuiltinPtr>(ca_.SmiConstant(Builtins::k" +
- instruction.external_name + "))");
+ const std::string str =
+ "ca_.UncheckedCast<BuiltinPtr>(ca_.SmiConstant(Builtins::k" +
+ instruction.external_name + "))";
+ stack->Push(str);
+ SetDefinitionVariable(instruction.GetValueDefinition(), str);
}
void CSAGenerator::EmitInstruction(
@@ -119,26 +172,28 @@ void CSAGenerator::EmitInstruction(
Stack<std::string>* stack) {
const Type* type = instruction.constant->type();
std::vector<std::string> results;
- for (const Type* lowered : LowerType(type)) {
- results.push_back(FreshNodeName());
+
+ const auto lowered = LowerType(type);
+ for (std::size_t i = 0; i < lowered.size(); ++i) {
+ results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
stack->Push(results.back());
- out_ << " TNode<" << lowered->GetGeneratedTNodeTypeName() << "> "
- << stack->Top() << ";\n";
- out_ << " USE(" << stack->Top() << ");\n";
- }
- out_ << " ";
- if (type->IsStructType()) {
- out_ << "std::tie(";
- PrintCommaSeparatedList(out_, results);
- out_ << ") = ";
+ decls() << " TNode<" << lowered[i]->GetGeneratedTNodeTypeName() << "> "
+ << stack->Top() << ";\n";
+ }
+
+ out() << " ";
+ if (type->StructSupertype()) {
+ out() << "std::tie(";
+ PrintCommaSeparatedList(out(), results);
+ out() << ") = ";
} else if (results.size() == 1) {
- out_ << results[0] << " = ";
+ out() << results[0] << " = ";
}
- out_ << instruction.constant->external_name() << "(state_)";
- if (type->IsStructType()) {
- out_ << ".Flatten();\n";
+ out() << instruction.constant->external_name() << "(state_)";
+ if (type->StructSupertype()) {
+ out() << ".Flatten();\n";
} else {
- out_ << ";\n";
+ out() << ";\n";
}
}
@@ -175,22 +230,23 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.intrinsic->signature().return_type;
std::vector<std::string> results;
- for (const Type* type : LowerType(return_type)) {
- results.push_back(FreshNodeName());
+
+ const auto lowered = LowerType(return_type);
+ for (std::size_t i = 0; i < lowered.size(); ++i) {
+ results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
stack->Push(results.back());
- out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
- << stack->Top() << ";\n";
- out_ << " USE(" << stack->Top() << ");\n";
+ decls() << " TNode<" << lowered[i]->GetGeneratedTNodeTypeName() << "> "
+ << stack->Top() << ";\n";
}
- out_ << " ";
- if (return_type->IsStructType()) {
- out_ << "std::tie(";
- PrintCommaSeparatedList(out_, results);
- out_ << ") = ";
+ out() << " ";
+ if (return_type->StructSupertype()) {
+ out() << "std::tie(";
+ PrintCommaSeparatedList(out(), results);
+ out() << ") = ";
} else {
if (results.size() == 1) {
- out_ << results[0] << " = ";
+ out() << results[0] << " = ";
}
}
@@ -207,13 +263,14 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ",
*original_type);
}
- if (return_type->GetGeneratedTNodeTypeName() !=
- original_type->GetGeneratedTNodeTypeName()) {
+ if (!original_type->StructSupertype() &&
+ return_type->GetGeneratedTNodeTypeName() !=
+ original_type->GetGeneratedTNodeTypeName()) {
if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- out_ << "TORQUE_CAST";
+ out() << "TORQUE_CAST";
} else {
- out_ << "ca_.UncheckedCast<" << return_type->GetGeneratedTNodeTypeName()
- << ">";
+ out() << "ca_.UncheckedCast<"
+ << return_type->GetGeneratedTNodeTypeName() << ">";
}
}
} else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
@@ -226,25 +283,25 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
ReportError("%FromConstexpr must return a non-constexpr type");
}
if (return_type->IsSubtypeOf(TypeOracle::GetSmiType())) {
- out_ << "ca_.SmiConstant";
+ out() << "ca_.SmiConstant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetNumberType())) {
- out_ << "ca_.NumberConstant";
+ out() << "ca_.NumberConstant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetStringType())) {
- out_ << "ca_.StringConstant";
+ out() << "ca_.StringConstant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetObjectType())) {
ReportError(
"%FromConstexpr cannot cast to subclass of HeapObject unless it's a "
"String or Number");
} else if (return_type->IsSubtypeOf(TypeOracle::GetIntPtrType())) {
- out_ << "ca_.IntPtrConstant";
+ out() << "ca_.IntPtrConstant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) {
- out_ << "ca_.UintPtrConstant";
+ out() << "ca_.UintPtrConstant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetInt32Type())) {
- out_ << "ca_.Int32Constant";
+ out() << "ca_.Int32Constant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetUint32Type())) {
- out_ << "ca_.Uint32Constant";
+ out() << "ca_.Uint32Constant";
} else if (return_type->IsSubtypeOf(TypeOracle::GetBoolType())) {
- out_ << "ca_.BoolConstant";
+ out() << "ca_.BoolConstant";
} else {
std::stringstream s;
s << "%FromConstexpr does not support return type " << *return_type;
@@ -252,21 +309,21 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
}
// Wrap the raw constexpr value in a static_cast to ensure that
// enums get properly casted to their backing integral value.
- out_ << "(CastToUnderlyingTypeIfEnum";
+ out() << "(CastToUnderlyingTypeIfEnum";
} else {
ReportError("no built in intrinsic with name " +
instruction.intrinsic->ExternalName());
}
- out_ << "(";
- PrintCommaSeparatedList(out_, args);
+ out() << "(";
+ PrintCommaSeparatedList(out(), args);
if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
- out_ << ")";
+ out() << ")";
}
- if (return_type->IsStructType()) {
- out_ << ").Flatten();\n";
+ if (return_type->StructSupertype()) {
+ out() << ").Flatten();\n";
} else {
- out_ << ");\n";
+ out() << ");\n";
}
}
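For orientation, a sketch of what this emitter produces once the declaration and assignment streams are split: the declaration is hoisted via decls() while the assignment stays in the block body via out(). The name tmp0 and the constant are illustrative, not taken from a real generated file:

    TNode<Smi> tmp0;                                         // via decls()
    tmp0 = ca_.SmiConstant(CastToUnderlyingTypeIfEnum(16));  // via out()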
@@ -282,42 +339,45 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.macro->signature().return_type;
std::vector<std::string> results;
- for (const Type* type : LowerType(return_type)) {
- results.push_back(FreshNodeName());
+
+ const auto lowered = LowerType(return_type);
+ for (std::size_t i = 0; i < lowered.size(); ++i) {
+ results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
stack->Push(results.back());
- out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
- << stack->Top() << ";\n";
- out_ << " USE(" << stack->Top() << ");\n";
+ decls() << " TNode<" << lowered[i]->GetGeneratedTNodeTypeName() << "> "
+ << stack->Top() << ";\n";
}
+
std::string catch_name =
PreCallableExceptionPreparation(instruction.catch_block);
- out_ << " ";
- bool needs_flattening = return_type->IsStructType();
+ out() << " ";
+ bool needs_flattening = return_type->StructSupertype().has_value();
if (needs_flattening) {
- out_ << "std::tie(";
- PrintCommaSeparatedList(out_, results);
- out_ << ") = ";
+ out() << "std::tie(";
+ PrintCommaSeparatedList(out(), results);
+ out() << ") = ";
} else {
if (results.size() == 1) {
- out_ << results[0] << " = ";
+ out() << results[0] << " = ";
} else {
DCHECK_EQ(0, results.size());
}
}
if (ExternMacro* extern_macro = ExternMacro::DynamicCast(instruction.macro)) {
- out_ << extern_macro->external_assembler_name() << "(state_).";
+ out() << extern_macro->external_assembler_name() << "(state_).";
} else {
args.insert(args.begin(), "state_");
}
- out_ << instruction.macro->ExternalName() << "(";
- PrintCommaSeparatedList(out_, args);
+ out() << instruction.macro->ExternalName() << "(";
+ PrintCommaSeparatedList(out(), args);
if (needs_flattening) {
- out_ << ").Flatten();\n";
+ out() << ").Flatten();\n";
} else {
- out_ << ");\n";
+ out() << ");\n";
}
PostCallableExceptionPreparation(catch_name, return_type,
- instruction.catch_block, &pre_call_stack);
+ instruction.catch_block, &pre_call_stack,
+ instruction.GetExceptionObjectDefinition());
}
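This decls()/out() split is the core of the refactoring: each TNode is declared exactly once at function scope and assigned inside whichever basic block computes it, which is what allows a definition to be referenced across blocks as a phi input. A minimal sketch of the resulting shape, with invented names:

    TNode<IntPtrT> tmp1;               // hoisted declaration, via decls()
    // ... later, inside the defining block, via out():
    ca_.Bind(&block2);
    tmp1 = SomeMacro_5(state_, tmp0);  // SomeMacro_5 is a placeholder name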
void CSAGenerator::EmitInstruction(
@@ -333,13 +393,14 @@ void CSAGenerator::EmitInstruction(
Stack<std::string> pre_call_stack = *stack;
std::vector<std::string> results;
const Type* return_type = instruction.macro->signature().return_type;
+
if (return_type != TypeOracle::GetNeverType()) {
- for (const Type* type :
- LowerType(instruction.macro->signature().return_type)) {
- results.push_back(FreshNodeName());
- out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> "
- << results.back() << ";\n";
- out_ << " USE(" << results.back() << ");\n";
+ const auto lowered = LowerType(return_type);
+ for (std::size_t i = 0; i < lowered.size(); ++i) {
+ results.push_back(
+ DefinitionToVariable(instruction.GetValueDefinition(i)));
+ decls() << " TNode<" << lowered[i]->GetGeneratedTNodeTypeName() << "> "
+ << results.back() << ";\n";
}
}
@@ -349,77 +410,97 @@ void CSAGenerator::EmitInstruction(
DCHECK_EQ(labels.size(), instruction.label_blocks.size());
for (size_t i = 0; i < labels.size(); ++i) {
TypeVector label_parameters = labels[i].types;
- label_names.push_back("label" + std::to_string(i));
+ label_names.push_back(FreshLabelName());
var_names.push_back({});
for (size_t j = 0; j < label_parameters.size(); ++j) {
- var_names[i].push_back("result_" + std::to_string(i) + "_" +
- std::to_string(j));
- out_ << " compiler::TypedCodeAssemblerVariable<"
- << label_parameters[j]->GetGeneratedTNodeTypeName() << "> "
- << var_names[i][j] << "(&ca_);\n";
+ var_names[i].push_back(FreshNodeName());
+ const auto def = instruction.GetLabelValueDefinition(i, j);
+ SetDefinitionVariable(def, var_names[i].back() + ".value()");
+ decls() << " compiler::TypedCodeAssemblerVariable<"
+ << label_parameters[j]->GetGeneratedTNodeTypeName() << "> "
+ << var_names[i][j] << "(&ca_);\n";
}
- out_ << " compiler::CodeAssemblerLabel " << label_names[i]
- << "(&ca_);\n";
+ out() << " compiler::CodeAssemblerLabel " << label_names[i]
+ << "(&ca_);\n";
}
std::string catch_name =
PreCallableExceptionPreparation(instruction.catch_block);
- out_ << " ";
+ out() << " ";
if (results.size() == 1) {
- out_ << results[0] << " = ";
+ out() << results[0] << " = ";
} else if (results.size() > 1) {
- out_ << "std::tie(";
- PrintCommaSeparatedList(out_, results);
- out_ << ") = ";
+ out() << "std::tie(";
+ PrintCommaSeparatedList(out(), results);
+ out() << ") = ";
}
if (ExternMacro* extern_macro = ExternMacro::DynamicCast(instruction.macro)) {
- out_ << extern_macro->external_assembler_name() << "(state_).";
+ out() << extern_macro->external_assembler_name() << "(state_).";
} else {
args.insert(args.begin(), "state_");
}
- out_ << instruction.macro->ExternalName() << "(";
- PrintCommaSeparatedList(out_, args);
+ out() << instruction.macro->ExternalName() << "(";
+ PrintCommaSeparatedList(out(), args);
bool first = args.empty();
for (size_t i = 0; i < label_names.size(); ++i) {
- if (!first) out_ << ", ";
- out_ << "&" << label_names[i];
+ if (!first) out() << ", ";
+ out() << "&" << label_names[i];
first = false;
for (size_t j = 0; j < var_names[i].size(); ++j) {
- out_ << ", &" << var_names[i][j];
+ out() << ", &" << var_names[i][j];
}
}
- if (return_type->IsStructType()) {
- out_ << ").Flatten();\n";
+ if (return_type->StructSupertype()) {
+ out() << ").Flatten();\n";
} else {
- out_ << ");\n";
+ out() << ");\n";
}
PostCallableExceptionPreparation(catch_name, return_type,
- instruction.catch_block, &pre_call_stack);
+ instruction.catch_block, &pre_call_stack,
+ instruction.GetExceptionObjectDefinition());
if (instruction.return_continuation) {
- out_ << " ca_.Goto(&" << BlockName(*instruction.return_continuation);
- for (const std::string& value : *stack) {
- out_ << ", " << value;
- }
- for (const std::string& result : results) {
- out_ << ", " << result;
+ out() << " ca_.Goto(&" << BlockName(*instruction.return_continuation);
+ DCHECK_EQ(stack->Size() + results.size(),
+ (*instruction.return_continuation)->InputDefinitions().Size());
+
+ const auto& input_definitions =
+ (*instruction.return_continuation)->InputDefinitions();
+ for (BottomOffset i = 0; i < input_definitions.AboveTop(); ++i) {
+ if (input_definitions.Peek(i).IsPhiFromBlock(
+ *instruction.return_continuation)) {
+ out() << ", "
+ << (i < stack->AboveTop() ? stack->Peek(i) : results[i.offset - stack->Size()]);
+ }
}
- out_ << ");\n";
- }
- for (size_t i = 0; i < label_names.size(); ++i) {
- out_ << " if (" << label_names[i] << ".is_used()) {\n";
- out_ << " ca_.Bind(&" << label_names[i] << ");\n";
- out_ << " ca_.Goto(&" << BlockName(instruction.label_blocks[i]);
- for (const std::string& value : *stack) {
- out_ << ", " << value;
+ out() << ");\n";
+ }
+ for (size_t l = 0; l < label_names.size(); ++l) {
+ out() << " if (" << label_names[l] << ".is_used()) {\n";
+ out() << " ca_.Bind(&" << label_names[l] << ");\n";
+ out() << " ca_.Goto(&" << BlockName(instruction.label_blocks[l]);
+ DCHECK_EQ(stack->Size() + var_names[l].size(),
+ instruction.label_blocks[l]->InputDefinitions().Size());
+
+ const auto& label_definitions =
+ instruction.label_blocks[l]->InputDefinitions();
+
+ BottomOffset i = 0;
+ for (; i < stack->AboveTop(); ++i) {
+ if (label_definitions.Peek(i).IsPhiFromBlock(
+ instruction.label_blocks[l])) {
+ out() << ", " << stack->Peek(i);
+ }
}
- for (const std::string& var : var_names[i]) {
- out_ << ", " << var << ".value()";
+ for (std::size_t k = 0; k < var_names[l].size(); ++k, ++i) {
+ if (label_definitions.Peek(i).IsPhiFromBlock(
+ instruction.label_blocks[l])) {
+ out() << ", " << var_names[l][k] << ".value()";
+ }
}
- out_ << ");\n";
-
- out_ << " }\n";
+ out() << ");\n";
+ out() << " }\n";
}
}
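Put together, a call to a macro that can exit through an otherwise label now lowers to roughly the following (all names invented for the sketch; note that only values which are phi inputs of the target block get forwarded to Goto):

    compiler::TypedCodeAssemblerVariable<Object> tmp4(&ca_);  // label parameter
    compiler::CodeAssemblerLabel label3(&ca_);
    tmp5 = SomeMacro_8(state_, tmp1, &label3, &tmp4);
    if (label3.is_used()) {
      ca_.Bind(&label3);
      ca_.Goto(&block6, tmp1, tmp4.value());
    }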
@@ -429,15 +510,16 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
std::vector<const Type*> result_types =
LowerType(instruction.builtin->signature().return_type);
if (instruction.is_tailcall) {
- out_ << " CodeStubAssembler(state_).TailCallBuiltin(Builtins::k"
- << instruction.builtin->ExternalName() << ", ";
- PrintCommaSeparatedList(out_, arguments);
- out_ << ");\n";
+ out() << " CodeStubAssembler(state_).TailCallBuiltin(Builtins::k"
+ << instruction.builtin->ExternalName() << ", ";
+ PrintCommaSeparatedList(out(), arguments);
+ out() << ");\n";
} else {
- std::string result_name = FreshNodeName();
+ std::string result_name;
if (result_types.size() == 1) {
- out_ << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
- << "> " << result_name << ";\n";
+ result_name = DefinitionToVariable(instruction.GetValueDefinition(0));
+ decls() << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
+ << "> " << result_name << ";\n";
}
std::string catch_name =
PreCallableExceptionPreparation(instruction.catch_block);
@@ -445,27 +527,27 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
if (result_types.size() == 1) {
std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
stack->Push(result_name);
- out_ << " " << result_name << " = ";
- if (generated_type != "Object") out_ << "TORQUE_CAST(";
- out_ << "CodeStubAssembler(state_).CallBuiltin(Builtins::k"
- << instruction.builtin->ExternalName() << ", ";
- PrintCommaSeparatedList(out_, arguments);
- if (generated_type != "Object") out_ << ")";
- out_ << ");\n";
- out_ << " USE(" << result_name << ");\n";
+ out() << " " << result_name << " = ";
+ if (generated_type != "Object") out() << "TORQUE_CAST(";
+ out() << "CodeStubAssembler(state_).CallBuiltin(Builtins::k"
+ << instruction.builtin->ExternalName() << ", ";
+ PrintCommaSeparatedList(out(), arguments);
+ if (generated_type != "Object") out() << ")";
+ out() << ");\n";
} else {
DCHECK_EQ(0, result_types.size());
// TODO(tebbi): Actually, builtins have to return a value, so we should
// not have to handle this case.
- out_ << " CodeStubAssembler(state_).CallBuiltin(Builtins::k"
- << instruction.builtin->ExternalName() << ", ";
- PrintCommaSeparatedList(out_, arguments);
- out_ << ");\n";
+ out() << " CodeStubAssembler(state_).CallBuiltin(Builtins::k"
+ << instruction.builtin->ExternalName() << ", ";
+ PrintCommaSeparatedList(out(), arguments);
+ out() << ");\n";
}
PostCallableExceptionPreparation(
catch_name,
result_types.size() == 0 ? TypeOracle::GetVoidType() : result_types[0],
- instruction.catch_block, &pre_call_stack);
+ instruction.catch_block, &pre_call_stack,
+ instruction.GetExceptionObjectDefinition());
}
}
@@ -483,20 +565,21 @@ void CSAGenerator::EmitInstruction(
ReportError("tail-calls to builtin pointers are not supported");
}
- stack->Push(FreshNodeName());
+ DCHECK_EQ(1, instruction.GetValueDefinitionCount());
+ stack->Push(DefinitionToVariable(instruction.GetValueDefinition(0)));
std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
- out_ << " TNode<" << generated_type << "> " << stack->Top() << " = ";
- if (generated_type != "Object") out_ << "TORQUE_CAST(";
- out_ << "CodeStubAssembler(state_).CallBuiltinPointer(Builtins::"
- "CallableFor(ca_."
- "isolate(),"
- "ExampleBuiltinForTorqueFunctionPointerType("
- << instruction.type->function_pointer_type_id() << ")).descriptor(), ";
- PrintCommaSeparatedList(out_, function_and_arguments);
- out_ << ")";
- if (generated_type != "Object") out_ << ")";
- out_ << "; \n";
- out_ << " USE(" << stack->Top() << ");\n";
+ decls() << " TNode<" << generated_type << "> " << stack->Top() << ";\n";
+ out() << stack->Top() << " = ";
+ if (generated_type != "Object") out() << "TORQUE_CAST(";
+ out() << "CodeStubAssembler(state_).CallBuiltinPointer(Builtins::"
+ "CallableFor(ca_."
+ "isolate(),"
+ "ExampleBuiltinForTorqueFunctionPointerType("
+ << instruction.type->function_pointer_type_id() << ")).descriptor(), ";
+ PrintCommaSeparatedList(out(), function_and_arguments);
+ out() << ")";
+ if (generated_type != "Object") out() << ")";
+ out() << ";\n";
}
std::string CSAGenerator::PreCallableExceptionPreparation(
@@ -504,38 +587,52 @@ std::string CSAGenerator::PreCallableExceptionPreparation(
std::string catch_name;
if (catch_block) {
catch_name = FreshCatchName();
- out_ << " compiler::CodeAssemblerExceptionHandlerLabel " << catch_name
- << "__label(&ca_, compiler::CodeAssemblerLabel::kDeferred);\n";
- out_ << " { compiler::CodeAssemblerScopedExceptionHandler s(&ca_, &"
- << catch_name << "__label);\n";
+ out() << " compiler::CodeAssemblerExceptionHandlerLabel " << catch_name
+ << "__label(&ca_, compiler::CodeAssemblerLabel::kDeferred);\n";
+ out() << " { compiler::ScopedExceptionHandler s(&ca_, &" << catch_name
+ << "__label);\n";
}
return catch_name;
}
void CSAGenerator::PostCallableExceptionPreparation(
const std::string& catch_name, const Type* return_type,
- base::Optional<Block*> catch_block, Stack<std::string>* stack) {
+ base::Optional<Block*> catch_block, Stack<std::string>* stack,
+ const base::Optional<DefinitionLocation>& exception_object_definition) {
if (catch_block) {
+ DCHECK(exception_object_definition);
std::string block_name = BlockName(*catch_block);
- out_ << " }\n";
- out_ << " if (" << catch_name << "__label.is_used()) {\n";
- out_ << " compiler::CodeAssemblerLabel " << catch_name
- << "_skip(&ca_);\n";
+ out() << " }\n";
+ out() << " if (" << catch_name << "__label.is_used()) {\n";
+ out() << " compiler::CodeAssemblerLabel " << catch_name
+ << "_skip(&ca_);\n";
if (!return_type->IsNever()) {
- out_ << " ca_.Goto(&" << catch_name << "_skip);\n";
+ out() << " ca_.Goto(&" << catch_name << "_skip);\n";
}
- out_ << " TNode<Object> " << catch_name << "_exception_object;\n";
- out_ << " ca_.Bind(&" << catch_name << "__label, &" << catch_name
- << "_exception_object);\n";
- out_ << " ca_.Goto(&" << block_name;
- for (size_t i = 0; i < stack->Size(); ++i) {
- out_ << ", " << stack->begin()[i];
+ decls() << " TNode<Object> "
+ << DefinitionToVariable(*exception_object_definition) << ";\n";
+ out() << " ca_.Bind(&" << catch_name << "__label, &"
+ << DefinitionToVariable(*exception_object_definition) << ");\n";
+ out() << " ca_.Goto(&" << block_name;
+
+ DCHECK_EQ(stack->Size() + 1, (*catch_block)->InputDefinitions().Size());
+ const auto& input_definitions = (*catch_block)->InputDefinitions();
+ for (BottomOffset i = 0; i < input_definitions.AboveTop(); ++i) {
+ if (input_definitions.Peek(i).IsPhiFromBlock(*catch_block)) {
+ if (i < stack->AboveTop()) {
+ out() << ", " << stack->Peek(i);
+ } else {
+ DCHECK_EQ(i, stack->AboveTop());
+ out() << ", " << DefinitionToVariable(*exception_object_definition);
+ }
+ }
}
- out_ << ", " << catch_name << "_exception_object);\n";
+ out() << ");\n";
+
if (!return_type->IsNever()) {
- out_ << " ca_.Bind(&" << catch_name << "_skip);\n";
+ out() << " ca_.Bind(&" << catch_name << "_skip);\n";
}
- out_ << " }\n";
+ out() << " }\n";
}
}
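For reference, this Pre/Post pair brackets a throwing call with scaffolding of roughly the following shape (catch0, block5, tmp2 and phi_bb5_1 are illustrative; phi_bb5_1 is the exception-object definition, declared via decls()):

    compiler::CodeAssemblerExceptionHandlerLabel catch0__label(
        &ca_, compiler::CodeAssemblerLabel::kDeferred);
    { compiler::ScopedExceptionHandler s(&ca_, &catch0__label);
      tmp2 = CodeStubAssembler(state_).CallRuntime(
          Runtime::kAbort, p_context, tmp1);  // the guarded call (illustrative)
    }
    if (catch0__label.is_used()) {
      compiler::CodeAssemblerLabel catch0_skip(&ca_);
      ca_.Goto(&catch0_skip);
      ca_.Bind(&catch0__label, &phi_bb5_1);
      ca_.Goto(&block5, phi_bb5_1);  // phi inputs of the catch block
      ca_.Bind(&catch0_skip);
    }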
@@ -552,15 +649,16 @@ void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
ReportError("runtime function must have at most one result");
}
if (instruction.is_tailcall) {
- out_ << " CodeStubAssembler(state_).TailCallRuntime(Runtime::k"
- << instruction.runtime_function->ExternalName() << ", ";
- PrintCommaSeparatedList(out_, arguments);
- out_ << ");\n";
+ out() << " CodeStubAssembler(state_).TailCallRuntime(Runtime::k"
+ << instruction.runtime_function->ExternalName() << ", ";
+ PrintCommaSeparatedList(out(), arguments);
+ out() << ");\n";
} else {
- std::string result_name = FreshNodeName();
+ std::string result_name;
if (result_types.size() == 1) {
- out_ << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
- << "> " << result_name << ";\n";
+ result_name = DefinitionToVariable(instruction.GetValueDefinition(0));
+ decls() << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
+ << "> " << result_name << ";\n";
}
std::string catch_name =
PreCallableExceptionPreparation(instruction.catch_block);
@@ -568,94 +666,132 @@ void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
if (result_types.size() == 1) {
std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
stack->Push(result_name);
- out_ << " " << result_name << " = ";
- if (generated_type != "Object") out_ << "TORQUE_CAST(";
- out_ << "CodeStubAssembler(state_).CallRuntime(Runtime::k"
- << instruction.runtime_function->ExternalName() << ", ";
- PrintCommaSeparatedList(out_, arguments);
- out_ << ")";
- if (generated_type != "Object") out_ << ")";
- out_ << "; \n";
- out_ << " USE(" << result_name << ");\n";
+ out() << " " << result_name << " = ";
+ if (generated_type != "Object") out() << "TORQUE_CAST(";
+ out() << "CodeStubAssembler(state_).CallRuntime(Runtime::k"
+ << instruction.runtime_function->ExternalName() << ", ";
+ PrintCommaSeparatedList(out(), arguments);
+ out() << ")";
+ if (generated_type != "Object") out() << ")";
+ out() << ";\n";
} else {
DCHECK_EQ(0, result_types.size());
- out_ << " CodeStubAssembler(state_).CallRuntime(Runtime::k"
- << instruction.runtime_function->ExternalName() << ", ";
- PrintCommaSeparatedList(out_, arguments);
- out_ << ");\n";
+ out() << " CodeStubAssembler(state_).CallRuntime(Runtime::k"
+ << instruction.runtime_function->ExternalName() << ", ";
+ PrintCommaSeparatedList(out(), arguments);
+ out() << ");\n";
if (return_type == TypeOracle::GetNeverType()) {
- out_ << " CodeStubAssembler(state_).Unreachable();\n";
+ out() << " CodeStubAssembler(state_).Unreachable();\n";
} else {
DCHECK(return_type == TypeOracle::GetVoidType());
}
}
- PostCallableExceptionPreparation(catch_name, return_type,
- instruction.catch_block, &pre_call_stack);
+ PostCallableExceptionPreparation(
+ catch_name, return_type, instruction.catch_block, &pre_call_stack,
+ instruction.GetExceptionObjectDefinition());
}
}
void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
Stack<std::string>* stack) {
- out_ << " ca_.Branch(" << stack->Pop() << ", &"
- << BlockName(instruction.if_true) << ", &"
- << BlockName(instruction.if_false);
- for (const std::string& value : *stack) {
- out_ << ", " << value;
+ out() << " ca_.Branch(" << stack->Pop() << ", &"
+ << BlockName(instruction.if_true) << ", std::vector<Node*>{";
+
+ const auto& true_definitions = instruction.if_true->InputDefinitions();
+ DCHECK_EQ(stack->Size(), true_definitions.Size());
+ bool first = true;
+ for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ if (true_definitions.Peek(i).IsPhiFromBlock(instruction.if_true)) {
+ if (!first) out() << ", ";
+ out() << stack->Peek(i);
+ first = false;
+ }
}
- out_ << ");\n";
+
+ out() << "}, &" << BlockName(instruction.if_false) << ", std::vector<Node*>{";
+
+ const auto& false_definitions = instruction.if_false->InputDefinitions();
+ DCHECK_EQ(stack->Size(), false_definitions.Size());
+ first = true;
+ for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ if (false_definitions.Peek(i).IsPhiFromBlock(instruction.if_false)) {
+ if (!first) out() << ", ";
+ out() << stack->Peek(i);
+ first = false;
+ }
+ }
+
+ out() << "});\n";
}
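The emitted branch now passes each successor's phi inputs explicitly as vectors, e.g. (names illustrative):

    ca_.Branch(tmp0, &block3, std::vector<Node*>{tmp1},
               &block4, std::vector<Node*>{tmp1, tmp2});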
void CSAGenerator::EmitInstruction(
const ConstexprBranchInstruction& instruction, Stack<std::string>* stack) {
- out_ << " if ((" << instruction.condition << ")) {\n";
- out_ << " ca_.Goto(&" << BlockName(instruction.if_true);
- for (const std::string& value : *stack) {
- out_ << ", " << value;
+ out() << " if ((" << instruction.condition << ")) {\n";
+ out() << " ca_.Goto(&" << BlockName(instruction.if_true);
+
+ const auto& true_definitions = instruction.if_true->InputDefinitions();
+ DCHECK_EQ(stack->Size(), true_definitions.Size());
+ for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ if (true_definitions.Peek(i).IsPhiFromBlock(instruction.if_true)) {
+ out() << ", " << stack->Peek(i);
+ }
}
- out_ << ");\n";
- out_ << " } else {\n";
- out_ << " ca_.Goto(&" << BlockName(instruction.if_false);
- for (const std::string& value : *stack) {
- out_ << ", " << value;
+
+ out() << ");\n";
+ out() << " } else {\n";
+ out() << " ca_.Goto(&" << BlockName(instruction.if_false);
+
+ const auto& false_definitions = instruction.if_false->InputDefinitions();
+ DCHECK_EQ(stack->Size(), false_definitions.Size());
+ for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ if (false_definitions.Peek(i).IsPhiFromBlock(instruction.if_false)) {
+ out() << ", " << stack->Peek(i);
+ }
}
- out_ << ");\n";
- out_ << " }\n";
+ out() << ");\n";
+ out() << " }\n";
}
void CSAGenerator::EmitInstruction(const GotoInstruction& instruction,
Stack<std::string>* stack) {
- out_ << " ca_.Goto(&" << BlockName(instruction.destination);
- for (const std::string& value : *stack) {
- out_ << ", " << value;
+ out() << " ca_.Goto(&" << BlockName(instruction.destination);
+ const auto& destination_definitions =
+ instruction.destination->InputDefinitions();
+ DCHECK_EQ(stack->Size(), destination_definitions.Size());
+ for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ if (destination_definitions.Peek(i).IsPhiFromBlock(
+ instruction.destination)) {
+ out() << ", " << stack->Peek(i);
+ }
}
- out_ << ");\n";
+ out() << ");\n";
}
void CSAGenerator::EmitInstruction(const GotoExternalInstruction& instruction,
Stack<std::string>* stack) {
for (auto it = instruction.variable_names.rbegin();
it != instruction.variable_names.rend(); ++it) {
- out_ << " *" << *it << " = " << stack->Pop() << ";\n";
+ out() << " *" << *it << " = " << stack->Pop() << ";\n";
}
- out_ << " ca_.Goto(" << instruction.destination << ");\n";
+ out() << " ca_.Goto(" << instruction.destination << ");\n";
}
void CSAGenerator::EmitInstruction(const ReturnInstruction& instruction,
Stack<std::string>* stack) {
if (*linkage_ == Builtin::kVarArgsJavaScript) {
- out_ << " " << ARGUMENTS_VARIABLE_STRING << ".PopAndReturn(";
+ out() << " " << ARGUMENTS_VARIABLE_STRING << ".PopAndReturn(";
} else {
- out_ << " CodeStubAssembler(state_).Return(";
+ out() << " CodeStubAssembler(state_).Return(";
}
- out_ << stack->Pop() << ");\n";
+ out() << stack->Pop() << ");\n";
}
void CSAGenerator::EmitInstruction(
const PrintConstantStringInstruction& instruction,
Stack<std::string>* stack) {
- out_ << " CodeStubAssembler(state_).Print("
- << StringLiteralQuote(instruction.message) << ");\n";
+ out() << " CodeStubAssembler(state_).Print("
+ << StringLiteralQuote(instruction.message) << ");\n";
}
void CSAGenerator::EmitInstruction(const AbortInstruction& instruction,
@@ -663,18 +799,18 @@ void CSAGenerator::EmitInstruction(const AbortInstruction& instruction,
switch (instruction.kind) {
case AbortInstruction::Kind::kUnreachable:
DCHECK(instruction.message.empty());
- out_ << " CodeStubAssembler(state_).Unreachable();\n";
+ out() << " CodeStubAssembler(state_).Unreachable();\n";
break;
case AbortInstruction::Kind::kDebugBreak:
DCHECK(instruction.message.empty());
- out_ << " CodeStubAssembler(state_).DebugBreak();\n";
+ out() << " CodeStubAssembler(state_).DebugBreak();\n";
break;
case AbortInstruction::Kind::kAssertionFailure: {
std::string file = StringLiteralQuote(
SourceFileMap::PathFromV8Root(instruction.pos.source));
- out_ << " CodeStubAssembler(state_).FailAssert("
- << StringLiteralQuote(instruction.message) << ", " << file << ", "
- << instruction.pos.start.line + 1 << ");\n";
+ out() << " CodeStubAssembler(state_).FailAssert("
+ << StringLiteralQuote(instruction.message) << ", " << file << ", "
+ << instruction.pos.start.line + 1 << ");\n";
break;
}
}
@@ -682,25 +818,30 @@ void CSAGenerator::EmitInstruction(const AbortInstruction& instruction,
void CSAGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
Stack<std::string>* stack) {
- stack->Poke(stack->AboveTop() - 1,
- "ca_.UncheckedCast<" +
- instruction.destination_type->GetGeneratedTNodeTypeName() +
- ">(" + stack->Top() + ")");
+ const std::string str =
+ "ca_.UncheckedCast<" +
+ instruction.destination_type->GetGeneratedTNodeTypeName() + ">(" +
+ stack->Top() + ")";
+ stack->Poke(stack->AboveTop() - 1, str);
+ SetDefinitionVariable(instruction.GetValueDefinition(), str);
}
void CSAGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
Stack<std::string>* stack) {
- std::string result_name = FreshNodeName();
+ std::string result_name =
+ DefinitionToVariable(instruction.GetValueDefinition());
std::string offset = stack->Pop();
std::string object = stack->Pop();
stack->Push(result_name);
- out_ << " " << instruction.type->GetGeneratedTypeName() << result_name
- << " = CodeStubAssembler(state_).LoadReference<"
- << instruction.type->GetGeneratedTNodeTypeName()
- << ">(CodeStubAssembler::Reference{" << object << ", " << offset
- << "});\n";
+ decls() << " " << instruction.type->GetGeneratedTypeName() << " "
+ << result_name << ";\n";
+ out() << " " << result_name
+ << " = CodeStubAssembler(state_).LoadReference<"
+ << instruction.type->GetGeneratedTNodeTypeName()
+ << ">(CodeStubAssembler::Reference{" << object << ", " << offset
+ << "});\n";
}
void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
@@ -709,25 +850,29 @@ void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
std::string offset = stack->Pop();
std::string object = stack->Pop();
- out_ << " CodeStubAssembler(state_).StoreReference<"
- << instruction.type->GetGeneratedTNodeTypeName()
- << ">(CodeStubAssembler::"
- "Reference{"
- << object << ", " << offset << "}, " << value << ");\n";
+ out() << " CodeStubAssembler(state_).StoreReference<"
+ << instruction.type->GetGeneratedTNodeTypeName()
+ << ">(CodeStubAssembler::"
+ "Reference{"
+ << object << ", " << offset << "}, " << value << ");\n";
}
namespace {
std::string GetBitFieldSpecialization(const BitFieldStructType* container,
const BitField& field) {
- std::string suffix = field.num_bits == 1 ? "Bit" : "Bits";
- return "TorqueGenerated" + container->name() +
- "Fields::" + CamelifyString(field.name_and_type.name) + suffix;
+ std::stringstream stream;
+ stream << "base::BitField<"
+ << field.name_and_type.type->GetConstexprGeneratedTypeName() << ", "
+ << field.offset << ", " << field.num_bits << ", "
+ << container->GetConstexprGeneratedTypeName() << ">";
+ return stream.str();
}
} // namespace
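GetBitFieldSpecialization now names the base::BitField instantiation directly rather than going through the generated TorqueGenerated<Name>Fields aliases. For, say, a one-bit field at offset 3 of a 32-bit bitfield struct, the load below would come out roughly as (types and names illustrative):

    tmp1 = ca_.UncheckedCast<BoolT>(
        CodeStubAssembler(state_).DecodeWord32<
            base::BitField<bool, 3, 1, uint32_t>>(
            ca_.UncheckedCast<Word32T>(tmp0)));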
void CSAGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
Stack<std::string>* stack) {
- std::string result_name = FreshNodeName();
+ std::string result_name =
+ DefinitionToVariable(instruction.GetValueDefinition());
std::string bit_field_struct = stack->Pop();
stack->Push(result_name);
@@ -742,17 +887,20 @@ void CSAGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
? (result_uintptr ? "DecodeWord" : "DecodeWord32FromWord")
: (result_uintptr ? "DecodeWordFromWord32" : "DecodeWord32");
- out_ << " " << result_type->GetGeneratedTypeName() << result_name
- << " = ca_.UncheckedCast<" << result_type->GetGeneratedTNodeTypeName()
- << ">(CodeStubAssembler(state_)." << decoder << "<"
- << GetBitFieldSpecialization(source_type, instruction.bit_field)
- << ">(ca_.UncheckedCast<" << source_word_type << ">(" << bit_field_struct
- << ")));\n";
+ decls() << " " << result_type->GetGeneratedTypeName() << " " << result_name
+ << ";\n";
+ out() << " " << result_name << " = ca_.UncheckedCast<"
+ << result_type->GetGeneratedTNodeTypeName()
+ << ">(CodeStubAssembler(state_)." << decoder << "<"
+ << GetBitFieldSpecialization(source_type, instruction.bit_field)
+ << ">(ca_.UncheckedCast<" << source_word_type << ">("
+ << bit_field_struct << ")));\n";
}
void CSAGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
Stack<std::string>* stack) {
- std::string result_name = FreshNodeName();
+ std::string result_name =
+ DefinitionToVariable(instruction.GetValueDefinition());
std::string value = stack->Pop();
std::string bit_field_struct = stack->Pop();
@@ -768,13 +916,15 @@ void CSAGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
struct_uintptr ? (field_uintptr ? "UpdateWord" : "UpdateWord32InWord")
: (field_uintptr ? "UpdateWordInWord32" : "UpdateWord32");
- out_ << " " << struct_type->GetGeneratedTypeName() << result_name
- << " = ca_.UncheckedCast<" << struct_type->GetGeneratedTNodeTypeName()
- << ">(CodeStubAssembler(state_)." << encoder << "<"
- << GetBitFieldSpecialization(struct_type, instruction.bit_field)
- << ">(ca_.UncheckedCast<" << struct_word_type << ">(" << bit_field_struct
- << "), ca_.UncheckedCast<" << field_word_type << ">(" << value
- << ")));\n";
+ decls() << " " << struct_type->GetGeneratedTypeName() << " " << result_name
+ << ";\n";
+ out() << " " << result_name << " = ca_.UncheckedCast<"
+ << struct_type->GetGeneratedTNodeTypeName()
+ << ">(CodeStubAssembler(state_)." << encoder << "<"
+ << GetBitFieldSpecialization(struct_type, instruction.bit_field)
+ << ">(ca_.UncheckedCast<" << struct_word_type << ">("
+ << bit_field_struct << "), ca_.UncheckedCast<" << field_word_type
+ << ">(" << value << ")));\n";
}
// static
@@ -783,10 +933,10 @@ void CSAGenerator::EmitCSAValue(VisitResult result,
std::ostream& out) {
if (!result.IsOnStack()) {
out << result.constexpr_value();
- } else if (auto* struct_type = StructType::DynamicCast(result.type())) {
- out << struct_type->GetGeneratedTypeName() << "{";
+ } else if (auto struct_type = result.type()->StructSupertype()) {
+ out << (*struct_type)->GetGeneratedTypeName() << "{";
bool first = true;
- for (auto& field : struct_type->fields()) {
+ for (auto& field : (*struct_type)->fields()) {
if (!first) {
out << ", ";
}
diff --git a/deps/v8/src/torque/csa-generator.h b/deps/v8/src/torque/csa-generator.h
index 5790d9434c..83c4ec410a 100644
--- a/deps/v8/src/torque/csa-generator.h
+++ b/deps/v8/src/torque/csa-generator.h
@@ -19,7 +19,8 @@ class CSAGenerator {
CSAGenerator(const ControlFlowGraph& cfg, std::ostream& out,
base::Optional<Builtin::Kind> linkage = base::nullopt)
: cfg_(cfg),
- out_(out),
+ out_(&out),
+ out_decls_(&out),
linkage_(linkage),
previous_position_(SourcePosition::Invalid()) {}
base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
@@ -31,22 +32,56 @@ class CSAGenerator {
private:
const ControlFlowGraph& cfg_;
- std::ostream& out_;
+ std::ostream* out_;
+ std::ostream* out_decls_;
size_t fresh_id_ = 0;
base::Optional<Builtin::Kind> linkage_;
SourcePosition previous_position_;
+ std::map<DefinitionLocation, std::string> location_map_;
+ std::string DefinitionToVariable(const DefinitionLocation& location) {
+ if (location.IsPhi()) {
+ std::stringstream stream;
+ stream << "phi_bb" << location.GetPhiBlock()->id() << "_"
+ << location.GetPhiIndex();
+ return stream.str();
+ } else if (location.IsParameter()) {
+ auto it = location_map_.find(location);
+ DCHECK_NE(it, location_map_.end());
+ return it->second;
+ } else {
+ DCHECK(location.IsInstruction());
+ auto it = location_map_.find(location);
+ if (it == location_map_.end()) {
+ it = location_map_.insert(std::make_pair(location, FreshNodeName()))
+ .first;
+ }
+ return it->second;
+ }
+ }
+
+ void SetDefinitionVariable(const DefinitionLocation& definition,
+ const std::string& str) {
+ DCHECK_EQ(location_map_.find(definition), location_map_.end());
+ location_map_.insert(std::make_pair(definition, str));
+ }
+
+ std::ostream& out() { return *out_; }
+ std::ostream& decls() { return *out_decls_; }
+
+ bool IsEmptyInstruction(const Instruction& instruction);
void EmitSourcePosition(SourcePosition pos, bool always_emit = false);
std::string PreCallableExceptionPreparation(
base::Optional<Block*> catch_block);
- void PostCallableExceptionPreparation(const std::string& catch_name,
- const Type* return_type,
- base::Optional<Block*> catch_block,
- Stack<std::string>* stack);
+ void PostCallableExceptionPreparation(
+ const std::string& catch_name, const Type* return_type,
+ base::Optional<Block*> catch_block, Stack<std::string>* stack,
+ const base::Optional<DefinitionLocation>& exception_object_definition);
std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
std::string FreshCatchName() { return "catch" + std::to_string(fresh_id_++); }
+ std::string FreshLabelName() { return "label" + std::to_string(fresh_id_++); }
std::string BlockName(const Block* block) {
return "block" + std::to_string(block->id());
}
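Taken together, the fresh-name helpers and DefinitionToVariable give the generated code a predictable naming scheme:

    // tmp<N>         - value defined by an instruction (FreshNodeName)
    // phi_bb<B>_<I>  - I-th phi definition of block B (DefinitionToVariable)
    // label<N>       - otherwise-label (FreshLabelName)
    // catch<N>       - exception handler label (FreshCatchName)
    // block<N>       - basic block label (BlockName)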
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index fe9a522b18..b6fdef67b9 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -340,7 +340,7 @@ class Macro : public Callable {
bool ShouldBeInlined() const override {
for (const LabelDeclaration& label : signature().labels) {
for (const Type* type : label.types) {
- if (type->IsStructType()) return true;
+ if (type->StructSupertype()) return true;
}
}
// Intrinsics that are used internally in Torque and implemented as torque
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index 32c566ce8a..5e3c8bbcb2 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -91,18 +91,16 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
}
for (size_t i = 0; i < signature.types().size(); ++i) {
- if (const StructType* type =
- StructType::DynamicCast(signature.types()[i])) {
- Error("Builtin '", decl->name, "' uses the struct '", type->name(),
- "' as argument '", signature.parameter_names[i],
- "', which is not supported.");
+ if (signature.types()[i]->StructSupertype()) {
+ Error("Builtin do not support structs as arguments, but argument ",
+ signature.parameter_names[i], " has type ", *signature.types()[i],
+ ".");
}
}
- if (const StructType* struct_type =
- StructType::DynamicCast(signature.return_type)) {
- Error("Builtins ", decl->name, " cannot return structs ",
- struct_type->name());
+ if (signature.return_type->StructSupertype()) {
+ Error("Builtins cannot return structs, but the return type is ",
+ *signature.return_type, ".");
}
return Declarations::CreateBuiltin(std::move(external_name),
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index bb84f9bdf4..bbfbb686ef 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -35,15 +35,6 @@ class GlobalContext : public ContextualClass<GlobalContext> {
return Get().declarables_;
}
- static void RegisterClass(const TypeAlias* alias) {
- DCHECK(alias->ParentScope()->IsNamespace());
- Get().classes_.push_back(alias);
- }
-
- using GlobalClassList = std::vector<const TypeAlias*>;
-
- static const GlobalClassList& GetClasses() { return Get().classes_; }
-
static void AddCppInclude(std::string include_path) {
Get().cpp_includes_.insert(std::move(include_path));
}
@@ -84,7 +75,6 @@ class GlobalContext : public ContextualClass<GlobalContext> {
std::vector<std::unique_ptr<Declarable>> declarables_;
std::set<std::string> cpp_includes_;
std::map<SourceId, PerFileStreams> generated_per_file_;
- GlobalClassList classes_;
std::map<std::string, size_t> fresh_ids_;
friend class LanguageServerData;
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index ae6ceb3ccf..820576e02a 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -3,15 +3,19 @@
// found in the LICENSE file.
#include <algorithm>
+#include <string>
+#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
+#include "src/torque/global-context.h"
#include "src/torque/implementation-visitor.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
+#include "src/torque/types.h"
namespace v8 {
namespace internal {
@@ -137,7 +141,7 @@ void ImplementationVisitor::Visit(NamespaceConstant* decl) {
assembler_ = base::nullopt;
- source_out() << "return ";
+ source_out() << " return ";
CSAGenerator::EmitCSAValue(return_result, values, source_out());
source_out() << ";\n";
source_out() << "}\n\n";
@@ -433,8 +437,9 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
source_out()
<< " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
- source_out()
- << " TNode<IntPtrT> arguments_length(ChangeInt32ToIntPtr(argc));\n";
+ source_out() << " TNode<IntPtrT> "
+ "arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
+ "Int32T>(argc)));\n";
source_out() << " TNode<RawPtrT> arguments_frame = "
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
source_out() << " TorqueStructArguments "
@@ -1281,17 +1286,16 @@ LocationReference ImplementationVisitor::GenerateFieldReference(
result_range.Extend(length.stack_range());
const Type* slice_type = TypeOracle::GetSliceType(field.name_and_type.type);
return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
-
} else {
- VisitResult heap_reference(
- TypeOracle::GetReferenceType(field.name_and_type.type), result_range);
- return LocationReference::HeapReference(heap_reference);
+ const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
+ field.const_qualified);
+ return LocationReference::HeapReference(VisitResult(type, result_range));
}
}
// This is used to generate field references during initialization, where we can
// re-use the offsets used for computing the allocation size.
-LocationReference ImplementationVisitor::GenerateFieldReference(
+LocationReference ImplementationVisitor::GenerateFieldReferenceForInit(
VisitResult object, const Field& field,
const LayoutForInitialization& layout) {
StackRange result_range = assembler().TopRange(0);
@@ -1306,8 +1310,10 @@ LocationReference ImplementationVisitor::GenerateFieldReference(
const Type* slice_type = TypeOracle::GetSliceType(field.name_and_type.type);
return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
} else {
+ // Const fields are writable during initialization.
VisitResult heap_reference(
- TypeOracle::GetReferenceType(field.name_and_type.type), result_range);
+ TypeOracle::GetMutableReferenceType(field.name_and_type.type),
+ result_range);
return LocationReference::HeapReference(heap_reference);
}
}
@@ -1324,7 +1330,7 @@ void ImplementationVisitor::InitializeClass(
VisitResult initializer_value =
initializer_results.field_value_map.at(f.name_and_type.name);
LocationReference field =
- GenerateFieldReference(allocate_result, f, layout);
+ GenerateFieldReferenceForInit(allocate_result, f, layout);
if (f.index) {
DCHECK(field.IsHeapSlice());
VisitResult slice = field.GetVisitResult();
@@ -1339,7 +1345,7 @@ void ImplementationVisitor::InitializeClass(
VisitResult ImplementationVisitor::GenerateArrayLength(
Expression* array_length, Namespace* nspace,
- const std::map<std::string, LocationReference>& bindings) {
+ const std::map<std::string, LocalValue>& bindings) {
StackScope stack_scope(this);
CurrentSourcePosition::Scope pos_scope(array_length->pos);
// Switch to the namespace where the class was declared.
@@ -1363,11 +1369,15 @@ VisitResult ImplementationVisitor::GenerateArrayLength(VisitResult object,
StackScope stack_scope(this);
const ClassType* class_type = *object.type()->ClassSupertype();
- std::map<std::string, LocationReference> bindings;
+ std::map<std::string, LocalValue> bindings;
for (Field f : class_type->ComputeAllFields()) {
if (f.index) break;
bindings.insert(
- {f.name_and_type.name, GenerateFieldReference(object, f, class_type)});
+ {f.name_and_type.name,
+ f.const_qualified
+ ? LocalValue{GenerateFieldReference(object, f, class_type)}
+ : LocalValue(
+ "Non-const fields cannot be used for array lengths.")});
}
return stack_scope.Yield(
GenerateArrayLength(*field.index, class_type->nspace(), bindings));
@@ -1379,13 +1389,18 @@ VisitResult ImplementationVisitor::GenerateArrayLength(
DCHECK(field.index);
StackScope stack_scope(this);
- std::map<std::string, LocationReference> bindings;
+ std::map<std::string, LocalValue> bindings;
for (Field f : class_type->ComputeAllFields()) {
if (f.index) break;
const std::string& fieldname = f.name_and_type.name;
VisitResult value = initializer_results.field_value_map.at(fieldname);
- bindings.insert({fieldname, LocationReference::Temporary(
- value, "initial field " + fieldname)});
+ bindings.insert(
+ {fieldname,
+ f.const_qualified
+ ? LocalValue{LocationReference::Temporary(
+ value, "initial field " + fieldname)}
+ : LocalValue(
+ "Non-const fields cannot be used for array lengths.")});
}
return stack_scope.Yield(
GenerateArrayLength(*field.index, class_type->nspace(), bindings));
@@ -1481,7 +1496,7 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
VisitResult(TypeOracle::GetConstexprInstanceTypeType(),
CapifyStringWithUnderscores(class_type->name()) + "_TYPE"));
object_map = GenerateCall(
- QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "GetStructMap"),
+ QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "GetInstanceTypeMap"),
get_struct_map_arguments, {}, false);
CurrentSourcePosition::Scope current_pos(expr->pos);
initializer_results.names.insert(initializer_results.names.begin(),
@@ -1653,7 +1668,7 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
size_t i = 0;
for (const Type* type : label_info.types) {
std::string generated_type_name;
- if (type->IsStructType()) {
+ if (type->StructSupertype()) {
generated_type_name = "\n#error no structs allowed in labels\n";
} else {
generated_type_name = "compiler::TypedCodeAssemblerVariable<";
@@ -1948,15 +1963,15 @@ LocationReference ImplementationVisitor::GetLocationReference(
LocationReference ImplementationVisitor::GetLocationReference(
FieldAccessExpression* expr) {
return GenerateFieldAccess(GetLocationReference(expr->object),
- expr->field->value, expr->field->pos);
+ expr->field->value, false, expr->field->pos);
}
LocationReference ImplementationVisitor::GenerateFieldAccess(
LocationReference reference, const std::string& fieldname,
- base::Optional<SourcePosition> pos) {
+ bool ignore_struct_field_constness, base::Optional<SourcePosition> pos) {
if (reference.IsVariableAccess() &&
- reference.variable().type()->IsStructType()) {
- const StructType* type = StructType::cast(reference.variable().type());
+ reference.variable().type()->StructSupertype()) {
+ const StructType* type = *reference.variable().type()->StructSupertype();
const Field& field = type->LookupField(fieldname);
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
LanguageServerData::AddDefinition(*pos, field.pos);
@@ -1970,9 +1985,10 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
ProjectStructField(reference.variable(), fieldname));
}
}
- if (reference.IsTemporary() && reference.temporary().type()->IsStructType()) {
+ if (reference.IsTemporary() &&
+ reference.temporary().type()->StructSupertype()) {
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
- const StructType* type = StructType::cast(reference.temporary().type());
+ const StructType* type = *reference.temporary().type()->StructSupertype();
const Field& field = type->LookupField(fieldname);
LanguageServerData::AddDefinition(*pos, field.pos);
}
@@ -1988,20 +2004,23 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
}
if (reference.IsHeapReference()) {
VisitResult ref = reference.heap_reference();
- auto generic_type = StructType::MatchUnaryGeneric(
- ref.type(), TypeOracle::GetReferenceGeneric());
+ bool is_const;
+ auto generic_type =
+ TypeOracle::MatchReferenceGeneric(ref.type(), &is_const);
if (!generic_type) {
ReportError(
"Left-hand side of field access expression is marked as a reference "
"but is not of type Reference<...>. Found type: ",
ref.type()->ToString());
}
- if (const StructType* struct_type =
- StructType::DynamicCast(*generic_type)) {
- const Field& field = struct_type->LookupField(fieldname);
+ if (auto struct_type = (*generic_type)->StructSupertype()) {
+ const Field& field = (*struct_type)->LookupField(fieldname);
// Update the Reference's type to refer to the field type within the
// struct.
- ref.SetType(TypeOracle::GetReferenceType(field.name_and_type.type));
+ ref.SetType(TypeOracle::GetReferenceType(
+ field.name_and_type.type,
+ is_const ||
+ (field.const_qualified && !ignore_struct_field_constness)));
if (!field.offset.has_value()) {
Error("accessing field with unknown offset").Throw();
}
@@ -2075,12 +2094,7 @@ LocationReference ImplementationVisitor::GetLocationReference(
ReportError("cannot have generic parameters on local name ",
expr->name);
}
- const LocationReference& ref = (*value)->value;
- if (ref.IsVariableAccess()) {
- // Attach the binding to enable the never-assigned-to lint check.
- return LocationReference::VariableAccess(ref.GetVisitResult(), *value);
- }
- return ref;
+ return (*value)->GetLocationReference(*value);
}
}
@@ -2136,9 +2150,10 @@ LocationReference ImplementationVisitor::GetLocationReference(
LocationReference ImplementationVisitor::GetLocationReference(
DereferenceExpression* expr) {
VisitResult ref = Visit(expr->reference);
- if (!Type::MatchUnaryGeneric(ref.type(), TypeOracle::GetReferenceGeneric())) {
- ReportError("Operator * expects a reference but found a value of type ",
- *ref.type());
+ if (!TypeOracle::MatchReferenceGeneric(ref.type())) {
+ Error("Operator * expects a reference type but found a value of type ",
+ *ref.type())
+ .Throw();
}
return LocationReference::HeapReference(ref);
}
@@ -2155,9 +2170,9 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
return GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
"LoadFloat64OrHole"),
Arguments{{reference.heap_reference()}, {}});
- } else if (auto* struct_type = StructType::DynamicCast(referenced_type)) {
+ } else if (auto struct_type = referenced_type->StructSupertype()) {
StackRange result_range = assembler().TopRange(0);
- for (const Field& field : struct_type->fields()) {
+ for (const Field& field : (*struct_type)->fields()) {
StackScope scope(this);
const std::string& fieldname = field.name_and_type.name;
VisitResult field_value = scope.Yield(GenerateFetchFromLocation(
@@ -2212,20 +2227,27 @@ void ImplementationVisitor::GenerateAssignToLocation(
ReportError("assigning a value directly to an indexed field isn't allowed");
} else if (reference.IsHeapReference()) {
const Type* referenced_type = reference.ReferencedType();
+ if (reference.IsConst()) {
+ Error("cannot assign to const value of type ", *referenced_type).Throw();
+ }
if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
GenerateCall(
QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
"StoreFloat64OrHole"),
Arguments{{reference.heap_reference(), assignment_value}, {}});
- } else if (auto* struct_type = StructType::DynamicCast(referenced_type)) {
- if (assignment_value.type() != referenced_type) {
+ } else if (auto struct_type = referenced_type->StructSupertype()) {
+ if (!assignment_value.type()->IsSubtypeOf(referenced_type)) {
ReportError("Cannot assign to ", *referenced_type,
" with value of type ", *assignment_value.type());
}
- for (const Field& field : struct_type->fields()) {
+ for (const Field& field : (*struct_type)->fields()) {
const std::string& fieldname = field.name_and_type.name;
+ // Allow assignment of structs even if they contain const fields.
+ // Const on struct fields just disallows direct writes to them.
+ bool ignore_struct_field_constness = true;
GenerateAssignToLocation(
- GenerateFieldAccess(reference, fieldname),
+ GenerateFieldAccess(reference, fieldname,
+ ignore_struct_field_constness),
ProjectStructField(assignment_value, fieldname));
}
} else {
@@ -2358,7 +2380,8 @@ VisitResult ImplementationVisitor::GenerateCall(
"' required for call to '", callable->ReadableName(),
"' is not defined");
}
- implicit_arguments.push_back(GenerateFetchFromLocation((*val)->value));
+ implicit_arguments.push_back(
+ GenerateFetchFromLocation((*val)->GetLocationReference(*val)));
}
std::vector<VisitResult> converted_arguments;
@@ -2758,9 +2781,9 @@ std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
StackRange ImplementationVisitor::LowerParameter(
const Type* type, const std::string& parameter_name,
Stack<std::string>* lowered_parameters) {
- if (const StructType* struct_type = StructType::DynamicCast(type)) {
+ if (base::Optional<const StructType*> struct_type = type->StructSupertype()) {
StackRange range = lowered_parameters->TopRange(0);
- for (auto& field : struct_type->fields()) {
+ for (auto& field : (*struct_type)->fields()) {
StackRange parameter_range = LowerParameter(
field.name_and_type.type,
parameter_name + "." + field.name_and_type.name, lowered_parameters);
@@ -2776,8 +2799,8 @@ StackRange ImplementationVisitor::LowerParameter(
void ImplementationVisitor::LowerLabelParameter(
const Type* type, const std::string& parameter_name,
std::vector<std::string>* lowered_parameters) {
- if (const StructType* struct_type = StructType::DynamicCast(type)) {
- for (auto& field : struct_type->fields()) {
+ if (base::Optional<const StructType*> struct_type = type->StructSupertype()) {
+ for (auto& field : (*struct_type)->fields()) {
LowerLabelParameter(
field.name_and_type.type,
"&((*" + parameter_name + ")." + field.name_and_type.name + ")",
@@ -2969,8 +2992,7 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
} else {
builtin_definitions << "TFJ(" << builtin->ExternalName();
if (builtin->IsVarArgsJavaScript()) {
- builtin_definitions
- << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
+ builtin_definitions << ", kDontAdaptArgumentsSentinel";
} else {
DCHECK(builtin->IsFixedArgsJavaScript());
// FixedArg javascript builtins need to offer the parameter
@@ -3061,9 +3083,9 @@ class FieldOffsetsGenerator {
UpdateSection(f);
// Emit kHeaderSize before any indexed field.
- // TODO(tebbi): Generalize this code to work with multiple indexed fields.
- if (f.index.has_value()) {
+ if (f.index.has_value() && !header_size_emitted_) {
WriteMarker("kHeaderSize");
+ header_size_emitted_ = true;
}
// We don't know statically how much space an indexed field takes, so report
@@ -3103,14 +3125,28 @@ class FieldOffsetsGenerator {
private:
FieldSectionType GetSectionFor(const Field& f) {
- if (f.name_and_type.type == TypeOracle::GetVoidType()) {
+ const Type* field_type = f.name_and_type.type;
+ if (field_type == TypeOracle::GetVoidType()) {
// Allow void type for marker constants of size zero.
return current_section_;
}
+ StructType::Classification struct_contents =
+ StructType::ClassificationFlag::kEmpty;
+ if (auto field_as_struct = field_type->StructSupertype()) {
+ struct_contents = (*field_as_struct)->ClassifyContents();
+ }
+ if (struct_contents == StructType::ClassificationFlag::kMixed) {
+ // We can't declare what section a struct goes in if it has multiple
+ // categories of data within.
+ Error(
+ "Classes do not support fields which are structs containing both "
+ "tagged and untagged data.")
+ .Position(f.pos);
+ }
// Currently struct-valued fields are only allowed to have tagged data; see
// TypeVisitor::VisitClassFieldsAndMethods.
- if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
- f.name_and_type.type->IsStructType()) {
+ if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
+ struct_contents == StructType::ClassificationFlag::kTagged) {
if (f.is_weak) {
return FieldSectionType::kWeakSection;
} else {
@@ -3150,6 +3186,7 @@ class FieldOffsetsGenerator {
FieldSectionType current_section_ = FieldSectionType::kNoSection;
FieldSections completed_sections_ = FieldSectionType::kNoSection;
bool is_finished_ = false;
+ bool header_size_emitted_ = false;
};
class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
@@ -3172,48 +3209,23 @@ class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
std::ostream& out_;
};
-} // namespace
-
-void ImplementationVisitor::GenerateCppForInternalClasses(
- const std::string& output_directory) {
- std::stringstream header;
- std::stringstream inl;
- std::string base_name = "internal-class-definitions-tq";
- {
- IncludeGuardScope header_guard(header, base_name + ".h");
- header << "#include \"src/objects/objects.h\"\n";
- header << "#include \"src/objects/struct.h\"\n";
- header << "#include \"src/objects/js-objects.h\"\n";
- header << "#include \"src/utils/utils.h\"\n";
- header << "#include \"torque-generated/class-definitions-tq.h\"\n";
- IncludeObjectMacrosScope header_macros(header);
- NamespaceScope header_namespaces(header, {"v8", "internal"});
-
- IncludeGuardScope inl_guard(inl, base_name + "-inl.h");
- inl << "#include \"torque-generated/" << base_name << ".h\"\n";
- inl << "#include \"torque-generated/class-definitions-tq-inl.h\"\n";
- IncludeObjectMacrosScope inl_macros(inl);
- NamespaceScope inl_namespaces(inl, {"v8", "internal"});
-
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
- if (type->IsExtern()) continue;
- const ClassType* super = type->GetSuperClass();
- std::string parent = "TorqueGenerated" + type->name() + "<" +
- type->name() + ", " + super->name() + ">";
- header << "class " << type->name() << ": public " << parent << " {\n";
- header << " public:\n";
- header << " TQ_OBJECT_CONSTRUCTORS(" << type->name() << ")\n";
- header << "};\n\n";
-
- inl << "TQ_OBJECT_CONSTRUCTORS_IMPL(" << type->name() << ")\n";
- }
- }
- std::string dir_basename = output_directory + "/" + base_name;
- WriteFile(dir_basename + ".h", header.str());
- WriteFile(dir_basename + "-inl.h", inl.str());
+void GenerateClassExport(const ClassType* type, std::ostream& header,
+ std::ostream& inl_header) {
+ const ClassType* super = type->GetSuperClass();
+ std::string parent = "TorqueGenerated" + type->name() + "<" + type->name() +
+ ", " + super->name() + ">";
+ header << "class " << type->name() << " : public " << parent << " {\n";
+ header << " public:\n";
+ if (type->ShouldGenerateBodyDescriptor()) {
+ header << " class BodyDescriptor;\n";
+ }
+ header << " TQ_OBJECT_CONSTRUCTORS(" << type->name() << ")\n";
+ header << "};\n\n";
+ inl_header << "TQ_OBJECT_CONSTRUCTORS_IMPL(" << type->name() << ")\n";
}
+} // namespace
+
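GenerateClassExport emits the thin user-facing class on top of its Torque-generated base. For a hypothetical class Foo whose superclass is HeapObject, the output is:

    class Foo : public TorqueGeneratedFoo<Foo, HeapObject> {
     public:
      class BodyDescriptor;  // only when ShouldGenerateBodyDescriptor()
      TQ_OBJECT_CONSTRUCTORS(Foo)
    };
    // ...and in the matching -inl.h:
    TQ_OBJECT_CONSTRUCTORS_IMPL(Foo)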
void ImplementationVisitor::GenerateClassFieldOffsets(
const std::string& output_directory) {
std::stringstream header;
@@ -3221,9 +3233,7 @@ void ImplementationVisitor::GenerateClassFieldOffsets(
{
IncludeGuardScope include_guard(header, file_name);
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
-
+ for (const ClassType* type : TypeOracle::GetClasses()) {
// TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
// to generate field offsets without the use of macros.
if (!type->GenerateCppClassDefinitions() && !type->HasUndefinedLayout()) {
@@ -3236,6 +3246,16 @@ void ImplementationVisitor::GenerateClassFieldOffsets(
header << "\n";
}
}
+
+ header << "#define TORQUE_BODY_DESCRIPTOR_LIST_GENERATOR(V, _)\\\n";
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ if (type->ShouldGenerateBodyDescriptor()) {
+ std::string type_name =
+ CapifyStringWithUnderscores(type->name()) + "_TYPE";
+ header << "V(_, " << type_name << ", " << type->name() << ")\\\n";
+ }
+ }
+ header << "\n";
}
const std::string output_header_path = output_directory + "/" + file_name;
WriteFile(output_header_path, header.str());
@@ -3250,29 +3270,35 @@ void ImplementationVisitor::GenerateBitFields(
header << "#include \"src/base/bit-field.h\"\n\n";
NamespaceScope namespaces(header, {"v8", "internal"});
- // TODO(v8:7793): Once we can define enums in Torque, we should be able to
- // do something nicer than hard-coding these predeclarations. Until then,
- // any enum used as a bitfield must be included in this list.
- header << R"(
-enum class FunctionSyntaxKind : uint8_t;
-enum class BailoutReason : uint8_t;
-enum FunctionKind : uint8_t;
-
-)";
-
for (const auto& type : TypeOracle::GetBitFieldStructTypes()) {
- header << "struct TorqueGenerated" << type->name() << "Fields {\n";
+ bool all_single_bits = true; // Track whether every field is one bit.
+
+ header << "#define DEFINE_TORQUE_GENERATED_"
+ << CapifyStringWithUnderscores(type->name()) << "() \\\n";
std::string type_name = type->GetConstexprGeneratedTypeName();
for (const auto& field : type->fields()) {
const char* suffix = field.num_bits == 1 ? "Bit" : "Bits";
+ all_single_bits = all_single_bits && field.num_bits == 1;
std::string field_type_name =
field.name_and_type.type->GetConstexprGeneratedTypeName();
header << " using " << CamelifyString(field.name_and_type.name)
<< suffix << " = base::BitField<" << field_type_name << ", "
<< field.offset << ", " << field.num_bits << ", " << type_name
- << ">;\n";
+ << ">; \\\n";
+ }
+
+ // If every field is one bit, we can also generate a convenient enum.
+ if (all_single_bits) {
+ header << " enum Flag { \\\n";
+ header << " kNone = 0, \\\n";
+ for (const auto& field : type->fields()) {
+ header << " k" << CamelifyString(field.name_and_type.name)
+ << " = 1 << " << field.offset << ", \\\n";
+ }
+ header << " }; \\\n";
}
- header << "};\n\n";
+
+ header << "\n";
}
}
const std::string output_header_path = output_directory + "/" + file_name;
@@ -3345,6 +3371,29 @@ class CppClassGenerator {
std::ostream& impl_;
};
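+// Collects, in declaration order and without duplicates, the fields whose
+// values supply the lengths of the class's indexed (array) fields. Returns
+// base::nullopt if any array length is not a simple field reference.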
+base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
+ const ClassType& type) {
+ std::vector<Field> result;
+ std::set<std::string> index_names;
+ for (const Field& field : type.ComputeAllFields()) {
+ if (field.index) {
+ auto name_and_type = ExtractSimpleFieldArraySize(type, *field.index);
+ if (!name_and_type) {
+ return base::nullopt;
+ }
+ index_names.insert(name_and_type->name);
+ }
+ }
+
+ for (const Field& field : type.ComputeAllFields()) {
+ if (index_names.count(field.name_and_type.name) != 0) {
+ result.push_back(field);
+ }
+ }
+
+ return result;
+}
+
void CppClassGenerator::GenerateClass() {
hdr_ << template_decl() << "\n";
hdr_ << "class " << gen_name_ << " : public P {\n";
@@ -3354,10 +3403,16 @@ void CppClassGenerator::GenerateClass() {
<< " \"Pass in " << super_->name()
<< " as second template parameter for " << gen_name_ << ".\");\n";
hdr_ << " public: \n";
- hdr_ << " using Super = P;\n";
+ hdr_ << " using Super = P;\n\n";
+ if (!type_->ShouldExport() && !type_->IsExtern()) {
+ hdr_ << " protected: // not extern or @export\n";
+ }
for (const Field& f : type_->fields()) {
GenerateFieldAccessor(f);
}
+ if (!type_->ShouldExport() && !type_->IsExtern()) {
+ hdr_ << " public:\n";
+ }
GenerateClassCasts();
@@ -3389,9 +3444,70 @@ void CppClassGenerator::GenerateClass() {
g.Finish();
hdr_ << "\n";
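+  // For a class with a single indexed field whose length is stored in a
+  // "length" field of element size 8 (both illustrative), the block below
+  // emits size helpers roughly like:
+  //
+  //   V8_INLINE static constexpr int32_t SizeFor(int length) {
+  //     int32_t size = kHeaderSize;
+  //     size += length * 8;
+  //     return size;
+  //   }
+  //   V8_INLINE static constexpr int32_t SizeFor(D o) {
+  //     return SizeFor(o.length());
+  //   }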
+ auto index_fields = GetOrderedUniqueIndexFields(*type_);
+
+ if (!index_fields.has_value()) {
+ hdr_ << " // SizeFor implementations not generated due to complex array "
+ "lengths\n\n";
+ } else if (!type_->IsAbstract() &&
+ !type_->IsSubtypeOf(TypeOracle::GetJSObjectType())) {
+ hdr_ << " V8_INLINE static constexpr int32_t SizeFor(";
+ bool first = true;
+ for (const Field& field : *index_fields) {
+ if (!first) hdr_ << ", ";
+ hdr_ << "int " << field.name_and_type.name;
+ first = false;
+ }
+ hdr_ << ") {\n";
+ if (index_fields->empty()) {
+ hdr_ << " DCHECK(kHeaderSize == kSize && kHeaderSize == "
+ << *type_->size().SingleValue() << ");\n";
+ }
+ hdr_ << " int32_t size = kHeaderSize;\n";
+ for (const Field& field : type_->ComputeAllFields()) {
+ if (field.index) {
+ auto index_name_and_type =
+ *ExtractSimpleFieldArraySize(*type_, *field.index);
+ size_t field_size = 0;
+ std::tie(field_size, std::ignore) = field.GetFieldSizeInformation();
+ hdr_ << " size += " << index_name_and_type.name << " * "
+ << field_size << ";\n";
+ }
+ }
+ hdr_ << " return size;\n";
+ hdr_ << " }\n\n";
+ hdr_ << " V8_INLINE static constexpr int32_t SizeFor(D o) {\n";
+ hdr_ << " return SizeFor(";
+ first = true;
+ for (auto field : *index_fields) {
+ if (!first) hdr_ << ", ";
+ // Subclasses of FixedArrayBase need to use the synchronized length
+ // accessor to be consistent (theoretically, FixedArrayBase classes
+ // can concurrently change size e.g. through left-trimming, although
+ // in practice this won't happen for Torque-generated classes) as well as
+ // explicitly convert to a Smi, since the C++-side accessors are
+ // int-based.
+ if (field.aggregate == TypeOracle::GetFixedArrayBaseType() &&
+ field.name_and_type.name == "length") {
+ hdr_ << "o.synchronized_length()";
+ } else {
+ hdr_ << "o." << field.name_and_type.name << "()";
+ }
+ first = false;
+ }
+ hdr_ << ");\n }\n";
+ hdr_ << "\n";
+ }
+
+ hdr_ << " friend class Factory;\n\n";
+
GenerateClassConstructors();
hdr_ << "};\n\n";
+
+ if (!type_->IsExtern()) {
+ GenerateClassExport(type_, hdr_, inl_);
+ }
}
void CppClassGenerator::GenerateClassCasts() {
@@ -3427,7 +3543,11 @@ void CppClassGenerator::GenerateClassConstructors() {
// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
void CppClassGenerator::GenerateFieldAccessor(const Field& f) {
const Type* field_type = f.name_and_type.type;
- if (field_type == TypeOracle::GetVoidType() || f.index.has_value()) return;
+ if (field_type == TypeOracle::GetVoidType()) return;
+
+ // TODO(danno): Support generation of struct accessors
+ if (f.name_and_type.type->IsStructType()) return;
+
if (!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
return GenerateFieldAccessorForUntagged(f);
}
@@ -3439,7 +3559,8 @@ void CppClassGenerator::GenerateFieldAccessor(const Field& f) {
}
Error("Generation of field accessor for ", type_->name(),
- ":: ", f.name_and_type.name, " : ", *field_type, " is not supported.")
+ "::", f.name_and_type.name, " failed (type ", *field_type,
+ " is not supported).")
.Position(f.pos);
}
@@ -3458,48 +3579,101 @@ void CppClassGenerator::GenerateFieldAccessorForUntagged(const Field& f) {
}
const std::string& name = f.name_and_type.name;
const std::string type = constexpr_version->GetGeneratedTypeName();
- const std::string offset = "k" + CamelifyString(name) + "Offset";
+ std::string offset = "k" + CamelifyString(name) + "Offset";
// Generate declarations in header.
- hdr_ << " inline " << type << " " << name << "() const;\n";
- hdr_ << " inline void set_" << name << "(" << type << " value);\n\n";
+ if (f.index) {
+ hdr_ << " inline " << type << " " << name << "(int i) const;\n";
+ hdr_ << " inline void set_" << name << "(int i, " << type
+ << " value);\n\n";
+ } else {
+ hdr_ << " inline " << type << " " << name << "() const;\n";
+ hdr_ << " inline void set_" << name << "(" << type << " value);\n\n";
+ }
// Generate implementation in inline header.
inl_ << "template <class D, class P>\n";
- inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
- inl_ << " return this->template ReadField<" << type << ">(" << offset
- << ");\n";
+ inl_ << type << " " << gen_name_ << "<D, P>::" << name << "(";
+ if (f.index) {
+ inl_ << "int i";
+ }
+ inl_ << ") const {\n";
+ if (f.index) {
+ size_t field_size;
+ std::string size_string;
+ std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+ inl_ << " int offset = " << offset << " + i * " << field_size << ";\n";
+ inl_ << " return this->template ReadField<" << type << ">(offset);\n";
+ } else {
+ inl_ << " return this->template ReadField<" << type << ">(" << offset
+ << ");\n";
+ }
inl_ << "}\n";
inl_ << "template <class D, class P>\n";
- inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(" << type
- << " value) {\n";
- inl_ << " this->template WriteField<" << type << ">(" << offset
- << ", value);\n";
+ inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(";
+ if (f.index) {
+ inl_ << "int i, ";
+ }
+ inl_ << type << " value) {\n";
+ if (f.index) {
+ size_t field_size;
+ std::string size_string;
+ std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+ inl_ << " int offset = " << offset << " + i * " << field_size << ";\n";
+ inl_ << " this->template WriteField<" << type << ">(offset, value);\n";
+ } else {
+ inl_ << " this->template WriteField<" << type << ">(" << offset
+ << ", value);\n";
+ }
inl_ << "}\n\n";
}
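+// For an indexed untagged field "data" with 4-byte elements of C++ type
+// uint32_t (names and sizes illustrative), the generator above produces
+// accessors roughly like:
+//
+//   inline uint32_t data(int i) const;
+//   inline void set_data(int i, uint32_t value);
+//
+//   template <class D, class P>
+//   uint32_t TorqueGeneratedFoo<D, P>::data(int i) const {
+//     int offset = kDataOffset + i * 4;
+//     return this->template ReadField<uint32_t>(offset);
+//   }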
void CppClassGenerator::GenerateFieldAccessorForSmi(const Field& f) {
DCHECK(f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()));
- const std::string type = "Smi";
+ // Follow the convention to create Smi accessors with type int.
+ const std::string type = "int";
const std::string& name = f.name_and_type.name;
const std::string offset = "k" + CamelifyString(name) + "Offset";
// Generate declarations in header.
+ if (f.index) {
+ hdr_ << " inline " << type << " " << name << "(int i) const;\n";
+ hdr_ << " inline void set_" << name << "(int i, " << type
+ << " value);\n\n";
+  } else {
+    hdr_ << "  inline " << type << " " << name << "() const;\n";
+    hdr_ << "  inline void set_" << name << "(" << type << " value);\n\n";
+  }
// Generate implementation in inline header.
inl_ << "template <class D, class P>\n";
- inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
- inl_ << " return TaggedField<Smi, " << offset << ">::load(*this);\n";
- inl_ << "}\n";
+ inl_ << type << " " << gen_name_ << "<D, P>::" << name << "(";
+ if (f.index) {
+ inl_ << "int i";
+ }
+ inl_ << ") const {\n";
+ if (f.index) {
+ inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
+ inl_ << " return this->template ReadField<Smi>(offset).value();\n";
+ inl_ << "}\n";
+ } else {
+ inl_ << " return TaggedField<Smi, " << offset
+ << ">::load(*this).value();\n";
+ inl_ << "}\n";
+ }
inl_ << "template <class D, class P>\n";
- inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(" << type
- << " value) {\n";
- inl_ << " DCHECK(value.IsSmi());\n";
- inl_ << " WRITE_FIELD(*this, " << offset << ", value);\n";
+ inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(";
+ if (f.index) {
+ inl_ << "int i, ";
+ }
+ inl_ << type << " value) {\n";
+ if (f.index) {
+ inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
+ inl_ << " WRITE_FIELD(*this, offset, Smi::FromInt(value));\n";
+ } else {
+ inl_ << " WRITE_FIELD(*this, " << offset << ", Smi::FromInt(value));\n";
+ }
inl_ << "}\n\n";
}
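+// Note the convention above: Smi fields are exposed as int in C++, so the
+// generated getters unwrap with Smi::value() and the setters wrap with
+// Smi::FromInt(), e.g. (sketch, illustrative offset name):
+//
+//   WRITE_FIELD(*this, kCountOffset, Smi::FromInt(value));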
@@ -3517,11 +3691,13 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
if (!class_type && field_type != TypeOracle::GetObjectType()) {
hdr_ << " // Torque type: " << field_type->ToString() << "\n";
}
- hdr_ << " inline " << type << " " << name << "() const;\n";
- hdr_ << " inline " << type << " " << name
- << "(const Isolate* isolate) const;\n";
- hdr_ << " inline void set_" << name << "(" << type
- << " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
+
+ hdr_ << " inline " << type << " " << name << "(" << (f.index ? "int i" : "")
+ << ") const;\n";
+  hdr_ << "  inline " << type << " " << name << "(const Isolate* isolate"
+ << (f.index ? ", int i" : "") << ") const;\n";
+ hdr_ << " inline void set_" << name << "(" << (f.index ? "int i, " : "")
+ << type << " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
std::string type_check;
for (const RuntimeType& runtime_type : field_type->GetRuntimeTypes()) {
@@ -3531,66 +3707,170 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
// Generate implementation in inline header.
inl_ << "template <class D, class P>\n";
- inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
+ inl_ << type << " " << gen_name_ << "<D, P>::" << name << "("
+ << (f.index ? "int i" : "") << ") const {\n";
inl_ << " const Isolate* isolate = GetIsolateForPtrCompr(*this);\n";
- inl_ << " return " << gen_name_ << "::" << name << "(isolate);\n";
+ inl_ << " return " << gen_name_ << "::" << name << "(isolate"
+ << (f.index ? ", i" : "") << ");\n";
inl_ << "}\n";
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name
- << "(const Isolate* isolate) const {\n";
+ << "(const Isolate* isolate" << (f.index ? ", int i" : "")
+ << ") const {\n";
+
if (class_type) {
- inl_ << " return TaggedField<" << type << ", " << offset
- << ">::load(isolate, *this);\n";
+ if (f.index) {
+ inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
+ inl_ << " return " << type
+ << "::cast(RELAXED_READ_FIELD(*this, offset));\n";
+ } else {
+ inl_ << " return TaggedField<" << type << ", " << offset
+ << ">::load(isolate, *this);\n";
+ }
} else {
// TODO(tebbi): load value as HeapObject when possible
- inl_ << " Object value = TaggedField<Object, " << offset
- << ">::load(isolate, *this);\n";
+ if (f.index) {
+ inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
+ inl_ << " Object value = Object::cast(RELAXED_READ_FIELD(*this, "
+ "offset));\n";
+ } else {
+ inl_ << " Object value = TaggedField<Object, " << offset
+ << ">::load(isolate, *this);\n";
+ }
inl_ << " DCHECK(" << type_check << ");\n";
inl_ << " return value;\n";
}
inl_ << "}\n";
inl_ << "template <class D, class P>\n";
- inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(" << type
- << " value, WriteBarrierMode mode) {\n";
+ inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(";
+ if (f.index) {
+ inl_ << "int i, ";
+ }
+ inl_ << type << " value, WriteBarrierMode mode) {\n";
inl_ << " SLOW_DCHECK(" << type_check << ");\n";
- inl_ << " WRITE_FIELD(*this, " << offset << ", value);\n";
+  if (f.index) {
+    inl_ << "  int offset = " << offset << " + i * kTaggedSize;\n";
+    inl_ << "  WRITE_FIELD(*this, offset, value);\n";
+    inl_ << "  CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);\n";
+  } else {
+    inl_ << "  WRITE_FIELD(*this, " << offset << ", value);\n";
+    inl_ << "  CONDITIONAL_WRITE_BARRIER(*this, " << offset
+         << ", value, mode);\n";
+  }
inl_ << "}\n\n";
}
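+// Tagged (non-Smi) setters generated above additionally take a
+// WriteBarrierMode and emit a CONDITIONAL_WRITE_BARRIER after the store, and
+// each getter gets an overload taking a const Isolate* to support pointer
+// compression.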
+void EmitClassDefinitionHeadersIncludes(const std::string& basename,
+ std::stringstream& header,
+ std::stringstream& inline_header) {
+ header << "#include \"src/objects/fixed-array.h\"\n";
+ header << "#include \"src/objects/objects.h\"\n";
+ header << "#include \"src/objects/smi.h\"\n";
+ header << "#include \"torque-generated/field-offsets-tq.h\"\n";
+ header << "#include <type_traits>\n\n";
+
+ inline_header << "#include \"torque-generated/class-definitions-tq.h\"\n";
+ inline_header << "#include "
+ "\"torque-generated/objects-body-descriptors-tq-inl.h\"\n\n";
+ inline_header << "#include \"src/objects/js-promise.h\"\n";
+ inline_header << "#include \"src/objects/js-weak-refs.h\"\n";
+ inline_header << "#include \"src/objects/module.h\"\n";
+ inline_header << "#include \"src/objects/objects-inl.h\"\n";
+ inline_header << "#include \"src/objects/script.h\"\n";
+ inline_header << "#include \"src/objects/shared-function-info.h\"\n";
+ inline_header << "#include \"src/objects/tagged-field.h\"\n\n";
+}
+
+void EmitClassDefinitionHeadersForwardDeclarations(std::stringstream& header) {
+ // Generate forward declarations for every class.
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
+ }
+ header << "using BuiltinPtr = Smi;\n\n";
+}
+
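+// For a struct MyStruct with two fields "x" and "y" (names illustrative;
+// offsets assume 8-byte fields), the helper below emits:
+//
+//   struct TorqueGeneratedMyStructOffsets {
+//     static constexpr int kXOffset = 0;
+//     static constexpr int kYOffset = 8;
+//     static constexpr int kSize = 16;
+//   };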
+void GenerateStructLayoutDescription(std::ostream& header,
+ const StructType* type) {
+ header << "struct TorqueGenerated" << CamelifyString(type->name())
+ << "Offsets {\n";
+ for (const Field& field : type->fields()) {
+ header << " static constexpr int k"
+ << CamelifyString(field.name_and_type.name)
+ << "Offset = " << *field.offset << ";\n";
+ }
+ header << " static constexpr int kSize = " << type->PackedSize() << ";\n";
+ header << "};\n\n";
+}
+
} // namespace
void ImplementationVisitor::GenerateClassDefinitions(
const std::string& output_directory) {
- std::stringstream header;
- std::stringstream inline_header;
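+  // Extern and @export classes are emitted into class-definitions-tq{,-inl}.h,
+  // all other Torque-internal classes into the internal- variants; @export
+  // classes additionally get a Factory::New* allocation function.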
+ std::stringstream external_header;
+ std::stringstream inline_external_header;
+ std::stringstream internal_header;
+ std::stringstream inline_internal_header;
std::stringstream implementation;
+ std::stringstream factory_header;
+ std::stringstream factory_impl;
std::string basename = "class-definitions-tq";
+ std::string internal_basename = "internal-" + basename;
std::string file_basename = output_directory + "/" + basename;
+ std::string internal_file_basename =
+ output_directory + "/" + internal_basename;
+ std::string factory_basename = "factory-tq";
+ std::string factory_file_basename = output_directory + "/" + factory_basename;
{
- IncludeGuardScope header_guard(header, basename + ".h");
- header << "#include \"src/objects/objects.h\"\n";
- header << "#include \"src/objects/smi.h\"\n";
- header << "#include \"torque-generated/field-offsets-tq.h\"\n";
- header << "#include <type_traits>\n\n";
- IncludeObjectMacrosScope header_macros(header);
- NamespaceScope header_namespaces(header, {"v8", "internal"});
- header << "using BuiltinPtr = Smi;\n\n";
-
- IncludeGuardScope inline_header_guard(inline_header, basename + "-inl.h");
- inline_header << "#include \"torque-generated/class-definitions-tq.h\"\n\n";
- inline_header << "#include \"src/objects/js-promise.h\"\n";
- inline_header << "#include \"src/objects/js-weak-refs.h\"\n";
- inline_header << "#include \"src/objects/module.h\"\n";
- inline_header << "#include \"src/objects/objects-inl.h\"\n";
- inline_header << "#include \"src/objects/script.h\"\n";
- inline_header << "#include \"src/objects/shared-function-info.h\"\n\n";
- IncludeObjectMacrosScope inline_header_macros(inline_header);
- NamespaceScope inline_header_namespaces(inline_header, {"v8", "internal"});
+ IncludeGuardScope header_guard(external_header, basename + ".h");
+
+ IncludeGuardScope inline_header_guard(inline_external_header,
+ basename + "-inl.h");
+
+ IncludeGuardScope internal_header_guard(internal_header,
+ internal_basename + ".h");
+
+ IncludeGuardScope internal_inline_header_guard(
+ inline_internal_header, internal_basename + "-inl.h");
+
+ external_header
+ << "#include \"torque-generated/internal-class-definitions-tq.h\"\n";
+
+ EmitClassDefinitionHeadersIncludes(basename, external_header,
+ inline_external_header);
+
+ EmitClassDefinitionHeadersIncludes(internal_basename, internal_header,
+ inline_internal_header);
+
+ IncludeObjectMacrosScope header_macros(external_header);
+ IncludeObjectMacrosScope inline_header_macros(inline_external_header);
+
+ IncludeObjectMacrosScope internal_header_macros(internal_header);
+ IncludeObjectMacrosScope internal_inline_header_macros(
+ inline_internal_header);
+
+ NamespaceScope header_namespaces(external_header, {"v8", "internal"});
+ NamespaceScope inline_header_namespaces(inline_external_header,
+ {"v8", "internal"});
+ NamespaceScope internal_header_namespaces(internal_header,
+ {"v8", "internal"});
+ NamespaceScope internal_inline_header_namespaces(inline_internal_header,
+ {"v8", "internal"});
+
+ EmitClassDefinitionHeadersForwardDeclarations(external_header);
+ EmitClassDefinitionHeadersForwardDeclarations(internal_header);
+
+ factory_impl << "#include \"src/heap/factory.h\"\n";
+ factory_impl << "#include \"src/heap/factory-inl.h\"\n";
+ factory_impl << "#include \"src/heap/heap.h\"\n";
+ factory_impl << "#include \"src/heap/heap-inl.h\"\n";
+ factory_impl << "#include \"src/execution/isolate.h\"\n\n";
+ factory_impl
+ << "#include "
+ "\"torque-generated/internal-class-definitions-tq-inl.h\"\n\n";
+ NamespaceScope factory_impl_namespaces(factory_impl, {"v8", "internal"});
+ factory_impl << "\n";
implementation
<< "#include \"torque-generated/class-definitions-tq.h\"\n\n";
@@ -3618,23 +3898,112 @@ void ImplementationVisitor::GenerateClassDefinitions(
NamespaceScope implementation_namespaces(implementation,
{"v8", "internal"});
- // Generate forward declarations for every class.
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
- header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
- }
+ std::set<const StructType*, TypeLess> structs_used_in_classes;
+
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ std::stringstream& header = (type->IsExtern() || type->ShouldExport())
+ ? external_header
+ : internal_header;
+ std::stringstream& inline_header =
+ (type->IsExtern() || type->ShouldExport()) ? inline_external_header
+ : inline_internal_header;
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
if (type->GenerateCppClassDefinitions()) {
CppClassGenerator g(type, header, inline_header, implementation);
g.GenerateClass();
}
+ for (const Field& f : type->fields()) {
+ const Type* field_type = f.name_and_type.type;
+ if (auto field_as_struct = field_type->StructSupertype()) {
+ structs_used_in_classes.insert(*field_as_struct);
+ }
+ }
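+      // For an exported class Foo with a single tagged, non-indexed field
+      // "value" (names illustrative), the block below emits roughly:
+      //
+      //   Handle<Foo> Factory::NewFoo(Handle<Object> value,
+      //                               AllocationType allocation_type) {
+      //     int size = TorqueGeneratedFoo<Foo, HeapObject>::SizeFor();
+      //     ... allocate raw object, set map, wrap in a handle ...
+      //     result_handle->set_value(*value, SKIP_WRITE_BARRIER);
+      //     return result_handle;
+      //   }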
+ if (type->ShouldExport()) {
+ factory_header << type->HandlifiedCppTypeName() << " New"
+ << type->name() << "(";
+ factory_impl << type->HandlifiedCppTypeName() << " Factory::New"
+ << type->name() << "(";
+
+ for (const Field& f : type->ComputeAllFields()) {
+ if (f.name_and_type.name == "map") continue;
+ if (!f.index) {
+ std::string type_string =
+ f.name_and_type.type->HandlifiedCppTypeName();
+ factory_header << type_string << " " << f.name_and_type.name
+ << ", ";
+ factory_impl << type_string << " " << f.name_and_type.name << ", ";
+ }
+ }
+
+ factory_header << "AllocationType allocation_type);\n";
+ factory_impl << "AllocationType allocation_type) {\n";
+
+ factory_impl << " int size = ";
+ const ClassType* super = type->GetSuperClass();
+ std::string gen_name = "TorqueGenerated" + type->name();
+ std::string gen_name_T =
+ gen_name + "<" + type->name() + ", " + super->name() + ">";
+ factory_impl << gen_name_T << "::SizeFor(";
+
+ bool first = true;
+ auto index_fields = GetOrderedUniqueIndexFields(*type);
+ CHECK(index_fields.has_value());
+ for (auto index_field : *index_fields) {
+ if (!first) {
+ factory_impl << ", ";
+ }
+ factory_impl << index_field.name_and_type.name;
+ first = false;
+ }
+
+ factory_impl << ");\n";
+ factory_impl << " ReadOnlyRoots roots(isolate());\n";
+ factory_impl << " HeapObject result =\n";
+ factory_impl << " "
+ "isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>"
+ "(size, allocation_type);\n";
+ factory_impl << " result.set_map_after_allocation(roots."
+ << SnakeifyString(type->name())
+ << "_map(), SKIP_WRITE_BARRIER);\n";
+ factory_impl << " " << type->HandlifiedCppTypeName()
+ << " result_handle(" << type->name()
+ << "::cast(result), isolate());\n";
+
+ for (const Field& f : type->ComputeAllFields()) {
+ if (f.name_and_type.name == "map") continue;
+ if (!f.index) {
+ factory_impl << " result_handle->set_"
+ << SnakeifyString(f.name_and_type.name) << "(";
+ if (f.name_and_type.type->IsSubtypeOf(
+ TypeOracle::GetTaggedType()) &&
+ !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
+ factory_impl << "*" << f.name_and_type.name
+ << ", SKIP_WRITE_BARRIER";
+ } else {
+ factory_impl << f.name_and_type.name;
+ }
+ factory_impl << ");\n";
+ }
+ }
+
+ factory_impl << " return result_handle;\n";
+ factory_impl << "}\n\n";
+ }
+ }
+
+ for (const StructType* type : structs_used_in_classes) {
+ if (type != TypeOracle::GetFloat64OrHoleType()) {
+ GenerateStructLayoutDescription(external_header, type);
+ }
}
}
- WriteFile(file_basename + ".h", header.str());
- WriteFile(file_basename + "-inl.h", inline_header.str());
+ WriteFile(file_basename + ".h", external_header.str());
+ WriteFile(file_basename + "-inl.h", inline_external_header.str());
WriteFile(file_basename + ".cc", implementation.str());
+ WriteFile(internal_file_basename + ".h", internal_header.str());
+ WriteFile(internal_file_basename + "-inl.h", inline_internal_header.str());
+ WriteFile(factory_file_basename + ".inc", factory_header.str());
+ WriteFile(factory_file_basename + ".cc", factory_impl.str());
}
namespace {
@@ -3651,11 +4020,24 @@ void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
for (const AggregateType* aggregate_type : hierarchy) {
for (const Field& f : aggregate_type->fields()) {
if (f.name_and_type.name == "map") continue;
- impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
- << "Brief(this->" << f.name_and_type.name << "());\n";
+ if (!f.index.has_value()) {
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
+ !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ impl << " os << \"\\n - " << f.name_and_type.name << ": \" << ";
+ if (f.name_and_type.type->StructSupertype()) {
+ // TODO(tebbi): Print struct fields too.
+ impl << "\" <struct field printing still unimplemented>\";\n";
+ } else {
+ impl << "this->" << f.name_and_type.name << "();\n";
+ }
+ } else {
+ impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
+ << "Brief(this->" << f.name_and_type.name << "());\n";
+ }
+ }
}
}
- impl << " os << \"\\n\";\n";
+ impl << " os << '\\n';\n";
impl << "}\n\n";
}
} // namespace
@@ -3676,8 +4058,7 @@ void ImplementationVisitor::GeneratePrintDefinitions(
NamespaceScope impl_namespaces(impl, {"v8", "internal"});
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
+ for (const ClassType* type : TypeOracle::GetClasses()) {
if (!type->ShouldGeneratePrint()) continue;
if (type->GenerateCppClassDefinitions()) {
@@ -3699,6 +4080,142 @@ void ImplementationVisitor::GeneratePrintDefinitions(
WriteFile(output_directory + "/" + file_name, new_contents);
}
+void ImplementationVisitor::GenerateBodyDescriptors(
+ const std::string& output_directory) {
+ std::string file_name = "objects-body-descriptors-tq-inl";
+ std::stringstream h_contents;
+ {
+ IncludeGuardScope include_guard(h_contents, file_name + ".h");
+
+ h_contents << "\n#include \"src/objects/objects-body-descriptors.h\"\n";
+ h_contents << "\n#include \"torque-generated/class-definitions-tq.h\"\n";
+ h_contents
+ << "\n#include \"torque-generated/internal-class-definitions-tq.h\"\n";
+ h_contents << "\n#include "
+ "\"torque-generated/internal-class-definitions-tq-inl.h\"\n";
+
+ NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
+
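+    // For each such class Foo, the loop below emits a body descriptor of
+    // roughly this shape (sketch):
+    //
+    //   class Foo::BodyDescriptor final : public BodyDescriptorBase {
+    //    public:
+    //     static bool IsValidSlot(Map map, HeapObject obj, int offset);
+    //     template <typename ObjectVisitor>
+    //     static inline void IterateBody(Map map, HeapObject obj,
+    //                                    int object_size, ObjectVisitor* v);
+    //     static inline int SizeOf(Map map, HeapObject raw_object);
+    //   };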
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ std::string name = type->name();
+ if (!type->ShouldGenerateBodyDescriptor()) continue;
+
+ const ClassType* super_class = type->GetSuperClass();
+ std::string super_name = super_class->name();
+ h_contents << "class " << name
+ << "::BodyDescriptor final : public BodyDescriptorBase {\n";
+
+ h_contents << " public:\n";
+
+ h_contents << " static bool IsValidSlot(Map map, HeapObject obj, int "
+ "offset) {\n";
+
+ if (super_class == TypeOracle::GetHeapObjectType() ||
+ super_class == TypeOracle::GetFixedArrayBaseType()) {
+ h_contents << " if (offset < " << super_name
+ << "::kHeaderSize) return true;\n";
+ } else {
+ h_contents << " if (" << super_name
+ << "::BodyDescriptor::IsValidSlot(map, obj, offset)) return "
+ "true;\n";
+ }
+
+ h_contents << " return offset >= " << name
+ << "::kStartOfStrongFieldsOffset"
+ << " && offset < " << name << ""
+ << "::kEndOfStrongFieldsOffset;\n";
+ h_contents << " }\n\n";
+
+ h_contents << " template <typename ObjectVisitor>\n";
+ h_contents << " static inline void IterateBody(Map map, HeapObject obj, "
+ "int object_size, ObjectVisitor* v) {\n";
+
+ // There may be MaybeObjects embedded in the strong pointer section, which
+      // are not supported.
+ for (auto& f : type->fields()) {
+ for (const Type* t : LowerType(f.name_and_type.type)) {
+ if (t->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
+ !t->IsSubtypeOf(TypeOracle::GetObjectType())) {
+ Error("Cannot generate body descriptor for field ",
+ f.name_and_type.name, " of class ", name, " because ", *t,
+ " can contain tagged weak pointers.");
+ }
+ }
+ }
+
+ if (super_class != TypeOracle::GetHeapObjectType() &&
+ super_class != TypeOracle::GetFixedArrayBaseType()) {
+ h_contents
+ << " " << super_name
+ << "::BodyDescriptor::IterateBody(map, obj, object_size, v);\n";
+ }
+
+ h_contents << " if (" << name
+ << "::kStartOfStrongFieldsOffset != " << name
+ << "::kEndOfStrongFieldsOffset) {\n";
+ h_contents << " IteratePointers(obj, " << name
+ << "::kStartOfStrongFieldsOffset, " << name
+ << "::kEndOfStrongFieldsOffset, v);\n";
+ h_contents << " }\n";
+
+ h_contents << " if (" << name
+ << "::kStartOfWeakFieldsOffset != " << name
+ << "::kEndOfWeakFieldsOffset) {\n";
+ h_contents << " IterateCustomWeakPointers(obj, " << name
+ << "::kStartOfWeakFieldsOffset, " << name
+ << "::kEndOfWeakFieldsOffset, v);\n";
+ h_contents << " }\n";
+
+ // Since all of the index fields are at the end of the object and must
+ // only be Tagged values, emit only a single IteratePointers from the
+ // beginning of the first indexed field to the end of the object.
+ bool first_index_seen = false;
+ for (const Field& field : type->ComputeAllFields()) {
+ if (field.index && !first_index_seen) {
+ std::string indexed_field_name =
+ CamelifyString(field.name_and_type.name);
+ if (field.name_and_type.type->IsSubtypeOf(
+ TypeOracle::GetObjectType())) {
+ h_contents << " BodyDescriptorBase::IteratePointers(obj, "
+ << name << "::k" << indexed_field_name << "Offset, "
+ << name << "::SizeFor(" << name << "::cast(obj)), v);\n";
+ } else {
+          Error(
+              "generating body descriptors for indexed fields that are not "
+              "subtypes of Object isn't (yet) supported");
+ }
+ first_index_seen = true;
+ }
+ if (first_index_seen) {
+ for (const Type* t : LowerType(field.name_and_type.type)) {
+ if (!t->IsSubtypeOf(TypeOracle::GetObjectType())) {
+ Error("cannot generate class body descriptor for \"",
+ type->name(),
+                "\", all fields including and after the first indexed "
+                "member must be comprised only of subtypes of Object "
+ "(field \"",
+ field.name_and_type.name, "\" is not)");
+ }
+ }
+ }
+ }
+
+ h_contents << " }\n\n";
+
+ h_contents
+ << " static inline int SizeOf(Map map, HeapObject raw_object) {\n";
+ h_contents << " " << name << " object = " << name
+ << "::cast(raw_object);\n";
+ h_contents << " return " << name << "::SizeFor(object);\n";
+ h_contents << " }\n\n";
+
+ h_contents << "};\n";
+ }
+ }
+
+ WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
+}
+
namespace {
// Generate verification code for a single piece of class data, which might be
@@ -3778,7 +4295,7 @@ void GenerateClassFieldVerifier(const std::string& class_name,
// We only verify tagged types, not raw numbers or pointers. Structs
// consisting of tagged types are also included.
if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
- !field_type->IsStructType())
+ !field_type->StructSupertype())
return;
if (field_type == TypeOracle::GetFloat64OrHoleType()) return;
// Do not verify if the field may be uninitialized.
@@ -3817,11 +4334,13 @@ void GenerateClassFieldVerifier(const std::string& class_name,
cc_contents << " {\n";
}
- if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
- for (const Field& field : struct_type->fields()) {
- GenerateFieldValueVerifier(class_name, f, field, *field.offset,
- std::to_string(struct_type->PackedSize()),
- cc_contents);
+ if (auto struct_type = field_type->StructSupertype()) {
+ for (const Field& field : (*struct_type)->fields()) {
+ if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ GenerateFieldValueVerifier(class_name, f, field, *field.offset,
+ std::to_string((*struct_type)->PackedSize()),
+ cc_contents);
+ }
}
} else {
GenerateFieldValueVerifier(class_name, f, f, 0, "kTaggedSize", cc_contents);
@@ -3858,8 +4377,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
// Generate forward declarations to avoid including any headers.
h_contents << "class Isolate;\n";
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
+ for (const ClassType* type : TypeOracle::GetClasses()) {
if (!type->ShouldGenerateVerify()) continue;
h_contents << "class " << type->name() << ";\n";
}
@@ -3869,8 +4387,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
h_contents << "class " << verifier_class << "{\n";
h_contents << " public:\n";
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
+ for (const ClassType* type : TypeOracle::GetClasses()) {
std::string name = type->name();
if (!type->ShouldGenerateVerify()) continue;
@@ -4072,7 +4589,7 @@ void ImplementationVisitor::GenerateCSATypes(
h_contents << ", ";
}
first = false;
- if (field.name_and_type.type->IsStructType()) {
+ if (field.name_and_type.type->StructSupertype()) {
h_contents << field.name_and_type.name << ".Flatten()";
} else {
h_contents << "std::make_tuple(" << field.name_and_type.name << ")";
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 0d58b19d46..0cd58aa922 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -23,7 +23,7 @@ namespace torque {
template <typename T>
class Binding;
-struct LocalValue;
+class LocalValue;
class ImplementationVisitor;
// LocationReference is the representation of an l-value, so a value that might
@@ -55,8 +55,7 @@ class LocationReference {
// pointer.
static LocationReference HeapReference(VisitResult heap_reference) {
LocationReference result;
- DCHECK(Type::MatchUnaryGeneric(heap_reference.type(),
- TypeOracle::GetReferenceGeneric()));
+ DCHECK(TypeOracle::MatchReferenceGeneric(heap_reference.type()));
result.heap_reference_ = std::move(heap_reference);
return result;
}
@@ -92,7 +91,17 @@ class LocationReference {
return result;
}
- bool IsConst() const { return temporary_.has_value(); }
+ bool IsConst() const {
+ if (IsHeapReference()) {
+ bool is_const;
+ bool success =
+ TypeOracle::MatchReferenceGeneric(heap_reference().type(), &is_const)
+ .has_value();
+ CHECK(success);
+ return is_const;
+ }
+ return IsTemporary();
+ }
bool IsVariableAccess() const { return variable_.has_value(); }
const VisitResult& variable() const {
@@ -130,8 +139,7 @@ class LocationReference {
const Type* ReferencedType() const {
if (IsHeapReference()) {
- return *Type::MatchUnaryGeneric(heap_reference().type(),
- TypeOracle::GetReferenceGeneric());
+ return *TypeOracle::MatchReferenceGeneric(heap_reference().type());
}
if (IsHeapSlice()) {
return *Type::MatchUnaryGeneric(heap_slice().type(),
@@ -337,8 +345,32 @@ class BlockBindings {
std::vector<std::unique_ptr<Binding<T>>> bindings_;
};
-struct LocalValue {
- LocationReference value;
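+// A named value in scope: either an accessible LocationReference, or an
+// explanation of why the value cannot be accessed from the current context,
+// to be reported if it is looked up anyway.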
+class LocalValue {
+ public:
+ explicit LocalValue(LocationReference reference)
+ : value(std::move(reference)) {}
+ explicit LocalValue(std::string inaccessible_explanation)
+ : inaccessible_explanation(std::move(inaccessible_explanation)) {}
+
+ LocationReference GetLocationReference(Binding<LocalValue>* binding) {
+ if (value) {
+ const LocationReference& ref = *value;
+ if (ref.IsVariableAccess()) {
+ // Attach the binding to enable the never-assigned-to lint check.
+ return LocationReference::VariableAccess(ref.GetVisitResult(), binding);
+ }
+ return ref;
+ } else {
+ Error("Cannot access ", binding->name(), ": ", inaccessible_explanation)
+ .Throw();
+ }
+ }
+
+ bool IsAccessible() const { return value.has_value(); }
+
+ private:
+ base::Optional<LocationReference> value;
+ std::string inaccessible_explanation;
};
struct LocalLabel {
@@ -358,9 +390,10 @@ template <>
inline bool Binding<LocalValue>::CheckWritten() const {
// Do the check only for non-const variables and non struct types.
auto binding = *manager_->current_bindings_[name_];
- const LocationReference& ref = binding->value;
+ if (!binding->IsAccessible()) return false;
+ const LocationReference& ref = binding->GetLocationReference(binding);
if (!ref.IsVariableAccess()) return false;
- return !ref.GetVisitResult().type()->IsStructType();
+ return !ref.GetVisitResult().type()->StructSupertype();
}
template <>
inline std::string Binding<LocalLabel>::BindingTypeString() const {
@@ -388,13 +421,13 @@ class ImplementationVisitor {
void GenerateBitFields(const std::string& output_directory);
void GeneratePrintDefinitions(const std::string& output_directory);
void GenerateClassDefinitions(const std::string& output_directory);
+ void GenerateBodyDescriptors(const std::string& output_directory);
void GenerateInstanceTypes(const std::string& output_directory);
void GenerateClassVerifiers(const std::string& output_directory);
void GenerateEnumVerifiers(const std::string& output_directory);
void GenerateClassDebugReaders(const std::string& output_directory);
void GenerateExportedMacrosAssembler(const std::string& output_directory);
void GenerateCSATypes(const std::string& output_directory);
- void GenerateCppForInternalClasses(const std::string& output_directory);
VisitResult Visit(Expression* expr);
const Type* Visit(Statement* stmt);
@@ -411,12 +444,12 @@ class ImplementationVisitor {
LocationReference GenerateFieldReference(VisitResult object,
const Field& field,
const ClassType* class_type);
- LocationReference GenerateFieldReference(
+ LocationReference GenerateFieldReferenceForInit(
VisitResult object, const Field& field,
const LayoutForInitialization& layout);
VisitResult GenerateArrayLength(
Expression* array_length, Namespace* nspace,
- const std::map<std::string, LocationReference>& bindings);
+ const std::map<std::string, LocalValue>& bindings);
VisitResult GenerateArrayLength(VisitResult object, const Field& field);
VisitResult GenerateArrayLength(const ClassType* class_type,
const InitializerResults& initializer_results,
@@ -432,11 +465,13 @@ class ImplementationVisitor {
VisitResult Visit(StructExpression* decl);
LocationReference GetLocationReference(Expression* location);
+ LocationReference LookupLocalValue(const std::string& name);
LocationReference GetLocationReference(IdentifierExpression* expr);
LocationReference GetLocationReference(DereferenceExpression* expr);
LocationReference GetLocationReference(FieldAccessExpression* expr);
LocationReference GenerateFieldAccess(
LocationReference reference, const std::string& fieldname,
+      bool ignore_struct_field_constness = false,
base::Optional<SourcePosition> pos = {});
LocationReference GetLocationReference(ElementAccessExpression* expr);
diff --git a/deps/v8/src/torque/instance-type-generator.cc b/deps/v8/src/torque/instance-type-generator.cc
index 3f1aec3777..a06c984629 100644
--- a/deps/v8/src/torque/instance-type-generator.cc
+++ b/deps/v8/src/torque/instance-type-generator.cc
@@ -289,6 +289,8 @@ std::unique_ptr<InstanceTypeTree> AssignInstanceTypes() {
// - fully_defined_single_instance_types: This list is pairs of class name and
// instance type, for classes which have defined layouts and a single
// corresponding instance type.
+// - fully_defined_multiple_instance_types: This list is pairs of class name and
+// instance type, for classes which have defined layouts and subclasses.
// - only_declared_single_instance_types: This list is pairs of class name and
// instance type, for classes which have a single corresponding instance type
// and do not have layout definitions in Torque.
@@ -302,6 +304,7 @@ std::unique_ptr<InstanceTypeTree> AssignInstanceTypes() {
void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
std::ostream& values,
std::ostream& fully_defined_single_instance_types,
+ std::ostream& fully_defined_multiple_instance_types,
std::ostream& only_declared_single_instance_types,
std::ostream& fully_defined_range_instance_types,
std::ostream& only_declared_range_instance_types,
@@ -331,6 +334,7 @@ void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
for (auto& child : root->children) {
PrintInstanceTypes(
child.get(), definitions, values, fully_defined_single_instance_types,
+ fully_defined_multiple_instance_types,
only_declared_single_instance_types, fully_defined_range_instance_types,
only_declared_range_instance_types, inner_indent);
}
@@ -351,6 +355,11 @@ void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
: fully_defined_range_instance_types;
range_instance_types << " V(" << root->type->name() << ", FIRST_"
<< type_name << ", LAST_" << type_name << ") \\\n";
+ if (!root->type->IsExtern() && !root->type->IsAbstract() &&
+ !root->type->HasUndefinedLayout()) {
+ fully_defined_multiple_instance_types << " V(" << root->type->name()
+ << ", " << type_name << ") \\\n";
+ }
}
}
}
@@ -370,12 +379,14 @@ void ImplementationVisitor::GenerateInstanceTypes(
std::unique_ptr<InstanceTypeTree> instance_types = AssignInstanceTypes();
std::stringstream values_list;
std::stringstream fully_defined_single_instance_types;
+ std::stringstream fully_defined_multiple_instance_types;
std::stringstream only_declared_single_instance_types;
std::stringstream fully_defined_range_instance_types;
std::stringstream only_declared_range_instance_types;
if (instance_types != nullptr) {
PrintInstanceTypes(instance_types.get(), header, values_list,
fully_defined_single_instance_types,
+ fully_defined_multiple_instance_types,
only_declared_single_instance_types,
fully_defined_range_instance_types,
only_declared_range_instance_types, " ");
@@ -394,6 +405,12 @@ void ImplementationVisitor::GenerateInstanceTypes(
header << fully_defined_single_instance_types.str();
header << "\n";
+ header << "// Pairs of (ClassName, INSTANCE_TYPE) for classes that have\n";
+ header << "// full Torque definitions and subclasses.\n";
+ header << "#define TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(V) \\\n";
+ header << fully_defined_multiple_instance_types.str();
+ header << "\n";
+
header << "// Pairs of (ClassName, INSTANCE_TYPE) for classes that are\n";
header << "// declared but not defined in Torque. These classes may\n";
header << "// correspond with actual C++ classes, but they are not\n";
@@ -416,10 +433,9 @@ void ImplementationVisitor::GenerateInstanceTypes(
header << only_declared_range_instance_types.str();
header << "\n";
- header << "// Instance types for Torque-internal classes.\n";
- header << "#define TORQUE_INTERNAL_INSTANCE_TYPES(V) \\\n";
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
+ header << "// Instance types for non-extern Torque classes.\n";
+ header << "#define TORQUE_INSTANCE_TYPES(V) \\\n";
+ for (const ClassType* type : TypeOracle::GetClasses()) {
if (type->IsExtern()) continue;
std::string type_name =
CapifyStringWithUnderscores(type->name()) + "_TYPE";
@@ -427,11 +443,11 @@ void ImplementationVisitor::GenerateInstanceTypes(
}
header << "\n";
- header << "// Struct list entries for Torque-internal classes.\n";
- header << "#define TORQUE_STRUCT_LIST_GENERATOR(V, _) \\\n";
- for (const TypeAlias* alias : GlobalContext::GetClasses()) {
- const ClassType* type = ClassType::DynamicCast(alias->type());
+ header << "// Map list macros for non-extern Torque classes.\n";
+ header << "#define TORQUE_INTERNAL_VARSIZE_CLASS_LIST_GENERATOR(V, _) \\\n";
+ for (const ClassType* type : TypeOracle::GetClasses()) {
if (type->IsExtern()) continue;
+ if (!type->HasIndexedField()) continue;
std::string type_name =
CapifyStringWithUnderscores(type->name()) + "_TYPE";
std::string variable_name = SnakeifyString(type->name());
@@ -439,6 +455,21 @@ void ImplementationVisitor::GenerateInstanceTypes(
<< variable_name << ") \\\n";
}
header << "\n";
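+  // Entries in these class list generators have the shape
+  // V(_, FOO_TYPE, Foo, foo): instance type, class name, and the snake-case
+  // name of the corresponding map root (names illustrative).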
+ header << "#define TORQUE_INTERNAL_FIXED_CLASS_LIST_GENERATOR(V, _) \\\n";
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ if (type->IsExtern()) continue;
+ if (type->HasIndexedField()) continue;
+ std::string type_name =
+ CapifyStringWithUnderscores(type->name()) + "_TYPE";
+ std::string variable_name = SnakeifyString(type->name());
+ header << " V(_, " << type_name << ", " << type->name() << ", "
+ << variable_name << ") \\\n";
+ }
+ header << "\n";
+ header << "#define TORQUE_INTERNAL_CLASS_LIST_GENERATOR(V, _) \\\n";
+ header << " TORQUE_INTERNAL_VARSIZE_CLASS_LIST_GENERATOR(V, _) \\\n";
+ header << " TORQUE_INTERNAL_FIXED_CLASS_LIST_GENERATOR(V, _)\n";
+ header << "\n";
}
std::string output_header_path = output_directory + "/" + file_name;
WriteFile(output_header_path, header.str());
diff --git a/deps/v8/src/torque/instructions.cc b/deps/v8/src/torque/instructions.cc
index 81f5c6184c..6acef3018c 100644
--- a/deps/v8/src/torque/instructions.cc
+++ b/deps/v8/src/torque/instructions.cc
@@ -48,6 +48,11 @@ void PeekInstruction::TypeInstruction(Stack<const Type*>* stack,
stack->Push(type);
}
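+// Each RecomputeDefinitionLocations override below replays its instruction's
+// stack effect on a stack of DefinitionLocations, so that every stack slot
+// can be traced to the instruction (and output index) that defined it;
+// successor blocks get the resulting definitions merged into their inputs.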
+void PeekInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Push(locations->Peek(slot));
+}
+
void PokeInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
const Type* type = stack->Top();
@@ -59,26 +64,71 @@ void PokeInstruction::TypeInstruction(Stack<const Type*>* stack,
stack->Pop();
}
+void PokeInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Poke(slot, locations->Pop());
+}
+
void DeleteRangeInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
stack->DeleteRange(range);
}
+void DeleteRangeInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->DeleteRange(range);
+}
+
void PushUninitializedInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
stack->Push(type);
}
+void PushUninitializedInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Push(GetValueDefinition());
+}
+
+DefinitionLocation PushUninitializedInstruction::GetValueDefinition() const {
+ return DefinitionLocation::Instruction(this, 0);
+}
+
void PushBuiltinPointerInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
stack->Push(type);
}
+void PushBuiltinPointerInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Push(GetValueDefinition());
+}
+
+DefinitionLocation PushBuiltinPointerInstruction::GetValueDefinition() const {
+ return DefinitionLocation::Instruction(this, 0);
+}
+
void NamespaceConstantInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
stack->PushMany(LowerType(constant->type()));
}
+void NamespaceConstantInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ for (std::size_t i = 0; i < GetValueDefinitionCount(); ++i) {
+ locations->Push(GetValueDefinition(i));
+ }
+}
+
+std::size_t NamespaceConstantInstruction::GetValueDefinitionCount() const {
+ return LowerType(constant->type()).size();
+}
+
+DefinitionLocation NamespaceConstantInstruction::GetValueDefinition(
+ std::size_t index) const {
+ DCHECK_LT(index, GetValueDefinitionCount());
+ return DefinitionLocation::Instruction(this, index);
+}
+
void InstructionBase::InvalidateTransientTypes(
Stack<const Type*>* stack) const {
auto current = stack->begin();
@@ -113,6 +163,26 @@ void CallIntrinsicInstruction::TypeInstruction(Stack<const Type*>* stack,
stack->PushMany(LowerType(intrinsic->signature().return_type));
}
+void CallIntrinsicInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ auto parameter_types =
+ LowerParameterTypes(intrinsic->signature().parameter_types);
+ locations->PopMany(parameter_types.size());
+ for (std::size_t i = 0; i < GetValueDefinitionCount(); ++i) {
+ locations->Push(DefinitionLocation::Instruction(this, i));
+ }
+}
+
+std::size_t CallIntrinsicInstruction::GetValueDefinitionCount() const {
+ return LowerType(intrinsic->signature().return_type).size();
+}
+
+DefinitionLocation CallIntrinsicInstruction::GetValueDefinition(
+ std::size_t index) const {
+ DCHECK_LT(index, GetValueDefinitionCount());
+ return DefinitionLocation::Instruction(this, index);
+}
+
void CallCsaMacroInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
std::vector<const Type*> parameter_types =
@@ -140,6 +210,39 @@ void CallCsaMacroInstruction::TypeInstruction(Stack<const Type*>* stack,
stack->PushMany(LowerType(macro->signature().return_type));
}
+void CallCsaMacroInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ auto parameter_types =
+ LowerParameterTypes(macro->signature().parameter_types);
+ locations->PopMany(parameter_types.size());
+
+ if (catch_block) {
+ locations->Push(*GetExceptionObjectDefinition());
+ (*catch_block)->MergeInputDefinitions(*locations, worklist);
+ locations->Pop();
+ }
+
+ for (std::size_t i = 0; i < GetValueDefinitionCount(); ++i) {
+ locations->Push(GetValueDefinition(i));
+ }
+}
+
+base::Optional<DefinitionLocation>
+CallCsaMacroInstruction::GetExceptionObjectDefinition() const {
+ if (!catch_block) return base::nullopt;
+ return DefinitionLocation::Instruction(this, GetValueDefinitionCount());
+}
+
+std::size_t CallCsaMacroInstruction::GetValueDefinitionCount() const {
+ return LowerType(macro->signature().return_type).size();
+}
+
+DefinitionLocation CallCsaMacroInstruction::GetValueDefinition(
+ std::size_t index) const {
+ DCHECK_LT(index, GetValueDefinitionCount());
+ return DefinitionLocation::Instruction(this, index);
+}
+
void CallCsaMacroAndBranchInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
std::vector<const Type*> parameter_types =
@@ -188,6 +291,78 @@ void CallCsaMacroAndBranchInstruction::TypeInstruction(
}
}
+void CallCsaMacroAndBranchInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ auto parameter_types =
+ LowerParameterTypes(macro->signature().parameter_types);
+ locations->PopMany(parameter_types.size());
+
+ for (std::size_t label_index = 0; label_index < label_blocks.size();
+ ++label_index) {
+ const std::size_t count = GetLabelValueDefinitionCount(label_index);
+ for (std::size_t i = 0; i < count; ++i) {
+ locations->Push(GetLabelValueDefinition(label_index, i));
+ }
+ label_blocks[label_index]->MergeInputDefinitions(*locations, worklist);
+ locations->PopMany(count);
+ }
+
+ if (catch_block) {
+ locations->Push(*GetExceptionObjectDefinition());
+ (*catch_block)->MergeInputDefinitions(*locations, worklist);
+ locations->Pop();
+ }
+
+ if (macro->signature().return_type != TypeOracle::GetNeverType()) {
+ if (return_continuation) {
+ const std::size_t count = GetValueDefinitionCount();
+ for (std::size_t i = 0; i < count; ++i) {
+ locations->Push(GetValueDefinition(i));
+ }
+ (*return_continuation)->MergeInputDefinitions(*locations, worklist);
+ locations->PopMany(count);
+ }
+ }
+}
+
+std::size_t CallCsaMacroAndBranchInstruction::GetLabelCount() const {
+ return label_blocks.size();
+}
+
+std::size_t CallCsaMacroAndBranchInstruction::GetLabelValueDefinitionCount(
+ std::size_t label) const {
+ DCHECK_LT(label, GetLabelCount());
+ return LowerParameterTypes(macro->signature().labels[label].types).size();
+}
+
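+// Definition indices for this instruction are laid out as: return values
+// first, then (if present) the exception object, then the label values in
+// label order. The offset computation below mirrors that layout.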
+DefinitionLocation CallCsaMacroAndBranchInstruction::GetLabelValueDefinition(
+ std::size_t label, std::size_t index) const {
+ DCHECK_LT(index, GetLabelValueDefinitionCount(label));
+ std::size_t offset = GetValueDefinitionCount() + (catch_block ? 1 : 0);
+ for (std::size_t label_index = 0; label_index < label; ++label_index) {
+ offset += GetLabelValueDefinitionCount(label_index);
+ }
+ return DefinitionLocation::Instruction(this, offset + index);
+}
+
+std::size_t CallCsaMacroAndBranchInstruction::GetValueDefinitionCount() const {
+ if (macro->signature().return_type == TypeOracle::GetNeverType()) return 0;
+ if (!return_continuation) return 0;
+ return LowerType(macro->signature().return_type).size();
+}
+
+DefinitionLocation CallCsaMacroAndBranchInstruction::GetValueDefinition(
+ std::size_t index) const {
+ DCHECK_LT(index, GetValueDefinitionCount());
+ return DefinitionLocation::Instruction(this, index);
+}
+
+base::Optional<DefinitionLocation>
+CallCsaMacroAndBranchInstruction::GetExceptionObjectDefinition() const {
+ if (!catch_block) return base::nullopt;
+ return DefinitionLocation::Instruction(this, GetValueDefinitionCount());
+}
+
void CallBuiltinInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
std::vector<const Type*> argument_types = stack->PopMany(argc);
@@ -208,6 +383,37 @@ void CallBuiltinInstruction::TypeInstruction(Stack<const Type*>* stack,
stack->PushMany(LowerType(builtin->signature().return_type));
}
+void CallBuiltinInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->PopMany(argc);
+
+ if (catch_block) {
+ locations->Push(*GetExceptionObjectDefinition());
+ (*catch_block)->MergeInputDefinitions(*locations, worklist);
+ locations->Pop();
+ }
+
+ for (std::size_t i = 0; i < GetValueDefinitionCount(); ++i) {
+ locations->Push(GetValueDefinition(i));
+ }
+}
+
+std::size_t CallBuiltinInstruction::GetValueDefinitionCount() const {
+ return LowerType(builtin->signature().return_type).size();
+}
+
+DefinitionLocation CallBuiltinInstruction::GetValueDefinition(
+ std::size_t index) const {
+ DCHECK_LT(index, GetValueDefinitionCount());
+ return DefinitionLocation::Instruction(this, index);
+}
+
+base::Optional<DefinitionLocation>
+CallBuiltinInstruction::GetExceptionObjectDefinition() const {
+ if (!catch_block) return base::nullopt;
+ return DefinitionLocation::Instruction(this, GetValueDefinitionCount());
+}
+
void CallBuiltinPointerInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
std::vector<const Type*> argument_types = stack->PopMany(argc);
@@ -216,12 +422,31 @@ void CallBuiltinPointerInstruction::TypeInstruction(
if (argument_types != LowerParameterTypes(f->parameter_types())) {
ReportError("wrong argument types");
}
+ DCHECK_EQ(type, f);
// TODO(tebbi): Only invalidate transient types if the function pointer type
// is transitioning.
InvalidateTransientTypes(stack);
stack->PushMany(LowerType(f->return_type()));
}
+void CallBuiltinPointerInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->PopMany(argc + 1);
+ for (std::size_t i = 0; i < GetValueDefinitionCount(); ++i) {
+ locations->Push(GetValueDefinition(i));
+ }
+}
+
+std::size_t CallBuiltinPointerInstruction::GetValueDefinitionCount() const {
+ return LowerType(type->return_type()).size();
+}
+
+DefinitionLocation CallBuiltinPointerInstruction::GetValueDefinition(
+ std::size_t index) const {
+ DCHECK_LT(index, GetValueDefinitionCount());
+ return DefinitionLocation::Instruction(this, index);
+}
+
void CallRuntimeInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
std::vector<const Type*> argument_types = stack->PopMany(argc);
@@ -246,6 +471,42 @@ void CallRuntimeInstruction::TypeInstruction(Stack<const Type*>* stack,
}
}
+void CallRuntimeInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->PopMany(argc);
+
+ if (catch_block) {
+ locations->Push(*GetExceptionObjectDefinition());
+ (*catch_block)->MergeInputDefinitions(*locations, worklist);
+ locations->Pop();
+ }
+
+ const Type* return_type = runtime_function->signature().return_type;
+ if (return_type != TypeOracle::GetNeverType()) {
+ for (std::size_t i = 0; i < GetValueDefinitionCount(); ++i) {
+ locations->Push(GetValueDefinition(i));
+ }
+ }
+}
+
+std::size_t CallRuntimeInstruction::GetValueDefinitionCount() const {
+ const Type* return_type = runtime_function->signature().return_type;
+ if (return_type == TypeOracle::GetNeverType()) return 0;
+ return LowerType(return_type).size();
+}
+
+DefinitionLocation CallRuntimeInstruction::GetValueDefinition(
+ std::size_t index) const {
+ DCHECK_LT(index, GetValueDefinitionCount());
+ return DefinitionLocation::Instruction(this, index);
+}
+
+base::Optional<DefinitionLocation>
+CallRuntimeInstruction::GetExceptionObjectDefinition() const {
+ if (!catch_block) return base::nullopt;
+ return DefinitionLocation::Instruction(this, GetValueDefinitionCount());
+}
+
void BranchInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
const Type* condition_type = stack->Pop();
@@ -256,17 +517,35 @@ void BranchInstruction::TypeInstruction(Stack<const Type*>* stack,
if_false->SetInputTypes(*stack);
}
+void BranchInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Pop();
+ if_true->MergeInputDefinitions(*locations, worklist);
+ if_false->MergeInputDefinitions(*locations, worklist);
+}
+
void ConstexprBranchInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
if_true->SetInputTypes(*stack);
if_false->SetInputTypes(*stack);
}
+void ConstexprBranchInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ if_true->MergeInputDefinitions(*locations, worklist);
+ if_false->MergeInputDefinitions(*locations, worklist);
+}
+
void GotoInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
destination->SetInputTypes(*stack);
}
+void GotoInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ destination->MergeInputDefinitions(*locations, worklist);
+}
+
void GotoExternalInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
if (variable_names.size() != stack->Size()) {
@@ -274,22 +553,45 @@ void GotoExternalInstruction::TypeInstruction(Stack<const Type*>* stack,
}
}
+void GotoExternalInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {}
+
void ReturnInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
cfg->SetReturnType(stack->Pop());
}
+void ReturnInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Pop();
+}
+
void PrintConstantStringInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {}
+void PrintConstantStringInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {}
+
void AbortInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {}
+void AbortInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {}
+
void UnsafeCastInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
stack->Poke(stack->AboveTop() - 1, destination_type);
}
+void UnsafeCastInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Poke(locations->AboveTop() - 1, GetValueDefinition());
+}
+
+DefinitionLocation UnsafeCastInstruction::GetValueDefinition() const {
+ return DefinitionLocation::Instruction(this, 0);
+}
+
void LoadReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
ExpectType(TypeOracle::GetIntPtrType(), stack->Pop());
@@ -298,6 +600,17 @@ void LoadReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
stack->Push(type);
}
+void LoadReferenceInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Pop();
+ locations->Pop();
+ locations->Push(GetValueDefinition());
+}
+
+DefinitionLocation LoadReferenceInstruction::GetValueDefinition() const {
+ return DefinitionLocation::Instruction(this, 0);
+}
+
void StoreReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
ExpectSubtype(stack->Pop(), type);
@@ -305,12 +618,29 @@ void StoreReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
ExpectSubtype(stack->Pop(), TypeOracle::GetHeapObjectType());
}
+void StoreReferenceInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Pop();
+ locations->Pop();
+ locations->Pop();
+}
+
void LoadBitFieldInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
ExpectType(bit_field_struct_type, stack->Pop());
stack->Push(bit_field.name_and_type.type);
}
+void LoadBitFieldInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Pop();
+ locations->Push(GetValueDefinition());
+}
+
+DefinitionLocation LoadBitFieldInstruction::GetValueDefinition() const {
+ return DefinitionLocation::Instruction(this, 0);
+}
+
void StoreBitFieldInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
ExpectSubtype(bit_field.name_and_type.type, stack->Pop());
@@ -318,6 +648,17 @@ void StoreBitFieldInstruction::TypeInstruction(Stack<const Type*>* stack,
stack->Push(bit_field_struct_type);
}
+void StoreBitFieldInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ locations->Pop();
+ locations->Pop();
+ locations->Push(GetValueDefinition());
+}
+
+DefinitionLocation StoreBitFieldInstruction::GetValueDefinition() const {
+ return DefinitionLocation::Instruction(this, 0);
+}
+
bool CallRuntimeInstruction::IsBlockTerminator() const {
return is_tailcall || runtime_function->signature().return_type ==
TypeOracle::GetNeverType();
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index cb560274fe..2d5eff71da 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -50,11 +50,14 @@ class RuntimeFunction;
V(AbortInstruction) \
V(UnsafeCastInstruction)
-#define TORQUE_INSTRUCTION_BOILERPLATE() \
- static const InstructionKind kKind; \
- std::unique_ptr<InstructionBase> Clone() const override; \
- void Assign(const InstructionBase& other) override; \
- void TypeInstruction(Stack<const Type*>* stack, ControlFlowGraph* cfg) \
+#define TORQUE_INSTRUCTION_BOILERPLATE() \
+ static const InstructionKind kKind; \
+ std::unique_ptr<InstructionBase> Clone() const override; \
+ void Assign(const InstructionBase& other) override; \
+ void TypeInstruction(Stack<const Type*>* stack, ControlFlowGraph* cfg) \
+ const override; \
+ void RecomputeDefinitionLocations(Stack<DefinitionLocation>* locations, \
+ Worklist<Block*>* worklist) \
const override;
enum class InstructionKind {
@@ -63,6 +66,115 @@ enum class InstructionKind {
#undef ENUM_ITEM
};
+struct InstructionBase;
+
+class DefinitionLocation {
+ public:
+ enum class Kind {
+ kInvalid,
+ kParameter,
+ kPhi,
+ kInstruction,
+ };
+
+ DefinitionLocation() : kind_(Kind::kInvalid), location_(nullptr), index_(0) {}
+
+ static DefinitionLocation Parameter(std::size_t index) {
+ return DefinitionLocation(Kind::kParameter, nullptr, index);
+ }
+
+ static DefinitionLocation Phi(const Block* block, std::size_t index) {
+ return DefinitionLocation(Kind::kPhi, block, index);
+ }
+
+ static DefinitionLocation Instruction(const InstructionBase* instruction,
+ std::size_t index = 0) {
+ return DefinitionLocation(Kind::kInstruction, instruction, index);
+ }
+
+ Kind GetKind() const { return kind_; }
+ bool IsValid() const { return kind_ != Kind::kInvalid; }
+ bool IsParameter() const { return kind_ == Kind::kParameter; }
+ bool IsPhi() const { return kind_ == Kind::kPhi; }
+ bool IsInstruction() const { return kind_ == Kind::kInstruction; }
+
+ std::size_t GetParameterIndex() const {
+ DCHECK(IsParameter());
+ return index_;
+ }
+
+ const Block* GetPhiBlock() const {
+ DCHECK(IsPhi());
+ return reinterpret_cast<const Block*>(location_);
+ }
+
+ bool IsPhiFromBlock(const Block* block) const {
+ return IsPhi() && GetPhiBlock() == block;
+ }
+
+ std::size_t GetPhiIndex() const {
+ DCHECK(IsPhi());
+ return index_;
+ }
+
+ const InstructionBase* GetInstruction() const {
+ DCHECK(IsInstruction());
+ return reinterpret_cast<const InstructionBase*>(location_);
+ }
+
+ std::size_t GetInstructionIndex() const {
+ DCHECK(IsInstruction());
+ return index_;
+ }
+
+ bool operator==(const DefinitionLocation& other) const {
+ if (kind_ != other.kind_) return false;
+ if (location_ != other.location_) return false;
+ return index_ == other.index_;
+ }
+
+ bool operator!=(const DefinitionLocation& other) const {
+ return !operator==(other);
+ }
+
+ bool operator<(const DefinitionLocation& other) const {
+ if (kind_ != other.kind_) {
+ return static_cast<int>(kind_) < static_cast<int>(other.kind_);
+ }
+ if (location_ != other.location_) {
+ return location_ < other.location_;
+ }
+ return index_ < other.index_;
+ }
+
+ private:
+ DefinitionLocation(Kind kind, const void* location, std::size_t index)
+ : kind_(kind), location_(location), index_(index) {}
+
+ Kind kind_;
+ const void* location_;
+ std::size_t index_;
+};
+
+inline std::ostream& operator<<(std::ostream& stream,
+ const DefinitionLocation& loc) {
+ switch (loc.GetKind()) {
+ case DefinitionLocation::Kind::kInvalid:
+ return stream << "DefinitionLocation::Invalid()";
+ case DefinitionLocation::Kind::kParameter:
+ return stream << "DefinitionLocation::Parameter("
+ << loc.GetParameterIndex() << ")";
+ case DefinitionLocation::Kind::kPhi:
+ return stream << "DefinitionLocation::Phi(" << std::hex
+ << (uint64_t)loc.GetPhiBlock() << std::dec << ", "
+ << loc.GetPhiIndex() << ")";
+ case DefinitionLocation::Kind::kInstruction:
+ return stream << "DefinitionLocation::Instruction(" << std::hex
+ << (uint64_t)loc.GetInstruction() << std::dec << ", "
+ << loc.GetInstructionIndex() << ")";
+ }
+}
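// Illustrative usage (editorial note, not part of this commit):
// DefinitionLocation is a small value type with a total order, so it can key
// ordered containers when tracking definitions. Assuming an InstructionBase*
// instr and a Block* block are in scope:
//
//   std::map<DefinitionLocation, int> use_counts;
//   ++use_counts[DefinitionLocation::Parameter(0)];
//   ++use_counts[DefinitionLocation::Phi(block, 1)];
//   ++use_counts[DefinitionLocation::Instruction(instr)];  // index defaults to 0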
+
struct InstructionBase {
InstructionBase() : pos(CurrentSourcePosition::Get()) {}
virtual std::unique_ptr<InstructionBase> Clone() const = 0;
@@ -71,6 +183,9 @@ struct InstructionBase {
virtual void TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const = 0;
+ virtual void RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations,
+ Worklist<Block*>* worklist) const = 0;
void InvalidateTransientTypes(Stack<const Type*>* stack) const;
virtual bool IsBlockTerminator() const { return false; }
virtual void AppendSuccessorBlocks(std::vector<Block*>* block_list) const {}
@@ -141,6 +256,10 @@ class Instruction {
void TypeInstruction(Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
return instruction_->TypeInstruction(stack, cfg);
}
+ void RecomputeDefinitionLocations(Stack<DefinitionLocation>* locations,
+ Worklist<Block*>* worklist) const {
+ instruction_->RecomputeDefinitionLocations(locations, worklist);
+ }
InstructionBase* operator->() { return instruction_.get(); }
const InstructionBase* operator->() const { return instruction_.get(); }
@@ -183,6 +302,8 @@ struct PushUninitializedInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit PushUninitializedInstruction(const Type* type) : type(type) {}
+ DefinitionLocation GetValueDefinition() const;
+
const Type* type;
};
@@ -193,6 +314,8 @@ struct PushBuiltinPointerInstruction : InstructionBase {
DCHECK(type->IsBuiltinPointerType());
}
+ DefinitionLocation GetValueDefinition() const;
+
std::string external_name;
const Type* type;
};
@@ -202,12 +325,18 @@ struct NamespaceConstantInstruction : InstructionBase {
explicit NamespaceConstantInstruction(NamespaceConstant* constant)
: constant(constant) {}
+ std::size_t GetValueDefinitionCount() const;
+ DefinitionLocation GetValueDefinition(std::size_t index) const;
+
NamespaceConstant* constant;
};
struct LoadReferenceInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
explicit LoadReferenceInstruction(const Type* type) : type(type) {}
+
+ DefinitionLocation GetValueDefinition() const;
+
const Type* type;
};
@@ -224,6 +353,9 @@ struct LoadBitFieldInstruction : InstructionBase {
BitField bit_field)
: bit_field_struct_type(bit_field_struct_type),
bit_field(std::move(bit_field)) {}
+
+ DefinitionLocation GetValueDefinition() const;
+
const BitFieldStructType* bit_field_struct_type;
BitField bit_field;
};
@@ -236,6 +368,9 @@ struct StoreBitFieldInstruction : InstructionBase {
BitField bit_field)
: bit_field_struct_type(bit_field_struct_type),
bit_field(std::move(bit_field)) {}
+
+ DefinitionLocation GetValueDefinition() const;
+
const BitFieldStructType* bit_field_struct_type;
BitField bit_field;
};
@@ -249,6 +384,9 @@ struct CallIntrinsicInstruction : InstructionBase {
specialization_types(std::move(specialization_types)),
constexpr_arguments(constexpr_arguments) {}
+ std::size_t GetValueDefinitionCount() const;
+ DefinitionLocation GetValueDefinition(std::size_t index) const;
+
Intrinsic* intrinsic;
TypeVector specialization_types;
std::vector<std::string> constexpr_arguments;
@@ -266,6 +404,10 @@ struct CallCsaMacroInstruction : InstructionBase {
if (catch_block) block_list->push_back(*catch_block);
}
+ base::Optional<DefinitionLocation> GetExceptionObjectDefinition() const;
+ std::size_t GetValueDefinitionCount() const;
+ DefinitionLocation GetValueDefinition(std::size_t index) const;
+
Macro* macro;
std::vector<std::string> constexpr_arguments;
base::Optional<Block*> catch_block;
@@ -290,6 +432,14 @@ struct CallCsaMacroAndBranchInstruction : InstructionBase {
for (Block* block : label_blocks) block_list->push_back(block);
}
+ std::size_t GetLabelCount() const;
+ std::size_t GetLabelValueDefinitionCount(std::size_t label) const;
+ DefinitionLocation GetLabelValueDefinition(std::size_t label,
+ std::size_t index) const;
+ std::size_t GetValueDefinitionCount() const;
+ DefinitionLocation GetValueDefinition(std::size_t index) const;
+ base::Optional<DefinitionLocation> GetExceptionObjectDefinition() const;
+
Macro* macro;
std::vector<std::string> constexpr_arguments;
base::Optional<Block*> return_continuation;
@@ -310,6 +460,10 @@ struct CallBuiltinInstruction : InstructionBase {
if (catch_block) block_list->push_back(*catch_block);
}
+ std::size_t GetValueDefinitionCount() const;
+ DefinitionLocation GetValueDefinition(std::size_t index) const;
+ base::Optional<DefinitionLocation> GetExceptionObjectDefinition() const;
+
bool is_tailcall;
Builtin* builtin;
size_t argc;
@@ -323,6 +477,9 @@ struct CallBuiltinPointerInstruction : InstructionBase {
const BuiltinPointerType* type, size_t argc)
: is_tailcall(is_tailcall), type(type), argc(argc) {}
+ std::size_t GetValueDefinitionCount() const;
+ DefinitionLocation GetValueDefinition(std::size_t index) const;
+
bool is_tailcall;
const BuiltinPointerType* type;
size_t argc;
@@ -342,6 +499,10 @@ struct CallRuntimeInstruction : InstructionBase {
if (catch_block) block_list->push_back(*catch_block);
}
+ std::size_t GetValueDefinitionCount() const;
+ DefinitionLocation GetValueDefinition(std::size_t index) const;
+ base::Optional<DefinitionLocation> GetExceptionObjectDefinition() const;
+
bool is_tailcall;
RuntimeFunction* runtime_function;
size_t argc;
@@ -434,6 +595,8 @@ struct UnsafeCastInstruction : InstructionBase {
explicit UnsafeCastInstruction(const Type* destination_type)
: destination_type(destination_type) {}
+ DefinitionLocation GetValueDefinition() const;
+
const Type* destination_type;
};
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index cb41cff946..ad7d906d54 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -88,10 +88,10 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateClassVerifiers(output_directory);
implementation_visitor.GenerateClassDebugReaders(output_directory);
implementation_visitor.GenerateEnumVerifiers(output_directory);
+ implementation_visitor.GenerateBodyDescriptors(output_directory);
implementation_visitor.GenerateExportedMacrosAssembler(output_directory);
implementation_visitor.GenerateCSATypes(output_directory);
implementation_visitor.GenerateInstanceTypes(output_directory);
- implementation_visitor.GenerateCppForInternalClasses(output_directory);
implementation_visitor.EndCSAFiles();
implementation_visitor.GenerateImplementation(output_directory);
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index c6b63e2234..e63827db2c 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -8,7 +8,8 @@
#include <stdexcept>
#include <unordered_map>
-#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/torque/ast.h"
#include "src/torque/constants.h"
#include "src/torque/declarations.h"
#include "src/torque/earley-parser.h"
@@ -41,6 +42,8 @@ class BuildFlags : public ContextualClass<BuildFlags> {
build_flags_["V8_SFI_HAS_UNIQUE_ID"] = V8_SFI_HAS_UNIQUE_ID;
build_flags_["TAGGED_SIZE_8_BYTES"] = TAGGED_SIZE_8_BYTES;
build_flags_["V8_DOUBLE_FIELDS_UNBOXING"] = V8_DOUBLE_FIELDS_UNBOXING;
+ build_flags_["V8_ARRAY_BUFFER_EXTENSION_BOOL"] =
+ V8_ARRAY_BUFFER_EXTENSION_BOOL;
build_flags_["TRUE_FOR_TESTING"] = true;
build_flags_["FALSE_FOR_TESTING"] = false;
}
@@ -300,6 +303,14 @@ void CheckNotDeferredStatement(Statement* statement) {
}
}
+TypeExpression* AddConstexpr(TypeExpression* type) {
+ BasicTypeExpression* basic = BasicTypeExpression::DynamicCast(type);
+ if (!basic) Error("Unsupported extends clause.").Throw();
+ return MakeNode<BasicTypeExpression>(basic->namespace_qualification,
+ CONSTEXPR_TYPE_PREFIX + basic->name,
+ basic->generic_arguments);
+}
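// Worked example (editorial note, not part of this commit): for a Torque
// declaration "type Foo extends Bar;", the implicitly generated constexpr
// counterpart should extend the constexpr form of the same base, so
// AddConstexpr maps the expression "Bar" to "constexpr Bar" by prepending
// CONSTEXPR_TYPE_PREFIX:
//
//   TypeExpression* base = ...;                  // parsed from "extends Bar"
//   TypeExpression* cbase = AddConstexpr(base);  // names "constexpr Bar"
//
// Non-basic expressions (unions, function types) fail with
// "Unsupported extends clause.".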
+
Expression* MakeCall(IdentifierExpression* callee,
base::Optional<Expression*> target,
std::vector<Expression*> arguments,
@@ -547,9 +558,24 @@ base::Optional<ParseResult> MakeIntrinsicDeclaration(
return ParseResult{result};
}
+namespace {
+bool HasExportAnnotation(ParseResultIterator* child_results,
+ const char* declaration) {
+ auto annotations = child_results->NextAs<std::vector<Annotation>>();
+ if (annotations.size()) {
+ if (annotations.size() > 1 || annotations[0].name->value != "@export") {
+ Error(declaration,
+ " declarations only support a single @export annotation");
+ }
+ return true;
+ }
+ return false;
+}
+} // namespace
+
base::Optional<ParseResult> MakeTorqueMacroDeclaration(
ParseResultIterator* child_results) {
- auto export_to_csa = child_results->NextAs<bool>();
+ bool export_to_csa = HasExportAnnotation(child_results, "macro");
auto transitioning = child_results->NextAs<bool>();
auto operator_name = child_results->NextAs<base::Optional<std::string>>();
auto name = child_results->NextAs<Identifier*>();
@@ -644,7 +670,7 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
NamingConventionError("Type", name, "UpperCamelCase");
}
auto generic_parameters = child_results->NextAs<GenericParameters>();
- auto extends = child_results->NextAs<base::Optional<Identifier*>>();
+ auto extends = child_results->NextAs<base::Optional<TypeExpression*>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
TypeDeclaration* type_decl = MakeNode<AbstractTypeDeclaration>(
name, transient, extends, std::move(generates));
@@ -663,11 +689,9 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + name->value);
constexpr_name->pos = name->pos;
- base::Optional<Identifier*> constexpr_extends;
+ base::Optional<TypeExpression*> constexpr_extends;
if (extends) {
- constexpr_extends =
- MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + (*extends)->value);
- (*constexpr_extends)->pos = name->pos;
+ constexpr_extends = AddConstexpr(*extends);
}
TypeDeclaration* constexpr_decl = MakeNode<AbstractTypeDeclaration>(
constexpr_name, transient, constexpr_extends, constexpr_generates);
@@ -833,7 +857,8 @@ base::Optional<ParseResult> MakeClassDeclaration(
child_results,
{ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT,
ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
- ANNOTATION_GENERATE_CPP_CLASS,
+ ANNOTATION_GENERATE_CPP_CLASS, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
+ ANNOTATION_EXPORT_CPP_CLASS,
ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT,
ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT},
{ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
@@ -852,6 +877,12 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (annotations.Contains(ANNOTATION_GENERATE_CPP_CLASS)) {
flags |= ClassFlag::kGenerateCppClassDefinitions;
}
+ if (annotations.Contains(ANNOTATION_GENERATE_BODY_DESCRIPTOR)) {
+ flags |= ClassFlag::kGenerateBodyDescriptor;
+ }
+ if (annotations.Contains(ANNOTATION_EXPORT_CPP_CLASS)) {
+ flags |= ClassFlag::kExport;
+ }
if (annotations.Contains(ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT)) {
flags |= ClassFlag::kHighestInstanceTypeWithinParent;
}
@@ -943,7 +974,8 @@ base::Optional<ParseResult> MakeSpecializationDeclaration(
base::Optional<ParseResult> MakeStructDeclaration(
ParseResultIterator* child_results) {
- bool is_export = child_results->NextAs<bool>();
+ bool is_export = HasExportAnnotation(child_results, "Struct");
+
StructFlags flags = StructFlag::kNone;
if (is_export) flags |= StructFlag::kExport;
@@ -1065,12 +1097,15 @@ base::Optional<ParseResult> MakeFunctionTypeExpression(
base::Optional<ParseResult> MakeReferenceTypeExpression(
ParseResultIterator* child_results) {
+ auto is_const = child_results->NextAs<bool>();
auto referenced_type = child_results->NextAs<TypeExpression*>();
std::vector<std::string> namespace_qualification{
TORQUE_INTERNAL_NAMESPACE_STRING};
std::vector<TypeExpression*> generic_arguments{referenced_type};
TypeExpression* result = MakeNode<BasicTypeExpression>(
- namespace_qualification, REFERENCE_TYPE_STRING, generic_arguments);
+ namespace_qualification,
+ is_const ? CONST_REFERENCE_TYPE_STRING : MUTABLE_REFERENCE_TYPE_STRING,
+ generic_arguments);
return ParseResult{result};
}
@@ -1124,7 +1159,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
const bool is_extern = child_results->NextAs<bool>();
auto name_identifier = child_results->NextAs<Identifier*>();
auto name = name_identifier->value;
- auto base_identifier = child_results->NextAs<base::Optional<Identifier*>>();
+ auto base_type_expression =
+ child_results->NextAs<base::Optional<TypeExpression*>>();
auto constexpr_generates_opt =
child_results->NextAs<base::Optional<std::string>>();
auto entries = child_results->NextAs<std::vector<Identifier*>>();
@@ -1142,12 +1178,12 @@ base::Optional<ParseResult> MakeEnumDeclaration(
auto constexpr_generates =
constexpr_generates_opt ? *constexpr_generates_opt : name;
- const bool generate_nonconstexpr = base_identifier.has_value();
+ const bool generate_nonconstexpr = base_type_expression.has_value();
std::vector<Declaration*> result;
// Build non-constexpr types.
if (generate_nonconstexpr) {
- DCHECK(base_identifier.has_value());
+ DCHECK(base_type_expression.has_value());
if (is_open) {
// For open enumerations, we define an abstract type and inherit all
@@ -1159,12 +1195,16 @@ base::Optional<ParseResult> MakeEnumDeclaration(
// type kEntryN extends Enum;
// }
auto type_decl = MakeNode<AbstractTypeDeclaration>(
- name_identifier, false, base_identifier, base::nullopt);
+ name_identifier, false, base_type_expression, base::nullopt);
+
+ TypeExpression* name_type_expression =
+ MakeNode<BasicTypeExpression>(name_identifier->value);
+ name_type_expression->pos = name_identifier->pos;
std::vector<Declaration*> entry_decls;
for (const auto& entry_name_identifier : entries) {
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
- entry_name_identifier, false, name_identifier, base::nullopt));
+ entry_name_identifier, false, name_type_expression, base::nullopt));
}
result.push_back(type_decl);
@@ -1183,7 +1223,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
std::vector<Declaration*> entry_decls;
for (const auto& entry_name_identifier : entries) {
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
- entry_name_identifier, false, base_identifier, base::nullopt));
+ entry_name_identifier, false, base_type_expression, base::nullopt));
auto entry_type = MakeNode<BasicTypeExpression>(
std::vector<std::string>{name}, entry_name_identifier->value,
@@ -1213,13 +1253,15 @@ base::Optional<ParseResult> MakeEnumDeclaration(
// }
Identifier* constexpr_type_identifier =
MakeNode<Identifier>(std::string(CONSTEXPR_TYPE_PREFIX) + name);
- base::Optional<Identifier*> base_constexpr_type_identifier = base::nullopt;
- if (base_identifier) {
- base_constexpr_type_identifier = MakeNode<Identifier>(
- std::string(CONSTEXPR_TYPE_PREFIX) + (*base_identifier)->value);
+ TypeExpression* constexpr_type_expression = MakeNode<BasicTypeExpression>(
+ std::string(CONSTEXPR_TYPE_PREFIX) + name);
+ base::Optional<TypeExpression*> base_constexpr_type_expression =
+ base::nullopt;
+ if (base_type_expression) {
+ base_constexpr_type_expression = AddConstexpr(*base_type_expression);
}
result.push_back(MakeNode<AbstractTypeDeclaration>(
- constexpr_type_identifier, false, base_constexpr_type_identifier,
+ constexpr_type_identifier, false, base_constexpr_type_expression,
constexpr_generates));
TypeExpression* type_expr = nullptr;
@@ -1227,10 +1269,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
Identifier* fromconstexpr_parameter_identifier = nullptr;
Statement* fromconstexpr_body = nullptr;
if (generate_nonconstexpr) {
- DCHECK(base_identifier.has_value());
- TypeExpression* base_type_expr = MakeNode<BasicTypeExpression>(
- std::vector<std::string>{}, (*base_identifier)->value,
- std::vector<TypeExpression*>{});
+ DCHECK(base_type_expression.has_value());
type_expr = MakeNode<BasicTypeExpression>(
std::vector<std::string>{}, name, std::vector<TypeExpression*>{});
@@ -1243,7 +1282,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
std::vector<TypeExpression*>{type_expr},
std::vector<Expression*>{MakeNode<IntrinsicCallExpression>(
MakeNode<Identifier>("%FromConstexpr"),
- std::vector<TypeExpression*>{base_type_expr},
+ std::vector<TypeExpression*>{*base_type_expression},
std::vector<Expression*>{MakeNode<IdentifierExpression>(
std::vector<std::string>{},
fromconstexpr_parameter_identifier)})}));
@@ -1261,7 +1300,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
MakeNode<Identifier>(entry_constexpr_type), false,
- constexpr_type_identifier, constexpr_generates));
+ constexpr_type_expression, constexpr_generates));
// namespace Enum {
// const kEntry0: constexpr kEntry0 constexpr 'Enum::kEntry0';
@@ -1947,7 +1986,8 @@ struct TorqueGrammar : Grammar {
Rule({Token("builtin"), Token("("), typeList, Token(")"), Token("=>"),
&simpleType},
MakeFunctionTypeExpression),
- Rule({Token("&"), &simpleType}, MakeReferenceTypeExpression)};
+ Rule({CheckIf(Token("const")), Token("&"), &simpleType},
+ MakeReferenceTypeExpression)};
// Result: TypeExpression*
Symbol type = {Rule({&simpleType}), Rule({&type, Token("|"), &simpleType},
@@ -2320,7 +2360,7 @@ struct TorqueGrammar : Grammar {
Sequence({Token("generates"), &externalString})),
&optionalClassBody},
AsSingletonVector<Declaration*, MakeClassDeclaration>()),
- Rule({CheckIf(Token("@export")), Token("struct"), &name,
+ Rule({annotations, Token("struct"), &name,
TryOrDefault<GenericParameters>(&genericParameters), Token("{"),
List<Declaration*>(&method),
List<StructFieldExpression>(&structField), Token("}")},
@@ -2331,7 +2371,7 @@ struct TorqueGrammar : Grammar {
AsSingletonVector<Declaration*, MakeBitFieldStructDeclaration>()),
Rule({CheckIf(Token("transient")), Token("type"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- Optional<Identifier*>(Sequence({Token("extends"), &name})),
+ Optional<TypeExpression*>(Sequence({Token("extends"), &type})),
Optional<std::string>(
Sequence({Token("generates"), &externalString})),
Optional<std::string>(
@@ -2361,7 +2401,7 @@ struct TorqueGrammar : Grammar {
Rule({Token("extern"), CheckIf(Token("transitioning")), Token("runtime"),
&name, &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
AsSingletonVector<Declaration*, MakeExternalRuntime>()),
- Rule({CheckIf(Token("@export")), CheckIf(Token("transitioning")),
+ Rule({annotations, CheckIf(Token("transitioning")),
Optional<std::string>(
Sequence({Token("operator"), &externalString})),
Token("macro"), &name,
@@ -2381,7 +2421,7 @@ struct TorqueGrammar : Grammar {
Rule({Token("#include"), &externalString},
AsSingletonVector<Declaration*, MakeCppIncludeDeclaration>()),
Rule({CheckIf(Token("extern")), Token("enum"), &name,
- Optional<Identifier*>(Sequence({Token("extends"), &name})),
+ Optional<TypeExpression*>(Sequence({Token("extends"), &type})),
Optional<std::string>(
Sequence({Token("constexpr"), &externalString})),
Token("{"), NonemptyList<Identifier*>(&name, Token(",")),
diff --git a/deps/v8/src/torque/type-oracle.cc b/deps/v8/src/torque/type-oracle.cc
index 0e5a8a7b6b..acb998a7aa 100644
--- a/deps/v8/src/torque/type-oracle.cc
+++ b/deps/v8/src/torque/type-oracle.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/torque/type-oracle.h"
+#include "src/base/optional.h"
#include "src/torque/type-visitor.h"
+#include "src/torque/types.h"
namespace v8 {
namespace internal {
@@ -68,6 +70,32 @@ Namespace* TypeOracle::CreateGenericTypeInstantiationNamespace() {
return Get().generic_type_instantiation_namespaces_.back().get();
}
+// static
+std::vector<const ClassType*> TypeOracle::GetClasses() {
+ std::vector<const ClassType*> result;
+ for (const std::unique_ptr<AggregateType>& t : Get().aggregate_types_) {
+ if (auto* class_type = ClassType::DynamicCast(t.get())) {
+ result.push_back(class_type);
+ }
+ }
+ return result;
+}
+
+base::Optional<const Type*> TypeOracle::MatchReferenceGeneric(
+ const Type* reference_type, bool* is_const) {
+ if (auto type = Type::MatchUnaryGeneric(reference_type,
+ GetMutableReferenceGeneric())) {
+ if (is_const) *is_const = false;
+ return type;
+ }
+ if (auto type =
+ Type::MatchUnaryGeneric(reference_type, GetConstReferenceGeneric())) {
+ if (is_const) *is_const = true;
+ return type;
+ }
+ return base::nullopt;
+}
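// Illustrative usage (editorial note, not part of this commit): a single
// probe distinguishes "&T" from "const &T". Assuming a const Type* candidate:
//
//   bool is_const = false;
//   if (base::Optional<const Type*> referenced =
//           TypeOracle::MatchReferenceGeneric(candidate, &is_const)) {
//     // *referenced is the referenced type T; is_const records which
//     // reference generic matched. Passing nullptr skips the flag entirely.
//   }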
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 04a78d872e..114bf043b5 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -81,11 +81,22 @@ class TypeOracle : public ContextualClass<TypeOracle> {
static const Type* GetGenericTypeInstance(GenericType* generic_type,
TypeVector arg_types);
- static GenericType* GetReferenceGeneric() {
- return Declarations::LookupUniqueGenericType(QualifiedName(
- {TORQUE_INTERNAL_NAMESPACE_STRING}, REFERENCE_TYPE_STRING));
+ static GenericType* GetReferenceGeneric(bool is_const) {
+ return Declarations::LookupUniqueGenericType(
+ QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
+ is_const ? CONST_REFERENCE_TYPE_STRING
+ : MUTABLE_REFERENCE_TYPE_STRING));
+ }
+ static GenericType* GetConstReferenceGeneric() {
+ return GetReferenceGeneric(true);
+ }
+ static GenericType* GetMutableReferenceGeneric() {
+ return GetReferenceGeneric(false);
}
+ static base::Optional<const Type*> MatchReferenceGeneric(
+ const Type* reference_type, bool* is_const = nullptr);
+
static GenericType* GetSliceGeneric() {
return Declarations::LookupUniqueGenericType(
QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, SLICE_TYPE_STRING));
@@ -95,8 +106,20 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Declarations::LookupGlobalUniqueGenericType(WEAK_TYPE_STRING);
}
- static const Type* GetReferenceType(const Type* referenced_type) {
- return GetGenericTypeInstance(GetReferenceGeneric(), {referenced_type});
+ static GenericType* GetSmiTaggedGeneric() {
+ return Declarations::LookupGlobalUniqueGenericType(SMI_TAGGED_TYPE_STRING);
+ }
+
+ static const Type* GetReferenceType(const Type* referenced_type,
+ bool is_const) {
+ return GetGenericTypeInstance(GetReferenceGeneric(is_const),
+ {referenced_type});
+ }
+ static const Type* GetConstReferenceType(const Type* referenced_type) {
+ return GetReferenceType(referenced_type, true);
+ }
+ static const Type* GetMutableReferenceType(const Type* referenced_type) {
+ return GetReferenceType(referenced_type, false);
}
static const Type* GetSliceType(const Type* referenced_type) {
@@ -229,6 +252,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(UINT32_TYPE_STRING);
}
+ static const Type* GetUint31Type() {
+ return Get().GetBuiltinType(UINT31_TYPE_STRING);
+ }
+
static const Type* GetInt16Type() {
return Get().GetBuiltinType(INT16_TYPE_STRING);
}
@@ -285,6 +312,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(UNINITIALIZED_ITERATOR_TYPE_STRING);
}
+ static const Type* GetFixedArrayBaseType() {
+ return Get().GetBuiltinType(FIXED_ARRAY_BASE_TYPE_STRING);
+ }
+
static base::Optional<const Type*> ImplicitlyConvertableFrom(
const Type* to, const Type* from) {
while (from != nullptr) {
@@ -307,6 +338,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
static const std::vector<std::unique_ptr<BitFieldStructType>>&
GetBitFieldStructTypes();
+ // By construction, this list of all classes is topologically sorted w.r.t.
+ // inheritance.
+ static std::vector<const ClassType*> GetClasses();
+
static void FinalizeAggregateTypes();
static size_t FreshTypeId() { return Get().next_type_id_++; }
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index ce942365e7..10ed87d247 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -81,7 +81,7 @@ const AbstractType* TypeVisitor::ComputeType(
const Type* parent_type = nullptr;
if (decl->extends) {
- parent_type = Declarations::LookupType(*decl->extends);
+ parent_type = TypeVisitor::ComputeType(*decl->extends);
if (parent_type->IsUnionType()) {
// UnionType::IsSupertypeOf requires that types can only extend from non-
// union types in order to work correctly.
@@ -90,10 +90,6 @@ const AbstractType* TypeVisitor::ComputeType(
}
}
- if (generates == "" && parent_type) {
- generates = parent_type->GetGeneratedTNodeTypeName();
- }
-
if (decl->is_constexpr && decl->transient) {
ReportError("cannot declare a transient type that is also constexpr");
}
@@ -244,81 +240,83 @@ const StructType* TypeVisitor::ComputeType(
const ClassType* TypeVisitor::ComputeType(
ClassDeclaration* decl, MaybeSpecializationKey specialized_from) {
- ClassType* new_class;
// TODO(sigurds): Remove this hack by introducing a declarable for classes.
const TypeAlias* alias =
Declarations::LookupTypeAlias(QualifiedName(decl->name->value));
- GlobalContext::RegisterClass(alias);
DCHECK_EQ(*alias->delayed_, decl);
- bool is_shape = decl->flags & ClassFlag::kIsShape;
- if (is_shape && !(decl->flags & ClassFlag::kExtern)) {
- ReportError("Shapes must be extern, add \"extern\" to the declaration.");
+ ClassFlags flags = decl->flags;
+ bool is_shape = flags & ClassFlag::kIsShape;
+ std::string generates = decl->name->value;
+ const Type* super_type = TypeVisitor::ComputeType(*decl->super);
+ if (is_shape) {
+ if (!(flags & ClassFlag::kExtern)) {
+ ReportError("Shapes must be extern, add \"extern\" to the declaration.");
+ }
+ if (flags & ClassFlag::kUndefinedLayout) {
+ ReportError("Shapes need to define their layout.");
+ }
+ const ClassType* super_class = ClassType::DynamicCast(super_type);
+ if (!super_class ||
+ !super_class->IsSubtypeOf(TypeOracle::GetJSObjectType())) {
+ Error("Shapes need to extend a subclass of ",
+ *TypeOracle::GetJSObjectType())
+ .Throw();
+ }
+ // Shapes use their super class in CSA code since they have incomplete
+ // support for type-checks on the C++ side.
+ generates = super_class->name();
}
- if (is_shape && decl->flags & ClassFlag::kUndefinedLayout) {
- ReportError("Shapes need to define their layout.");
+ if (!decl->super) {
+ ReportError("Extern class must extend another type.");
}
- if (decl->flags & ClassFlag::kExtern) {
- if (!decl->super) {
- ReportError("Extern class must extend another type.");
+ if (super_type != TypeOracle::GetStrongTaggedType()) {
+ const ClassType* super_class = ClassType::DynamicCast(super_type);
+ if (!super_class) {
+ ReportError(
+ "class \"", decl->name->value,
+ "\" must extend either StrongTagged or an already declared class");
}
- const Type* super_type = TypeVisitor::ComputeType(*decl->super);
- if (super_type != TypeOracle::GetStrongTaggedType()) {
- const ClassType* super_class = ClassType::DynamicCast(super_type);
- if (!super_class) {
- ReportError(
- "class \"", decl->name->value,
- "\" must extend either StrongTagged or an already declared class");
- }
- if (super_class->HasUndefinedLayout() &&
- !(decl->flags & ClassFlag::kUndefinedLayout)) {
- Error("Class \"", decl->name->value,
- "\" defines its layout but extends a class which does not")
- .Position(decl->pos);
- }
+ if (super_class->HasUndefinedLayout() &&
+ !(flags & ClassFlag::kUndefinedLayout)) {
+ Error("Class \"", decl->name->value,
+ "\" defines its layout but extends a class which does not")
+ .Position(decl->pos);
}
-
- std::string generates = decl->name->value;
- if (is_shape) {
- const ClassType* super_class = ClassType::DynamicCast(super_type);
- if (!super_class ||
- !super_class->IsSubtypeOf(TypeOracle::GetJSObjectType())) {
- Error("Shapes need to extend a subclass of ",
- *TypeOracle::GetJSObjectType())
- .Throw();
- }
- // Shapes use their super class in CSA code since they have incomplete
- // support for type-checks on the C++ side.
- generates = super_class->name();
+ if ((flags & ClassFlag::kExport) &&
+ !(super_class->ShouldExport() || super_class->IsExtern())) {
+ Error("cannot export class ", decl->name,
+ " because superclass is neither @export or extern");
}
+ }
+ if ((flags & ClassFlag::kGenerateBodyDescriptor ||
+ flags & ClassFlag::kExport) &&
+ flags & ClassFlag::kUndefinedLayout) {
+ Error("Class \"", decl->name->value,
+ "\" requires a layout but doesn't have one");
+ }
+ if (flags & ClassFlag::kExtern) {
if (decl->generates) {
bool enforce_tnode_type = true;
generates = ComputeGeneratesType(decl->generates, enforce_tnode_type);
}
-
- new_class = TypeOracle::GetClassType(super_type, decl->name->value,
- decl->flags, generates, decl, alias);
- } else {
- if (!decl->super) {
- ReportError("Intern class ", decl->name->value,
- " must extend class Struct.");
- }
- const Type* super_type = TypeVisitor::ComputeType(*decl->super);
- const ClassType* super_class = ClassType::DynamicCast(super_type);
- const Type* struct_type =
- Declarations::LookupGlobalType(QualifiedName("Struct"));
- if (!super_class || super_class != struct_type) {
- ReportError("Intern class ", decl->name->value,
- " must extend class Struct.");
+ if (flags & ClassFlag::kExport) {
+ Error("cannot export a class that is marked extern");
}
+ } else {
if (decl->generates) {
ReportError("Only extern classes can specify a generated type.");
}
- new_class = TypeOracle::GetClassType(
- super_type, decl->name->value,
- decl->flags | ClassFlag::kGeneratePrint | ClassFlag::kGenerateVerify,
- decl->name->value, decl, alias);
+ if (super_type != TypeOracle::GetStrongTaggedType()) {
+ if (flags & ClassFlag::kUndefinedLayout) {
+ Error("non-external classes must have defined layouts");
+ }
+ }
+ flags = flags | ClassFlag::kGeneratePrint | ClassFlag::kGenerateVerify |
+ ClassFlag::kGenerateBodyDescriptor;
}
- return new_class;
+
+ return TypeOracle::GetClassType(super_type, decl->name->value, flags,
+ generates, decl, alias);
}
const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
@@ -406,33 +404,6 @@ void TypeVisitor::VisitClassFieldsAndMethods(
ReportError("in-object properties cannot be weak");
}
}
- if (!(class_declaration->flags & ClassFlag::kExtern)) {
- if (!field_type->IsSubtypeOf(TypeOracle::GetObjectType())) {
- ReportError(
- "non-extern classes only support subtypes of type Object, but "
- "found type ",
- *field_type);
- }
- if (field_expression.weak) {
- ReportError("non-extern classes do not support weak fields");
- }
- }
- const StructType* struct_type = StructType::DynamicCast(field_type);
- if (struct_type && struct_type != TypeOracle::GetFloat64OrHoleType()) {
- for (const Field& struct_field : struct_type->fields()) {
- if (!struct_field.name_and_type.type->IsSubtypeOf(
- TypeOracle::GetTaggedType())) {
- // If we ever actually need different sizes of struct fields, then we
- // can define the packing and alignment rules. Until then, let's keep
- // it simple. This restriction also helps keep the tagged and untagged
- // regions separate in the class layout (see also
- // FieldOffsetsGenerator::GetSectionFor).
- Error(
- "Classes do not support fields which are structs containing "
- "untagged data.");
- }
- }
- }
base::Optional<Expression*> array_length = field_expression.index;
const Field& field = class_type->RegisterField(
{field_expression.name_and_type.name->pos,
@@ -445,6 +416,12 @@ void TypeVisitor::VisitClassFieldsAndMethods(
field_expression.generate_verify});
ResidueClass field_size = std::get<0>(field.GetFieldSizeInformation());
if (field.index) {
+ // Validate that a value at any index in a packed array is aligned
+ // correctly, since it is possible to define a struct whose size is not a
+ // multiple of its alignment.
+ field.ValidateAlignment(class_offset +
+ field_size * ResidueClass::Unknown());
+
if (auto literal = NumberLiteralExpression::DynamicCast(*field.index)) {
size_t value = static_cast<size_t>(literal->number);
if (value != literal->number) {
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index a8e56ff5ee..e08716c98a 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -67,6 +67,20 @@ std::string Type::SimpleName() const {
return *aliases_.begin();
}
+// TODO(danno): HandlifiedCppTypeName should be used universally in Torque
+// where the C++ type of a Torque object is required.
+std::string Type::HandlifiedCppTypeName() const {
+ if (IsSubtypeOf(TypeOracle::GetSmiType())) return "int";
+ if (IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ base::Optional<const ClassType*> class_type = ClassSupertype();
+ std::string type =
+ class_type ? (*class_type)->GetGeneratedTNodeTypeName() : "Object";
+ return "Handle<" + type + ">";
+ } else {
+ return ConstexprVersion()->GetGeneratedTypeName();
+ }
+}
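// Illustrative behavior (editorial note, not part of this commit), assuming
// the usual builtin types are registered: a Smi subtype maps to "int", a
// tagged type maps to "Handle<T>" where T is the generated TNode type of its
// nearest class supertype (falling back to "Handle<Object>"), and any other
// type must have a constexpr version whose generated C++ type name is used.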
+
bool Type::IsSubtypeOf(const Type* supertype) const {
if (supertype->IsTopType()) return true;
if (IsNever()) return true;
@@ -99,6 +113,15 @@ base::Optional<const ClassType*> Type::ClassSupertype() const {
return base::nullopt;
}
+base::Optional<const StructType*> Type::StructSupertype() const {
+ for (const Type* t = this; t != nullptr; t = t->parent()) {
+ if (auto* struct_type = StructType::DynamicCast(t)) {
+ return struct_type;
+ }
+ }
+ return base::nullopt;
+}
+
// static
const Type* Type::CommonSupertype(const Type* a, const Type* b) {
int diff = a->Depth() - b->Depth();
@@ -147,6 +170,7 @@ std::string Type::GetGeneratedTNodeTypeName() const {
}
std::string AbstractType::GetGeneratedTNodeTypeNameImpl() const {
+ if (generated_type_.empty()) return parent()->GetGeneratedTNodeTypeName();
return generated_type_;
}
@@ -371,10 +395,31 @@ size_t StructType::PackedSize() const {
return result;
}
+StructType::Classification StructType::ClassifyContents() const {
+ Classification result = ClassificationFlag::kEmpty;
+ for (const Field& struct_field : fields()) {
+ const Type* field_type = struct_field.name_and_type.type;
+ if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ result |= ClassificationFlag::kTagged;
+ } else if (auto field_as_struct = field_type->StructSupertype()) {
+ result |= (*field_as_struct)->ClassifyContents();
+ } else {
+ result |= ClassificationFlag::kUntagged;
+ }
+ }
+ return result;
+}
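// Illustrative sketch (editorial note, not part of this commit): the flags
// accumulate across fields and nested structs, so a struct holding one
// tagged and one untagged field classifies as kMixed:
//
//   StructType::Classification c = some_struct->ClassifyContents();
//   if (c == StructType::ClassificationFlag::kMixed) {
//     // Needs both tagged and raw-data treatment (e.g. in body descriptors).
//   }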
+
// static
std::string Type::ComputeName(const std::string& basename,
MaybeSpecializationKey specialized_from) {
if (!specialized_from) return basename;
+ if (specialized_from->generic == TypeOracle::GetConstReferenceGeneric()) {
+ return torque::ToString("const &", *specialized_from->specialized_types[0]);
+ }
+ if (specialized_from->generic == TypeOracle::GetMutableReferenceGeneric()) {
+ return torque::ToString("&", *specialized_from->specialized_types[0]);
+ }
std::stringstream s;
s << basename << "<";
bool first = true;
@@ -501,47 +546,76 @@ void ClassType::GenerateAccessors() {
// For each field, construct AST snippets that implement a CSA accessor
// function. The implementation iterator will turn the snippets into code.
for (auto& field : fields_) {
- if (field.index || field.name_and_type.type == TypeOracle::GetVoidType()) {
+ if (field.name_and_type.type == TypeOracle::GetVoidType()) {
continue;
}
CurrentSourcePosition::Scope position_activator(field.pos);
+
IdentifierExpression* parameter =
MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"o"}));
+ IdentifierExpression* index =
+ MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"i"}));
// Load accessor
std::string camel_field_name = CamelifyString(field.name_and_type.name);
std::string load_macro_name = "Load" + this->name() + camel_field_name;
+
+ // For now, only generate indexed accessors for simple types
+ if (field.index.has_value() && field.name_and_type.type->IsStructType()) {
+ continue;
+ }
+
Signature load_signature;
load_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
load_signature.parameter_types.types.push_back(this);
+ if (field.index) {
+ load_signature.parameter_names.push_back(MakeNode<Identifier>("i"));
+ load_signature.parameter_types.types.push_back(
+ TypeOracle::GetIntPtrType());
+ }
load_signature.parameter_types.var_args = false;
load_signature.return_type = field.name_and_type.type;
- Statement* load_body =
- MakeNode<ReturnStatement>(MakeNode<FieldAccessExpression>(
- parameter, MakeNode<Identifier>(field.name_and_type.name)));
+
+ Expression* load_expression = MakeNode<FieldAccessExpression>(
+ parameter, MakeNode<Identifier>(field.name_and_type.name));
+ if (field.index) {
+ load_expression =
+ MakeNode<ElementAccessExpression>(load_expression, index);
+ }
+ Statement* load_body = MakeNode<ReturnStatement>(load_expression);
Declarations::DeclareMacro(load_macro_name, true, base::nullopt,
load_signature, load_body, base::nullopt);
// Store accessor
- IdentifierExpression* value = MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, MakeNode<Identifier>(std::string{"v"}));
- std::string store_macro_name = "Store" + this->name() + camel_field_name;
- Signature store_signature;
- store_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
- store_signature.parameter_names.push_back(MakeNode<Identifier>("v"));
- store_signature.parameter_types.types.push_back(this);
- store_signature.parameter_types.types.push_back(field.name_and_type.type);
- store_signature.parameter_types.var_args = false;
- // TODO(danno): Store macros probably should return their value argument
- store_signature.return_type = TypeOracle::GetVoidType();
- Statement* store_body =
- MakeNode<ExpressionStatement>(MakeNode<AssignmentExpression>(
- MakeNode<FieldAccessExpression>(
- parameter, MakeNode<Identifier>(field.name_and_type.name)),
- value));
- Declarations::DeclareMacro(store_macro_name, true, base::nullopt,
- store_signature, store_body, base::nullopt,
- false);
+ if (!field.const_qualified) {
+ IdentifierExpression* value = MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, MakeNode<Identifier>(std::string{"v"}));
+ std::string store_macro_name = "Store" + this->name() + camel_field_name;
+ Signature store_signature;
+ store_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
+ store_signature.parameter_types.types.push_back(this);
+ if (field.index) {
+ store_signature.parameter_names.push_back(MakeNode<Identifier>("i"));
+ store_signature.parameter_types.types.push_back(
+ TypeOracle::GetIntPtrType());
+ }
+ store_signature.parameter_names.push_back(MakeNode<Identifier>("v"));
+ store_signature.parameter_types.types.push_back(field.name_and_type.type);
+ store_signature.parameter_types.var_args = false;
+ // TODO(danno): Store macros probably should return their value argument
+ store_signature.return_type = TypeOracle::GetVoidType();
+ Expression* store_expression = MakeNode<FieldAccessExpression>(
+ parameter, MakeNode<Identifier>(field.name_and_type.name));
+ if (field.index) {
+ store_expression =
+ MakeNode<ElementAccessExpression>(store_expression, index);
+ }
+ Statement* store_body = MakeNode<ExpressionStatement>(
+ MakeNode<AssignmentExpression>(store_expression, value));
+ Declarations::DeclareMacro(store_macro_name, true, base::nullopt,
+ store_signature, store_body, base::nullopt,
+ false);
+ }
}
}
@@ -658,7 +732,7 @@ VisitResult ProjectStructField(VisitResult structure,
BottomOffset begin = structure.stack_range().begin();
// Check the struct and its super classes for fields.
- const StructType* type = StructType::cast(structure.type());
+ const StructType* type = *structure.type()->StructSupertype();
auto& fields = type->fields();
for (auto& field : fields) {
BottomOffset end = begin + LoweredSlotCount(field.name_and_type.type);
@@ -677,8 +751,8 @@ void AppendLoweredTypes(const Type* type, std::vector<const Type*>* result) {
DCHECK_NE(type, TypeOracle::GetNeverType());
if (type->IsConstexpr()) return;
if (type == TypeOracle::GetVoidType()) return;
- if (auto* s = StructType::DynamicCast(type)) {
- for (const Field& field : s->fields()) {
+ if (base::Optional<const StructType*> s = type->StructSupertype()) {
+ for (const Field& field : (*s)->fields()) {
AppendLoweredTypes(field.name_and_type.type, result);
}
} else {
@@ -781,9 +855,9 @@ size_t StructType::AlignmentLog2() const {
void Field::ValidateAlignment(ResidueClass at_offset) const {
const Type* type = name_and_type.type;
- const StructType* struct_type = StructType::DynamicCast(type);
+ base::Optional<const StructType*> struct_type = type->StructSupertype();
if (struct_type && struct_type != TypeOracle::GetFloat64OrHoleType()) {
- for (const Field& field : struct_type->fields()) {
+ for (const Field& field : (*struct_type)->fields()) {
field.ValidateAlignment(at_offset);
size_t field_size = std::get<0>(field.GetFieldSizeInformation());
at_offset += field_size;
@@ -837,12 +911,12 @@ base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) {
} else if (type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) {
size = TargetArchitecture::RawPtrSize();
size_string = "kIntptrSize";
- } else if (const StructType* struct_type = StructType::DynamicCast(type)) {
+ } else if (auto struct_type = type->StructSupertype()) {
if (type == TypeOracle::GetFloat64OrHoleType()) {
size = kDoubleSize;
size_string = "kDoubleSize";
} else {
- size = struct_type->PackedSize();
+ size = (*struct_type)->PackedSize();
size_string = std::to_string(size);
}
} else {
@@ -853,6 +927,7 @@ base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) {
bool IsAnyUnsignedInteger(const Type* type) {
return type == TypeOracle::GetUint32Type() ||
+ type == TypeOracle::GetUint31Type() ||
type == TypeOracle::GetUint16Type() ||
type == TypeOracle::GetUint8Type() ||
type == TypeOracle::GetUIntPtrType();
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 19b5cdd635..b60879ce85 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -113,6 +113,8 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
// Used for naming generated code.
virtual std::string SimpleName() const;
+ std::string HandlifiedCppTypeName() const;
+
const Type* parent() const { return parent_; }
bool IsVoid() const { return IsAbstractName(VOID_TYPE_STRING); }
bool IsNever() const { return IsAbstractName(NEVER_TYPE_STRING); }
@@ -131,6 +133,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
virtual const Type* NonConstexprVersion() const { return this; }
std::string GetConstexprGeneratedTypeName() const;
base::Optional<const ClassType*> ClassSupertype() const;
+ base::Optional<const StructType*> StructSupertype() const;
virtual std::vector<RuntimeType> GetRuntimeTypes() const { return {}; }
static const Type* CommonSupertype(const Type* a, const Type* b);
void AddAlias(std::string alias) const { aliases_.insert(std::move(alias)); }
@@ -256,6 +259,9 @@ class AbstractType final : public Type {
const std::string& name() const { return name_; }
std::string ToExplicitString() const override { return name(); }
std::string GetGeneratedTypeNameImpl() const override {
+ if (generated_type_.empty()) {
+ return parent()->GetGeneratedTypeName();
+ }
return IsConstexpr() ? generated_type_ : "TNode<" + generated_type_ + ">";
}
std::string GetGeneratedTNodeTypeNameImpl() const override;
@@ -575,6 +581,17 @@ class StructType final : public AggregateType {
size_t AlignmentLog2() const override;
+ enum class ClassificationFlag {
+ kEmpty = 0,
+ kTagged = 1 << 0,
+ kUntagged = 1 << 1,
+ kMixed = kTagged | kUntagged,
+ };
+ using Classification = base::Flags<ClassificationFlag>;
+
+ // Classifies a struct as containing tagged data, untagged data, or both.
+ Classification ClassifyContents() const;
+
private:
friend class TypeOracle;
StructType(Namespace* nspace, const StructDeclaration* decl,
@@ -600,12 +617,15 @@ class ClassType final : public AggregateType {
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsExtern() const { return flags_ & ClassFlag::kExtern; }
bool ShouldGeneratePrint() const {
- return (flags_ & ClassFlag::kGeneratePrint || !IsExtern()) &&
- !HasUndefinedLayout();
+ return !IsExtern() ||
+ ((flags_ & ClassFlag::kGeneratePrint) && !HasUndefinedLayout());
}
bool ShouldGenerateVerify() const {
- return (flags_ & ClassFlag::kGenerateVerify || !IsExtern()) &&
- !HasUndefinedLayout() && !IsShape();
+ return !IsExtern() || ((flags_ & ClassFlag::kGenerateVerify) &&
+ (!HasUndefinedLayout() && !IsShape()));
+ }
+ bool ShouldGenerateBodyDescriptor() const {
+ return flags_ & ClassFlag::kGenerateBodyDescriptor || !IsExtern();
}
bool IsTransient() const override { return flags_ & ClassFlag::kTransient; }
bool IsAbstract() const { return flags_ & ClassFlag::kAbstract; }
@@ -613,8 +633,10 @@ class ClassType final : public AggregateType {
return flags_ & ClassFlag::kHasSameInstanceTypeAsParent;
}
bool GenerateCppClassDefinitions() const {
- return flags_ & ClassFlag::kGenerateCppClassDefinitions || !IsExtern();
+ return flags_ & ClassFlag::kGenerateCppClassDefinitions || !IsExtern() ||
+ ShouldGenerateBodyDescriptor();
}
+ bool ShouldExport() const { return flags_ & ClassFlag::kExport; }
bool IsShape() const { return flags_ & ClassFlag::kIsShape; }
bool HasStaticSize() const;
bool HasIndexedField() const override;
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 9c04d73d1b..312adcfb24 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -222,7 +222,7 @@ bool IsUpperCamelCase(const std::string& s) {
if (s.empty()) return false;
size_t start = 0;
if (s[0] == '_') start = 1;
- return isupper(s[start]) && !ContainsUnderscore(s.substr(1));
+ return isupper(s[start]);
}
bool IsSnakeCase(const std::string& s) {
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 448e7da7fb..1c5b3079f5 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -6,10 +6,10 @@
#define V8_TORQUE_UTILS_H_
#include <ostream>
+#include <queue>
#include <streambuf>
#include <string>
#include <unordered_set>
-#include <vector>
#include "src/base/functional.h"
#include "src/base/optional.h"
@@ -172,6 +172,13 @@ void PrintCommaSeparatedList(std::ostream& os, const T& list) {
struct BottomOffset {
size_t offset;
+ BottomOffset(std::nullptr_t zero = 0) // NOLINT(runtime/explicit)
+ : offset(0) {}
+ explicit BottomOffset(std::size_t offset) : offset(offset) {}
+ BottomOffset& operator=(std::size_t offset) {
+ this->offset = offset;
+ return *this;
+ }
BottomOffset& operator++() {
++offset;
return *this;
@@ -488,6 +495,36 @@ class ResidueClass {
static const size_t kMaxModulusLog2 = 8 * sizeof(size_t);
};
+template <typename T>
+class Worklist {
+ public:
+ bool IsEmpty() const {
+ DCHECK_EQ(queue_.size(), contained_.size());
+ return queue_.empty();
+ }
+
+ bool Enqueue(T value) {
+ if (contained_.find(value) != contained_.end()) return false;
+ queue_.push(value);
+ contained_.insert(value);
+ DCHECK_EQ(queue_.size(), contained_.size());
+ return true;
+ }
+
+ T Dequeue() {
+ DCHECK(!IsEmpty());
+ T value = queue_.front();
+ queue_.pop();
+ contained_.erase(value);
+ DCHECK_EQ(queue_.size(), contained_.size());
+ return value;
+ }
+
+ private:
+ std::queue<T> queue_;
+ std::unordered_set<T> contained_;
+};
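// Illustrative usage (editorial note, not part of this commit): the worklist
// deduplicates pending entries, so re-enqueueing an element that is already
// queued is a no-op. Assuming Block* a, Block* b, and a hypothetical
// Process() helper:
//
//   Worklist<Block*> worklist;
//   worklist.Enqueue(a);  // true: newly queued
//   worklist.Enqueue(a);  // false: already pending
//   worklist.Enqueue(b);  // true
//   while (!worklist.IsEmpty()) Process(worklist.Dequeue());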
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/utils/vector.h b/deps/v8/src/utils/vector.h
index 5c106cc0c8..71dadc6c0e 100644
--- a/deps/v8/src/utils/vector.h
+++ b/deps/v8/src/utils/vector.h
@@ -9,6 +9,7 @@
#include <cstring>
#include <iterator>
#include <memory>
+#include <type_traits>
#include "src/common/checks.h"
#include "src/common/globals.h"
@@ -71,6 +72,9 @@ class Vector {
// Returns a pointer to the start of the data in the vector.
constexpr T* begin() const { return start_; }
+ // For consistency with other containers, also provide a {data} accessor.
+ constexpr T* data() const { return start_; }
+
// Returns a pointer past the end of the data in the vector.
constexpr T* end() const { return start_ + length_; }
@@ -113,6 +117,14 @@ class Vector {
template <typename S>
static constexpr Vector<T> cast(Vector<S> input) {
+ // Casting is potentially dangerous, so be really restrictive here. This
+ // might be lifted once we have use cases for that.
+ STATIC_ASSERT(std::is_pod<S>::value);
+ STATIC_ASSERT(std::is_pod<T>::value);
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK_EQ(0, (input.length() * sizeof(S)) % sizeof(T));
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(input.begin()) % alignof(T));
+#endif
return Vector<T>(reinterpret_cast<T*>(input.begin()),
input.length() * sizeof(S) / sizeof(T));
}
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 9b7f8696a1..fe921c6406 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -264,7 +264,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
patching_assembler.PadWithNops();
// Now generate the OOL code.
- AllocateStackSpace(bytes);
+ AllocateStackSpace(frame_size);
// Jump back to the start of the function (from {pc_offset()} to {offset +
// liftoff::kPatchInstructionsRequired * kInstrSize}).
int func_start_offset =
@@ -287,17 +287,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type) {
- case kWasmS128:
- return ValueTypes::ElementSizeInBytes(type);
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return type.element_size_bytes();
default:
return kStackSlotSize;
}
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- switch (type) {
- case kWasmS128:
+ switch (type.kind()) {
+ case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
@@ -307,11 +307,11 @@ bool LiftoffAssembler::NeedsAlignment(ValueType type) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case kWasmI64: {
+ case ValueType::kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -319,10 +319,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::Move(reg.high_gp(), Operand(high_word));
break;
}
- case kWasmF32:
+ case ValueType::kF32:
vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
- case kWasmF64: {
+ case ValueType::kF64: {
Register extra_scratch = GetUnusedRegister(kGpReg).gp();
vmov(reg.fp(), Double(value.to_f64_boxed().get_scalar()), extra_scratch);
break;
@@ -582,25 +582,47 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicXor");
}
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister value, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { dmb(ISH); }
+
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
int32_t offset = (caller_slot_idx + 1) * kSystemPointerSize;
MemOperand src(fp, offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
ldr(dst.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
ldr(dst.low_gp(), src);
ldr(dst.high_gp(), MemOperand(fp, offset + kSystemPointerSize));
break;
- case kWasmF32:
+ case ValueType::kF32:
vldr(liftoff::GetFloatRegister(dst.fp()), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
vldr(dst.fp(), src);
break;
+ case ValueType::kS128: {
+ UseScratchRegisterScope temps(this);
+ Register addr = liftoff::CalculateActualAddress(this, &temps, src.rn(),
+ no_reg, src.offset());
+ vld1(Neon8, NeonListOperand(dst.low_fp(), 2), NeonMemOperand(addr));
+ break;
+ }
default:
UNREACHABLE();
}
@@ -636,21 +658,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
str(reg.gp(), dst);
break;
- case kWasmI64:
+ case ValueType::kI64:
str(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
str(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case kWasmF32:
+ case ValueType::kF32:
vstr(liftoff::GetFloatRegister(reg.fp()), dst);
break;
- case kWasmF64:
+ case ValueType::kF64:
vstr(reg.fp(), dst);
break;
- case kWasmS128: {
+ case ValueType::kS128: {
UseScratchRegisterScope temps(this);
Register addr = liftoff::CalculateActualAddress(this, &temps, dst.rn(),
no_reg, dst.offset());
@@ -674,12 +696,12 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
} else {
src = temps.Acquire();
}
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
mov(src, Operand(value.to_i32()));
str(src, dst);
break;
- case kWasmI64: {
+ case ValueType::kI64: {
int32_t low_word = value.to_i64();
mov(src, Operand(low_word));
str(src, liftoff::GetHalfStackSlot(offset, kLowWord));
@@ -695,21 +717,21 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
ldr(reg.gp(), liftoff::GetStackSlot(offset));
break;
- case kWasmI64:
+ case ValueType::kI64:
ldr(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
ldr(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case kWasmF32:
+ case ValueType::kF32:
vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(offset));
break;
- case kWasmF64:
+ case ValueType::kF64:
vldr(reg.fp(), liftoff::GetStackSlot(offset));
break;
- case kWasmS128: {
+ case ValueType::kS128: {
// Get memory address of slot to fill from.
MemOperand slot = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this);
@@ -1531,11 +1553,266 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
}
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::Move(dst.low_fp(), src.fp());
+ TurboAssembler::Move(dst.high_fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ ExtractLane(dst.fp(), liftoff::GetSimd128Register(lhs.low_fp()),
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(src1.low_fp()), src2.fp(),
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vadd(dst.low_fp(), lhs.low_fp(), rhs.low_fp());
+ vadd(dst.high_fp(), lhs.high_fp(), rhs.high_fp());
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vsub(dst.low_fp(), lhs.low_fp(), rhs.low_fp());
+ vsub(dst.high_fp(), lhs.high_fp(), rhs.high_fp());
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "f64x2mul");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon32, liftoff::GetSimd128Register(dst.low_fp()), src.fp(), 0);
}
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ ExtractLane(liftoff::GetFloatRegister(dst.fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(src1.low_fp()),
+ liftoff::GetFloatRegister(src2.fp()), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vadd(liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vsub(liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "f32x4mul");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Simd128Register dst_simd = liftoff::GetSimd128Register(dst.low_fp());
+ vdup(Neon32, dst_simd, src.low_gp());
+ ReplaceLane(dst_simd, dst_simd, src.high_gp(), NeonS32, 1);
+ ReplaceLane(dst_simd, dst_simd, src.high_gp(), NeonS32, 3);
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ ExtractLane(dst.low_gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS32,
+ imm_lane_idx * 2);
+ ExtractLane(dst.high_gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS32,
+ imm_lane_idx * 2 + 1);
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ Simd128Register dst_simd = liftoff::GetSimd128Register(dst.low_fp());
+ Simd128Register src1_simd = liftoff::GetSimd128Register(src1.low_fp());
+ ReplaceLane(dst_simd, src1_simd, src2.low_gp(), NeonS32, imm_lane_idx * 2);
+ ReplaceLane(dst_simd, dst_simd, src2.high_gp(), NeonS32,
+ imm_lane_idx * 2 + 1);
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vadd(Neon64, liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vsub(Neon64, liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2mul");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vdup(Neon32, liftoff::GetSimd128Register(dst.low_fp()), src.gp());
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS32,
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(src1.low_fp()), src2.gp(), NeonS32,
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vadd(Neon32, liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vsub(Neon32, liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4mul");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vdup(Neon16, liftoff::GetSimd128Register(dst.low_fp()), src.gp());
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vadd(Neon16, liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vsub(Neon16, liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8mul");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonU16,
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS16,
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(src1.low_fp()), src2.gp(), NeonS16,
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vdup(Neon8, liftoff::GetSimd128Register(dst.low_fp()), src.gp());
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonU8,
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS8,
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(src1.low_fp()), src2.gp(), NeonS8,
+ imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vadd(Neon8, liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vsub(Neon8, liftoff::GetSimd128Register(dst.low_fp()),
+ liftoff::GetSimd128Register(lhs.low_fp()),
+ liftoff::GetSimd128Register(rhs.low_fp()));
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16mul");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
ldr(limit_address, MemOperand(limit_address));
cmp(sp, limit_address);
@@ -1603,7 +1880,7 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Ret();
}
-void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
@@ -1616,25 +1893,25 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
- switch (param_type) {
- case kWasmI32:
+ switch (param_type.kind()) {
+ case ValueType::kI32:
str(args->gp(), MemOperand(sp, arg_bytes));
break;
- case kWasmI64:
+ case ValueType::kI64:
str(args->low_gp(), MemOperand(sp, arg_bytes));
str(args->high_gp(), MemOperand(sp, arg_bytes + kSystemPointerSize));
break;
- case kWasmF32:
+ case ValueType::kF32:
vstr(liftoff::GetFloatRegister(args->fp()), MemOperand(sp, arg_bytes));
break;
- case kWasmF64:
+ case ValueType::kF64:
vstr(args->fp(), MemOperand(sp, arg_bytes));
break;
default:
UNREACHABLE();
}
args++;
- arg_bytes += ValueTypes::MemSize(param_type);
+ arg_bytes += param_type.element_size_bytes();
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -1659,18 +1936,18 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
// Load potential output value from the buffer on the stack.
if (out_argument_type != kWasmStmt) {
- switch (out_argument_type) {
- case kWasmI32:
+ switch (out_argument_type.kind()) {
+ case ValueType::kI32:
ldr(result_reg->gp(), MemOperand(sp));
break;
- case kWasmI64:
+ case ValueType::kI64:
ldr(result_reg->low_gp(), MemOperand(sp));
ldr(result_reg->high_gp(), MemOperand(sp, kSystemPointerSize));
break;
- case kWasmF32:
+ case ValueType::kF32:
vldr(liftoff::GetFloatRegister(result_reg->fp()), MemOperand(sp));
break;
- case kWasmF64:
+ case ValueType::kF64:
vldr(result_reg->fp(), MemOperand(sp));
break;
default:
@@ -1684,7 +1961,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
Call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
DCHECK(target != no_reg);
@@ -1706,52 +1983,63 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(sp, sp, Operand(size));
}
-void LiftoffAssembler::DebugBreak() { stop(); }
-
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
- switch (src.type()) {
+ switch (src.type().kind()) {
// i32 and i64 can be treated as similar cases, i64 being previously
// split into two i32 registers
- case kWasmI32:
- case kWasmI64:
- case kWasmF32: {
+ case ValueType::kI32:
+ case ValueType::kI64:
+ case ValueType::kF32: {
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
asm_->ldr(scratch,
liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->Push(scratch);
} break;
- case kWasmF64: {
+ case ValueType::kF64: {
UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD();
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->vpush(scratch);
} break;
+ case ValueType::kS128: {
+ MemOperand mem_op = liftoff::GetStackSlot(slot.src_offset_);
+ UseScratchRegisterScope temps(asm_);
+ Register addr = liftoff::CalculateActualAddress(
+ asm_, &temps, mem_op.rn(), no_reg, mem_op.offset());
+ QwNeonRegister scratch = temps.AcquireQ();
+ asm_->vld1(Neon8, NeonListOperand(scratch), NeonMemOperand(addr));
+ asm_->vpush(scratch);
+ break;
+ }
default:
UNREACHABLE();
}
break;
}
case LiftoffAssembler::VarState::kRegister:
- switch (src.type()) {
- case kWasmI64: {
+ switch (src.type().kind()) {
+ case ValueType::kI64: {
LiftoffRegister reg =
slot.half_ == kLowWord ? src.reg().low() : src.reg().high();
asm_->push(reg.gp());
} break;
- case kWasmI32:
+ case ValueType::kI32:
asm_->push(src.reg().gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
asm_->vpush(liftoff::GetFloatRegister(src.reg().fp()));
break;
- case kWasmF64:
+ case ValueType::kF64:
asm_->vpush(src.reg().fp());
break;
+ case ValueType::kS128:
+ asm_->vpush(liftoff::GetSimd128Register(src.reg().low_fp()));
+ break;
default:
UNREACHABLE();
}
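
The pattern that recurs through the rest of this patch: ValueType is now a class rather than a plain enum, so switches move from the value itself to type.kind(), and size queries move from the static ValueTypes helpers onto the type object. In outline:

    // Before:  switch (type) { case kWasmI32: ... }
    //          arg_bytes += ValueTypes::MemSize(type);
    // After:
    switch (type.kind()) {
      case ValueType::kI32:
        // ...
        break;
      default:
        break;
    }
    int size = type.element_size_bytes();  // replaces ValueTypes::MemSize
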
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 9697fabf9f..bcf78184b1 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -46,16 +46,16 @@ inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
return reg.gp().W();
- case kWasmI64:
+ case ValueType::kI64:
return reg.gp().X();
- case kWasmF32:
+ case ValueType::kF32:
return reg.fp().S();
- case kWasmF64:
+ case ValueType::kF64:
return reg.fp().D();
- case kWasmS128:
+ case ValueType::kS128:
return reg.fp().Q();
default:
UNREACHABLE();
@@ -74,14 +74,14 @@ inline CPURegList PadVRegList(RegList list) {
inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
return temps->AcquireW();
- case kWasmI64:
+ case ValueType::kI64:
return temps->AcquireX();
- case kWasmF32:
+ case ValueType::kF32:
return temps->AcquireS();
- case kWasmF64:
+ case ValueType::kF64:
return temps->AcquireD();
default:
UNREACHABLE();
@@ -174,17 +174,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
int LiftoffAssembler::SlotSizeForType(ValueType type) {
// TODO(zhin): Unaligned accesses typically take additional cycles; we should do
// some performance testing to see how big an effect this has.
- switch (type) {
- case kWasmS128:
- return ValueTypes::ElementSizeInBytes(type);
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return type.element_size_bytes();
default:
return kStackSlotSize;
}
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- switch (type) {
- case kWasmS128:
+ switch (type.kind()) {
+ case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
@@ -194,17 +194,17 @@ bool LiftoffAssembler::NeedsAlignment(ValueType type) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
Mov(reg.gp().W(), Immediate(value.to_i32(), rmode));
break;
- case kWasmI64:
+ case ValueType::kI64:
Mov(reg.gp().X(), Immediate(value.to_i64(), rmode));
break;
- case kWasmF32:
+ case ValueType::kF32:
Fmov(reg.fp().S(), value.to_f32_boxed().get_scalar());
break;
- case kWasmF64:
+ case ValueType::kF64:
Fmov(reg.fp().D(), value.to_f64_boxed().get_scalar());
break;
default:
@@ -382,6 +382,21 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicXor");
}
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister value, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { Dmb(InnerShareable, BarrierAll); }
+
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -429,8 +444,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this);
CPURegister src = CPURegister::no_reg();
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
if (value.to_i32() == 0) {
src = wzr;
} else {
@@ -438,7 +453,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
Mov(src.W(), value.to_i32());
}
break;
- case kWasmI64:
+ case ValueType::kI64:
if (value.to_i64() == 0) {
src = xzr;
} else {
@@ -1003,15 +1018,15 @@ void LiftoffAssembler::emit_jump(Register target) { Br(target); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
if (rhs.is_valid()) {
Cmp(lhs.W(), rhs.W());
} else {
Cmp(lhs.W(), wzr);
}
break;
- case kWasmI64:
+ case ValueType::kI64:
if (rhs.is_valid()) {
Cmp(lhs.X(), rhs.X());
} else {
@@ -1069,11 +1084,234 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
}
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Dup(dst.fp().V2D(), src.fp().D(), 0);
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Mov(dst.fp().D(), lhs.fp().V2D(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ Mov(dst.fp().V2D(), src1.fp().V2D());
+ }
+ Mov(dst.fp().V2D(), imm_lane_idx, src2.fp().V2D(), 0);
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fadd(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fsub(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "f64x2mul");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V4S(), src.fp().S(), 0);
}
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Mov(dst.fp().S(), lhs.fp().V4S(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ Mov(dst.fp().V4S(), src1.fp().V4S());
+ }
+ Mov(dst.fp().V4S(), imm_lane_idx, src2.fp().V4S(), 0);
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fadd(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fsub(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "f32x4mul");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Dup(dst.fp().V2D(), src.gp().X());
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Mov(dst.gp().X(), lhs.fp().V2D(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ Mov(dst.fp().V2D(), src1.fp().V2D());
+ }
+ Mov(dst.fp().V2D(), imm_lane_idx, src2.gp().X());
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Add(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Sub(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2mul");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Dup(dst.fp().V4S(), src.gp().W());
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Mov(dst.gp().W(), lhs.fp().V4S(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ Mov(dst.fp().V4S(), src1.fp().V4S());
+ }
+ Mov(dst.fp().V4S(), imm_lane_idx, src2.gp().W());
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Add(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Sub(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4mul");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Dup(dst.fp().V8H(), src.gp().W());
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Umov(dst.gp().W(), lhs.fp().V8H(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Smov(dst.gp().W(), lhs.fp().V8H(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ Mov(dst.fp().V8H(), src1.fp().V8H());
+ }
+ Mov(dst.fp().V8H(), imm_lane_idx, src2.gp().W());
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Add(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Sub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8mul");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Dup(dst.fp().V16B(), src.gp().W());
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Umov(dst.gp().W(), lhs.fp().V16B(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Smov(dst.gp().W(), lhs.fp().V16B(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ Mov(dst.fp().V16B(), src1.fp().V16B());
+ }
+ Mov(dst.fp().V16B(), imm_lane_idx, src2.gp().W());
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Add(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Sub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16mul");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
Ldr(limit_address, MemOperand(limit_address));
Cmp(sp, limit_address);
@@ -1103,7 +1341,7 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Ret();
}
-void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
@@ -1116,7 +1354,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
Poke(liftoff::GetRegFromType(*args++, param_type), arg_bytes);
- arg_bytes += ValueTypes::MemSize(param_type);
+ arg_bytes += param_type.element_size_bytes();
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -1150,7 +1388,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
Call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
// For Arm64, we have more cache registers than wasm parameters. That means
@@ -1178,15 +1416,16 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Drop(size, 1);
}
-void LiftoffAssembler::DebugBreak() { debug("DebugBreak", 0, BREAK); }
-
void LiftoffStackSlots::Construct() {
- size_t slot_count = slots_.size();
+ size_t num_slots = 0;
+ for (auto& slot : slots_) {
+ num_slots += slot.src_.type() == kWasmS128 ? 2 : 1;
+ }
// The stack pointer is required to be quadword aligned.
- asm_->Claim(RoundUp(slot_count, 2));
- size_t slot_index = 0;
+ asm_->Claim(RoundUp(num_slots, 2));
+ size_t poke_offset = num_slots * kXRegSize;
for (auto& slot : slots_) {
- size_t poke_offset = (slot_count - slot_index - 1) * kXRegSize;
+ poke_offset -= slot.src_.type() == kWasmS128 ? kXRegSize * 2 : kXRegSize;
switch (slot.src_.loc()) {
case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_);
@@ -1213,7 +1452,6 @@ void LiftoffStackSlots::Construct() {
}
break;
}
- slot_index++;
}
}
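
The rewritten arm64 Construct sizes the claim by type rather than by slot count, because an S128 spans two X-register units. A sketch of the bookkeeping with hypothetical slot types:

    const int units[] = {1, 2, 1};  // e.g. slots i64, s128, f32
    size_t num_slots = 0;
    for (int u : units) num_slots += u;          // == 4
    // Claim(RoundUp(num_slots, 2)) keeps sp quadword-aligned.
    size_t poke_offset = num_slots * kXRegSize;  // one past the highest slot
    for (int u : units) {
      poke_offset -= u * kXRegSize;  // this slot's own offset from the new sp
      // Poke(<slot value>, poke_offset);
    }
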
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 7e6356b833..0172b282dc 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -40,20 +40,23 @@ static constexpr LiftoffRegList kByteRegs =
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
int32_t offset, ValueType type) {
Operand src(base, offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->mov(dst.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->mov(dst.low_gp(), src);
assm->mov(dst.high_gp(), Operand(base, offset + 4));
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->movss(dst.fp(), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->movsd(dst.fp(), src);
break;
+ case ValueType::kS128:
+ assm->movdqu(dst.fp(), src);
+ break;
default:
UNREACHABLE();
}
@@ -62,18 +65,18 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
LiftoffRegister src, ValueType type) {
Operand dst(base, offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->mov(dst, src.gp());
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->mov(dst, src.low_gp());
assm->mov(Operand(base, offset + 4), src.high_gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->movss(dst, src.fp());
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->movsd(dst, src.fp());
break;
default:
@@ -82,22 +85,26 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->push(reg.gp());
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->push(reg.high_gp());
assm->push(reg.low_gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->AllocateStackSpace(sizeof(float));
assm->movss(Operand(esp, 0), reg.fp());
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->AllocateStackSpace(sizeof(double));
assm->movsd(Operand(esp, 0), reg.fp());
break;
+ case ValueType::kS128:
+ assm->AllocateStackSpace(sizeof(double) * 2);
+ assm->movdqu(Operand(esp, 0), reg.fp());
+ break;
default:
UNREACHABLE();
}
@@ -195,18 +202,18 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- return ValueTypes::ElementSizeInBytes(type);
+ return type.element_size_bytes();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) { return false; }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode));
break;
- case kWasmI64: {
+ case ValueType::kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -214,10 +221,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
break;
}
- case kWasmF32:
+ case ValueType::kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case kWasmF64:
+ case ValueType::kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -427,65 +434,56 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
Operand dst_op = Operand(dst_addr, offset_reg, times_1, offset_imm);
+ // i64 store uses a totally different approach, hence implement it separately.
+ if (type.value() == StoreType::kI64Store) {
+ auto scratch2 = GetUnusedRegister(kFpReg, pinned).fp();
+ movd(liftoff::kScratchDoubleReg, src.low().gp());
+ movd(scratch2, src.high().gp());
+ Punpckldq(liftoff::kScratchDoubleReg, scratch2);
+ movsd(dst_op, liftoff::kScratchDoubleReg);
+ // This lock+or is needed to achieve sequential consistency.
+ lock();
+ or_(Operand(esp, 0), Immediate(0));
+ return;
+ }
+
+ // Other i64 stores actually only use the low word.
+ if (src.is_pair()) src = src.low();
+ Register src_gp = src.gp();
+
+ bool is_byte_store = type.size() == 1;
+ LiftoffRegList src_candidates =
+ is_byte_store ? liftoff::kByteRegs : kGpCacheRegList;
pinned = pinned | LiftoffRegList::ForRegs(dst_addr, src, offset_reg);
- Register src_reg = src.is_gp_pair() ? src.low().gp() : src.gp();
- // If {src} is used after this operation, we are not allowed to overwrite it.
- // {kI64Store} and {kI(32|64)Store8} need special treatment below, so we don't
- // handle them here.
- if (type.value() != StoreType::kI64Store &&
- type.value() != StoreType::kI64Store8 &&
- type.value() != StoreType::kI32Store8 && cache_state()->is_used(src)) {
- Register old_src = src_reg;
- src_reg = GetUnusedRegister(kGpReg, pinned).gp();
- mov(src_reg, old_src);
+
+ // Ensure that {src} is a valid and otherwise unused register.
+ if (!src_candidates.has(src) || cache_state_.is_used(src)) {
+ // If there are no unused candidate registers, but {src} is a candidate,
+ // then spill other uses of {src}. Otherwise spill any candidate register
+ // and use that.
+ if (!cache_state_.has_unused_register(src_candidates, pinned) &&
+ src_candidates.has(src)) {
+ SpillRegister(src);
+ } else {
+ Register safe_src = GetUnusedRegister(src_candidates, pinned).gp();
+ mov(safe_src, src_gp);
+ src_gp = safe_src;
+ }
}
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8:
- if (cache_state()->is_used(src)) {
- // Only the lower 4 registers can be addressed as 8-bit registers.
- if (cache_state()->has_unused_register(liftoff::kByteRegs, pinned)) {
- Register byte_src =
- GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
- mov(byte_src, src_reg);
- xchg_b(byte_src, dst_op);
- } else { // (if !cache_state()->has_unused_register(...))
- // No byte register is available, we have to spill {src}.
- push(src_reg);
- xchg_b(src_reg, dst_op);
- pop(src_reg);
- }
- } else { // if (!cache_state()->is_used(src)) {
- if (src_reg.is_byte_register()) {
- xchg_b(src_reg, dst_op);
- } else { // if (!src.gp().is_byte_register())
- Register byte_src =
- GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
- mov(byte_src, src_reg);
- xchg_b(byte_src, dst_op);
- }
- }
+ xchg_b(src_gp, dst_op);
return;
case StoreType::kI64Store16:
case StoreType::kI32Store16:
- xchg_w(src_reg, dst_op);
+ xchg_w(src_gp, dst_op);
return;
case StoreType::kI64Store32:
case StoreType::kI32Store:
- xchg(src_reg, dst_op);
- return;
- case StoreType::kI64Store: {
- auto scratch2 = GetUnusedRegister(kFpReg, pinned).fp();
- movd(liftoff::kScratchDoubleReg, src.low().gp());
- movd(scratch2, src.high().gp());
- Punpckldq(liftoff::kScratchDoubleReg, scratch2);
- movsd(dst_op, liftoff::kScratchDoubleReg);
- // This lock+or is needed to achieve sequential consistency.
- lock();
- or_(Operand(esp, 0), Immediate(0));
+ xchg(src_gp, dst_op);
return;
- }
default:
UNREACHABLE();
}
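
The hoisted kI64Store path deserves a note: ia32 has no 8-byte integer store, so the pair is packed into an XMM register and written with a single movsd, and the trailing lock-or on the stack top acts as a full fence (the usual cheaper substitute for mfence) to make the store sequentially consistent. The emitted sequence, sketched as assembly:

    // movd      xmm_scratch, src_lo
    // movd      xmm_tmp,     src_hi
    // punpckldq xmm_scratch, xmm_tmp    ; xmm_scratch = src_hi:src_lo
    // movsd     [dst], xmm_scratch      ; one atomic 8-byte store
    // lock or   dword ptr [esp], 0      ; drains the store buffer, like mfence
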
@@ -521,6 +519,21 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicXor");
}
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister value, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { mfence(); }
+
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -565,21 +578,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
mov(dst, reg.gp());
break;
- case kWasmI64:
+ case ValueType::kI64:
mov(liftoff::GetHalfStackSlot(offset, kLowWord), reg.low_gp());
mov(liftoff::GetHalfStackSlot(offset, kHighWord), reg.high_gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
movss(dst, reg.fp());
break;
- case kWasmF64:
+ case ValueType::kF64:
movsd(dst, reg.fp());
break;
- case kWasmS128:
+ case ValueType::kS128:
movdqu(dst, reg.fp());
break;
default:
@@ -590,11 +603,11 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
mov(dst, Immediate(value.to_i32()));
break;
- case kWasmI64: {
+ case ValueType::kI64: {
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
mov(liftoff::GetHalfStackSlot(offset, kLowWord), Immediate(low_word));
@@ -609,21 +622,21 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
Operand src = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
mov(reg.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
mov(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
mov(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case kWasmF32:
+ case ValueType::kF32:
movss(reg.fp(), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
movsd(reg.fp(), src);
break;
- case kWasmS128:
+ case ValueType::kS128:
movdqu(reg.fp(), src);
break;
default:
@@ -1770,8 +1783,8 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
if (rhs != no_reg) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
cmp(lhs, rhs);
break;
default:
@@ -1912,6 +1925,111 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
liftoff::EmitFloatSetCond<&Assembler::ucomisd>(this, cond, dst, lhs, rhs);
}
+namespace liftoff {
+template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
+ void (Assembler::*sse_op)(XMMRegister, XMMRegister)>
+void EmitSimdCommutativeBinOp(
+ LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs, base::Optional<CpuFeature> feature = base::nullopt) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx_op)(dst.fp(), lhs.fp(), rhs.fp());
+ return;
+ }
+
+ base::Optional<CpuFeatureScope> sse_scope;
+ if (feature.has_value()) sse_scope.emplace(assm, *feature);
+
+ if (dst.fp() == rhs.fp()) {
+ (assm->*sse_op)(dst.fp(), lhs.fp());
+ } else {
+ if (dst.fp() != lhs.fp()) (assm->movaps)(dst.fp(), lhs.fp());
+ (assm->*sse_op)(dst.fp(), rhs.fp());
+ }
+}
+
+template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
+ void (Assembler::*sse_op)(XMMRegister, XMMRegister)>
+void EmitSimdSub(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx_op)(dst.fp(), lhs.fp(), rhs.fp());
+ } else if (lhs.fp() == rhs.fp()) {
+ assm->pxor(dst.fp(), dst.fp());
+ } else if (dst.fp() == rhs.fp()) {
+ assm->movaps(kScratchDoubleReg, rhs.fp());
+ assm->movaps(dst.fp(), lhs.fp());
+ (assm->*sse_op)(dst.fp(), kScratchDoubleReg);
+ } else {
+ if (dst.fp() != lhs.fp()) assm->movaps(dst.fp(), lhs.fp());
+ (assm->*sse_op)(dst.fp(), rhs.fp());
+ }
+}
+} // namespace liftoff
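
These two helpers centralize the AVX-versus-SSE dispatch for the binary emitters that follow. AVX ops are three-address while SSE ops overwrite their first operand, so aliasing between dst and the sources must be untangled: a commutative op can simply be applied the other way around when dst aliases rhs, whereas subtraction cannot, hence its copy through kScratchDoubleReg. The SSE aliasing cases, in outline:

    // dst == rhs:  op(dst, lhs)               // legal only because op commutes
    // dst != rhs:  if (dst != lhs) movaps(dst, lhs);
    //              op(dst, rhs)
    // EmitSimdSub with dst == rhs instead parks rhs in kScratchDoubleReg first.
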
+
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movddup(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vshufpd(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ if (imm_lane_idx != 0) shufpd(dst.fp(), dst.fp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ // TODO(fanchenk): Use movlhps and blendpd
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ if (imm_lane_idx == 0) {
+ vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00000000);
+ vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01010000);
+ } else {
+ vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00100000);
+ vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01110000);
+ }
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ if (imm_lane_idx == 0) {
+ insertps(dst.fp(), src2.fp(), 0b00000000);
+ insertps(dst.fp(), src2.fp(), 0b01010000);
+ } else {
+ insertps(dst.fp(), src2.fp(), 0b00100000);
+ insertps(dst.fp(), src2.fp(), 0b01110000);
+ }
+ }
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddpd, &Assembler::addpd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vsubpd, &Assembler::subpd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulpd, &Assembler::mulpd>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -1925,6 +2043,322 @@ void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vinsertps(dst.fp(), src1.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ insertps(dst.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ }
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddps, &Assembler::addps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vsubps, &Assembler::subps>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulps, &Assembler::mulps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pinsrd(dst.fp(), src.low_gp(), 0);
+ Pinsrd(dst.fp(), src.high_gp(), 1);
+ Pshufd(dst.fp(), dst.fp(), 0x44);
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrd(dst.low_gp(), lhs.fp(), imm_lane_idx * 2);
+ Pextrd(dst.high_gp(), lhs.fp(), imm_lane_idx * 2 + 1);
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst.fp(), src1.fp(), src2.low_gp(), imm_lane_idx * 2);
+ vpinsrd(dst.fp(), dst.fp(), src2.high_gp(), imm_lane_idx * 2 + 1);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrd(dst.fp(), src2.low_gp(), imm_lane_idx * 2);
+ pinsrd(dst.fp(), src2.high_gp(), imm_lane_idx * 2 + 1);
+ }
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddq, &Assembler::paddq>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vpsubq, &Assembler::psubq>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister tmp1 =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ LiftoffRegister tmp2 =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
+ Movaps(tmp1.fp(), lhs.fp());
+ Movaps(tmp2.fp(), rhs.fp());
+ // Multiply high dword of each qword of left with right.
+ Psrlq(tmp1.fp(), 32);
+ Pmuludq(tmp1.fp(), tmp1.fp(), rhs.fp());
+ // Multiply high dword of each qword of right with left.
+ Psrlq(tmp2.fp(), 32);
+ Pmuludq(tmp2.fp(), tmp2.fp(), lhs.fp());
+ Paddq(tmp2.fp(), tmp2.fp(), tmp1.fp());
+ Psllq(tmp2.fp(), tmp2.fp(), 32);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpmuludq(dst.fp(), lhs.fp(), rhs.fp());
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ pmuludq(dst.fp(), rhs.fp());
+ }
+ Paddq(dst.fp(), dst.fp(), tmp2.fp());
+}
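
SSE has no 64x64-bit multiply, so the sequence above assembles one from pmuludq (32x32->64) pieces; per lane it is the schoolbook identity, shown here as a scalar equivalent:

    // Scalar equivalent of one lane of emit_i64x2_mul (sketch).
    uint64_t I64x2MulLane(uint64_t a, uint64_t b) {
      uint64_t a_lo = a & 0xFFFFFFFF, a_hi = a >> 32;
      uint64_t b_lo = b & 0xFFFFFFFF, b_hi = b >> 32;
      // The a_hi * b_hi term shifts out past bit 63 entirely, so only the two
      // cross terms and the low product survive modulo 2^64.
      return a_lo * b_lo + ((a_hi * b_lo + a_lo * b_hi) << 32);
    }
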
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pshufd(dst.fp(), dst.fp(), 0);
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrd(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrd(dst.fp(), src2.gp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vpsubd, &Assembler::psubd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmulld, &Assembler::pmulld>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pshuflw(dst.fp(), dst.fp(), 0);
+ Pshufd(dst.fp(), dst.fp(), 0);
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+ movsx_w(dst.gp(), dst.gp());
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrw(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrw(dst.fp(), src2.gp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vpsubw, &Assembler::psubw>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmullw, &Assembler::pmullw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrb(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrb(dst.gp(), lhs.fp(), imm_lane_idx);
+ movsx_b(dst.gp(), dst.gp());
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrb(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrb(dst.fp(), src2.gp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vpsubb, &Assembler::psubb>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister tmp =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ vpsrlw(tmp.fp(), lhs.fp(), 8);
+ vpsrlw(liftoff::kScratchDoubleReg, rhs.fp(), 8);
+ // t = I16x8Mul(t, s)
+ //    => __PP __PP ...  __PP  __PP
+ vpmullw(tmp.fp(), tmp.fp(), liftoff::kScratchDoubleReg);
+ // s = left * 256
+ vpsllw(liftoff::kScratchDoubleReg, lhs.fp(), 8);
+ // dst = I16x8Mul(left * 256, right)
+ //    => pp__ pp__ ...  pp__  pp__
+ vpmullw(dst.fp(), liftoff::kScratchDoubleReg, rhs.fp());
+ // dst = I16x8Shr(dst, 8)
+ //    => 00pp 00pp ...  00pp  00pp
+ vpsrlw(dst.fp(), dst.fp(), 8);
+ // t = I16x8Shl(t, 8)
+ //    => PP00 PP00 ...  PP00  PP00
+ vpsllw(tmp.fp(), tmp.fp(), 8);
+ // dst = I16x8Or(dst, t)
+ //    => PPpp PPpp ...  PPpp  PPpp
+ vpor(dst.fp(), dst.fp(), tmp.fp());
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ movaps(tmp.fp(), dst.fp());
+ movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ psrlw(tmp.fp(), 8);
+ psrlw(liftoff::kScratchDoubleReg, 8);
+ // dst = left * 256
+ psllw(dst.fp(), 8);
+ // t = I16x8Mul(t, s)
+ //    => __PP __PP ...  __PP  __PP
+ pmullw(tmp.fp(), liftoff::kScratchDoubleReg);
+ // dst = I16x8Mul(left * 256, right)
+ //    => pp__ pp__ ...  pp__  pp__
+ pmullw(dst.fp(), rhs.fp());
+ // t = I16x8Shl(t, 8)
+ //    => PP00 PP00 ...  PP00  PP00
+ psllw(tmp.fp(), 8);
+ // dst = I16x8Shr(dst, 8)
+ //    => 00pp 00pp ...  00pp  00pp
+ psrlw(dst.fp(), 8);
+ // dst = I16x8Or(dst, t)
+ //    => PPpp PPpp ...  PPpp  PPpp
+ por(dst.fp(), tmp.fp());
+ }
+}
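
x86 lacks an 8-bit multiply as well; the sequence above synthesizes it from two 16-bit pmullw passes, keeping odd-byte products in the high bytes and recovering even-byte products from bits 8-15 of a pre-shifted multiply, then merging with OR. Per byte lane the net effect is plain truncating multiplication:

    // Scalar equivalent of one byte lane of emit_i8x16_mul (sketch); wasm
    // i8x16.mul keeps only the low 8 bits of the product.
    uint8_t I8x16MulLane(uint8_t a, uint8_t b) {
      return static_cast<uint8_t>(a * b);
    }
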
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
cmp(esp, Operand(limit_address, 0));
j(below_equal, ool_code);
@@ -1985,7 +2419,7 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
-void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
@@ -1995,7 +2429,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
liftoff::Store(this, esp, arg_bytes, *args++, param_type);
- arg_bytes += ValueTypes::MemSize(param_type);
+ arg_bytes += param_type.element_size_bytes();
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -2035,7 +2469,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
wasm_call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
// Since we have more cache registers than parameter registers, the
@@ -2063,13 +2497,20 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(esp, Immediate(size));
}
-void LiftoffAssembler::DebugBreak() { int3(); }
-
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
+ // The combination of AllocateStackSpace and 2 movdqu is usually smaller
+ // in code size than doing 4 pushes.
+ if (src.type() == kWasmS128) {
+ asm_->AllocateStackSpace(sizeof(double) * 2);
+ asm_->movdqu(liftoff::kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_));
+ asm_->movdqu(Operand(esp, 0), liftoff::kScratchDoubleReg);
+ break;
+ }
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 68a8ad6e48..74df00590f 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -22,6 +22,8 @@ namespace wasm {
using VarState = LiftoffAssembler::VarState;
+constexpr ValueType LiftoffAssembler::kWasmIntPtr;
+
namespace {
class StackTransferRecipe {
@@ -214,6 +216,10 @@ class StackTransferRecipe {
RegisterLoad::HalfStack(stack_offset, kHighWord);
} else if (dst.is_fp_pair()) {
DCHECK_EQ(kWasmS128, type);
+ // The {load_dst_regs_.set} above sets both the low and the high fp reg.
+ // But unlike a gp_pair, a kWasmS128 is loaded in one go in ExecuteLoads,
+ // so unset the high fp register to skip loading it.
+ load_dst_regs_.clear(dst.high());
*register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type);
} else {
*register_load(dst) = RegisterLoad::Stack(stack_offset, type);
@@ -353,7 +359,9 @@ class RegisterReuseMap {
base::Optional<LiftoffRegister> Lookup(LiftoffRegister src) {
for (auto it = map_.begin(), end = map_.end(); it != end; it += 2) {
- if (it->is_gp_pair() == src.is_gp_pair() && *it == src) return *(it + 1);
+ if (it->is_gp_pair() == src.is_gp_pair() &&
+ it->is_fp_pair() == src.is_fp_pair() && *it == src)
+ return *(it + 1);
}
return {};
}
@@ -499,10 +507,8 @@ LiftoffAssembler::~LiftoffAssembler() {
}
}
-LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
- DCHECK(!cache_state_.stack_state.empty());
- VarState slot = cache_state_.stack_state.back();
- cache_state_.stack_state.pop_back();
+LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
+ LiftoffRegList pinned) {
switch (slot.loc()) {
case VarState::kStack: {
LiftoffRegister reg =
@@ -524,6 +530,24 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
UNREACHABLE();
}
+LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
+ DCHECK(!cache_state_.stack_state.empty());
+ VarState slot = cache_state_.stack_state.back();
+ cache_state_.stack_state.pop_back();
+ return LoadToRegister(slot, pinned);
+}
+
+LiftoffRegister LiftoffAssembler::PeekToRegister(int index,
+ LiftoffRegList pinned) {
+ DCHECK_LT(index, cache_state_.stack_state.size());
+ VarState& slot = cache_state_.stack_state.end()[-1 - index];
+ LiftoffRegister reg = LoadToRegister(slot, pinned);
+ if (!slot.is_reg()) {
+ slot.MakeRegister(reg);
+ }
+ return reg;
+}
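+
+// Illustrative use of {PeekToRegister} (assumed call site, not part of this
+// patch): peeking leaves the value stack intact, so the inspected slots are
+// dropped in bulk afterwards instead of being popped individually:
+//   LiftoffRegister rhs = asm->PeekToRegister(0, pinned);
+//   LiftoffRegister lhs = asm->PeekToRegister(1, pinned);
+//   ... emit code reading lhs and rhs ...
+//   asm->cache_state()->stack_state.pop_back(2);  // discard both slots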
+
void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
const CacheState& source) {
DCHECK_EQ(source.stack_height(), target.stack_height());
@@ -590,55 +614,24 @@ void LiftoffAssembler::SpillAllRegisters() {
cache_state_.reset_used_registers();
}
-void LiftoffAssembler::PrepareCall(FunctionSig* sig,
- compiler::CallDescriptor* call_descriptor,
- Register* target,
- Register* target_instance) {
- uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
- // Input 0 is the call target.
- constexpr size_t kInputShift = 1;
-
- // Spill all cache slots which are not being used as parameters.
- // Don't update any register use counters, they will be reset later anyway.
- for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
- idx < end; ++idx) {
- VarState& slot = cache_state_.stack_state[idx];
- if (!slot.is_reg()) continue;
- Spill(slot.offset(), slot.reg(), slot.type());
- slot.MakeStack();
- }
-
- LiftoffStackSlots stack_slots(this);
- StackTransferRecipe stack_transfers(this);
- LiftoffRegList param_regs;
-
- // Move the target instance (if supplied) into the correct instance register.
- compiler::LinkageLocation instance_loc =
- call_descriptor->GetInputLocation(kInputShift);
- DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister());
- Register instance_reg = Register::from_code(instance_loc.AsRegister());
- param_regs.set(instance_reg);
- if (target_instance && *target_instance != instance_reg) {
- stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
- LiftoffRegister(*target_instance),
- kWasmIntPtr);
- }
-
- // Now move all parameter values into the right slot for the call.
- // Don't pop values yet, such that the stack height is still correct when
- // executing the {stack_transfers}.
+namespace {
+void PrepareStackTransfers(const FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ const VarState* slots,
+ LiftoffStackSlots* stack_slots,
+ StackTransferRecipe* stack_transfers,
+ LiftoffRegList* param_regs) {
// Process parameters backwards, such that pushes of caller frame slots are
// in the correct order.
- uint32_t param_base = cache_state_.stack_height() - num_params;
uint32_t call_desc_input_idx =
static_cast<uint32_t>(call_descriptor->InputCount());
+ uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
for (uint32_t i = num_params; i > 0; --i) {
const uint32_t param = i - 1;
ValueType type = sig->GetParam(param);
const bool is_gp_pair = kNeedI64RegPair && type == kWasmI64;
const int num_lowered_params = is_gp_pair ? 2 : 1;
- const uint32_t stack_idx = param_base + param;
- const VarState& slot = cache_state_.stack_state[stack_idx];
+ const VarState& slot = slots[param];
const uint32_t stack_offset = slot.offset();
 // Process both halves of a register pair separately, because they are passed
// as separate parameters. One or both of them could end up on the stack.
@@ -671,21 +664,81 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
reg = LiftoffRegister::from_code(rc, reg_code);
}
- param_regs.set(reg);
+ param_regs->set(reg);
if (is_gp_pair) {
- stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_offset,
- half);
+ stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
+ half);
} else {
- stack_transfers.LoadIntoRegister(reg, slot, stack_offset);
+ stack_transfers->LoadIntoRegister(reg, slot, stack_offset);
}
} else {
DCHECK(loc.IsCallerFrameSlot());
- stack_slots.Add(slot, stack_offset, half);
+ stack_slots->Add(slot, stack_offset, half);
}
}
}
- // {call_desc_input_idx} should point after the instance parameter now.
- DCHECK_EQ(call_desc_input_idx, kInputShift + 1);
+}
+
+} // namespace
+
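+// {PrepareStackTransfers} is split out of the former {PrepareCall} body so
+// that {PrepareBuiltinCall} below can reuse it with a plain array of
+// {VarState}s that does not live on the value stack (builtin calls pass
+// their parameters explicitly instead of consuming operand stack slots).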
+void LiftoffAssembler::PrepareBuiltinCall(
+ const FunctionSig* sig, compiler::CallDescriptor* call_descriptor,
+ std::initializer_list<VarState> params) {
+ LiftoffStackSlots stack_slots(this);
+ StackTransferRecipe stack_transfers(this);
+ LiftoffRegList param_regs;
+ PrepareStackTransfers(sig, call_descriptor, params.begin(), &stack_slots,
+ &stack_transfers, &param_regs);
+ // Create all the slots.
+ stack_slots.Construct();
+ // Execute the stack transfers before filling the instance register.
+ stack_transfers.Execute();
+
+ // Reset register use counters.
+ cache_state_.reset_used_registers();
+ SpillAllRegisters();
+}
+
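+// Hypothetical call-site sketch (names assumed for illustration): a runtime
+// stub taking one i32 argument could be invoked without touching the value
+// stack roughly like this:
+//   VarState arg{kWasmI32, LiftoffRegister{arg_reg}};  // assumed ctor shape
+//   asm->PrepareBuiltinCall(&sig, call_descriptor, {arg});
+//   asm->CallRuntimeStub(stub_id);
+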
+void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register* target,
+ Register* target_instance) {
+ uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
+ // Input 0 is the call target.
+ constexpr size_t kInputShift = 1;
+
+ // Spill all cache slots which are not being used as parameters.
+ // Don't update any register use counters, they will be reset later anyway.
+ for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
+ idx < end; ++idx) {
+ VarState& slot = cache_state_.stack_state[idx];
+ if (!slot.is_reg()) continue;
+ Spill(slot.offset(), slot.reg(), slot.type());
+ slot.MakeStack();
+ }
+
+ LiftoffStackSlots stack_slots(this);
+ StackTransferRecipe stack_transfers(this);
+ LiftoffRegList param_regs;
+
+ // Move the target instance (if supplied) into the correct instance register.
+ compiler::LinkageLocation instance_loc =
+ call_descriptor->GetInputLocation(kInputShift);
+ DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister());
+ Register instance_reg = Register::from_code(instance_loc.AsRegister());
+ param_regs.set(instance_reg);
+ if (target_instance && *target_instance != instance_reg) {
+ stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
+ LiftoffRegister(*target_instance),
+ kWasmIntPtr);
+ }
+
+ if (num_params) {
+ uint32_t param_base = cache_state_.stack_height() - num_params;
+ PrepareStackTransfers(sig, call_descriptor,
+ &cache_state_.stack_state[param_base], &stack_slots,
+ &stack_transfers, &param_regs);
+ }
// If the target register overlaps with a parameter register, then move the
// target to another free register, or spill to the stack.
@@ -721,7 +774,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
}
}
-void LiftoffAssembler::FinishCall(FunctionSig* sig,
+void LiftoffAssembler::FinishCall(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor) {
const size_t return_count = sig->return_count();
if (return_count != 0) {
@@ -779,7 +832,7 @@ void LiftoffAssembler::ParallelRegisterMove(
}
}
-void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) {
+void LiftoffAssembler::MoveToReturnRegisters(const FunctionSig* sig) {
// We do not support multi-value yet.
DCHECK_EQ(1, sig->return_count());
ValueType return_type = sig->GetReturn(0);
@@ -916,7 +969,7 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
}
std::ostream& operator<<(std::ostream& os, VarState slot) {
- os << ValueTypes::TypeName(slot.type()) << ":";
+ os << slot.type().type_name() << ":";
switch (slot.loc()) {
case VarState::kStack:
return os << "s";
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 868b9390d1..6573ff4aa4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -103,6 +103,11 @@ class LiftoffAssembler : public TurboAssembler {
void MakeStack() { loc_ = kStack; }
+ void MakeRegister(LiftoffRegister r) {
+ reg_ = r;
+ loc_ = kRegister;
+ }
+
// Copy src to this, except for offset, since src and this could have been
// from different stack states.
void Copy(VarState src) {
@@ -282,6 +287,16 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
+ // Returns the register which holds the value of stack slot {index}. If the
+ // value is not stored in a register yet, a register is allocated for it. The
+ // register is then assigned to the stack slot. The value stack height is not
+ // modified. The top of the stack is index 0, i.e. {PopToRegister()} and
+ // {PeekToRegister(0)} should result in the same register.
+ // {PeekToRegister} already decrements the use count of the stack slot's
+ // register. The slot must therefore not be popped via {PopToRegister}, but
+ // discarded with {stack_state.pop_back(count)}.
+ LiftoffRegister PeekToRegister(int index, LiftoffRegList pinned);
+
int NextSpillOffset(ValueType type) {
int offset = TopSpillOffset() + SlotSizeForType(type);
if (NeedsAlignment(type)) {
@@ -374,13 +389,18 @@ class LiftoffAssembler : public TurboAssembler {
}
// Load parameters into the right registers / stack slots for the call.
+ void PrepareBuiltinCall(const FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ std::initializer_list<VarState> params);
+
+ // Load parameters into the right registers / stack slots for the call.
// Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack.
- void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
+ void PrepareCall(const FunctionSig*, compiler::CallDescriptor*,
Register* target = nullptr,
Register* target_instance = nullptr);
// Process return values of the call.
- void FinishCall(FunctionSig*, compiler::CallDescriptor*);
+ void FinishCall(const FunctionSig*, compiler::CallDescriptor*);
// Move {src} into {dst}. {src} and {dst} must be different.
void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
@@ -398,7 +418,7 @@ class LiftoffAssembler : public TurboAssembler {
};
void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
- void MoveToReturnRegisters(FunctionSig*);
+ void MoveToReturnRegisters(const FunctionSig*);
#ifdef ENABLE_SLOW_DCHECKS
// Validate that the register use counts reflect the state of the cache.
@@ -465,6 +485,18 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t offset_imm, LiftoffRegister result,
StoreType type);
+ inline void AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister result,
+ StoreType type);
+
+ inline void AtomicCompareExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister expected,
+ LiftoffRegister new_value,
+ LiftoffRegister value, StoreType type);
+
+ inline void AtomicFence();
+
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
@@ -679,7 +711,86 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_f64_set_cond(Condition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
+ inline void emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_i64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_i32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_i16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_i8x16_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void StackCheck(Label* ool_code, Register limit_address);
@@ -697,13 +808,13 @@ class LiftoffAssembler : public TurboAssembler {
// this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers.
- inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
+ inline void CallC(const FunctionSig* sig, const LiftoffRegister* args,
const LiftoffRegister* rets, ValueType out_argument_type,
int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr);
// Indirect call: If {target == no_reg}, then pop the target from the stack.
- inline void CallIndirect(FunctionSig* sig,
+ inline void CallIndirect(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target);
inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);
@@ -712,7 +823,6 @@ class LiftoffAssembler : public TurboAssembler {
inline void AllocateStackSlot(Register addr, uint32_t size);
inline void DeallocateStackSlot(uint32_t size);
- inline void DebugBreak();
////////////////////////////////////
// End of platform-specific part. //
////////////////////////////////////
@@ -756,6 +866,8 @@ class LiftoffAssembler : public TurboAssembler {
}
private:
+ LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned);
+
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
union {
@@ -927,7 +1039,7 @@ class LiftoffStackSlots {
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index f396b76244..377cd1b5a8 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -81,6 +81,9 @@ struct assert_field_size {
constexpr LoadType::LoadTypeValue kPointerLoadType =
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
+constexpr ValueType kPointerValueType =
+ kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
+
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
// branches to distant targets. Moving labels would confuse the Assembler,
@@ -155,21 +158,25 @@ constexpr Condition GetCompareCondition(WasmOpcode opcode) {
// Builds a {DebugSideTable}.
class DebugSideTableBuilder {
public:
+ enum AssumeSpilling {
+ // All register values will be spilled before the pc covered by the debug
+ // side table entry. Register slots will be marked as stack slots in the
+ // generated debug side table entry.
+ kAssumeSpilling,
+ // Register slots will be written out as they are.
+ kAllowRegisters,
+ // Register slots cannot appear since we already spilled.
+ kDidSpill
+ };
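+
+ // How these modes are used in this patch: out-of-line traps and stack
+ // checks pass kAssumeSpilling (registers get spilled before the OOL code
+ // runs), breakpoints pass kAllowRegisters (execution stops in place), and
+ // call sites pass kDidSpill ({PrepareCall} has already spilled everything).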
+
class EntryBuilder {
public:
- explicit EntryBuilder(
- int pc_offset, std::vector<ValueType> stack_types,
- std::vector<int> stack_offsets,
- std::vector<DebugSideTable::Entry::Constant> constants)
- : pc_offset_(pc_offset),
- stack_types_(std::move(stack_types)),
- stack_offsets_(std::move(stack_offsets)),
- constants_(std::move(constants)) {}
+ explicit EntryBuilder(int pc_offset,
+ std::vector<DebugSideTable::Entry::Value> values)
+ : pc_offset_(pc_offset), values_(std::move(values)) {}
- DebugSideTable::Entry ToTableEntry() const {
- return DebugSideTable::Entry{pc_offset_, std::move(stack_types_),
- std::move(stack_offsets_),
- std::move(constants_)};
+ DebugSideTable::Entry ToTableEntry() {
+ return DebugSideTable::Entry{pc_offset_, std::move(values_)};
}
int pc_offset() const { return pc_offset_; }
@@ -177,59 +184,65 @@ class DebugSideTableBuilder {
private:
int pc_offset_;
- std::vector<ValueType> stack_types_;
- std::vector<int> stack_offsets_;
- std::vector<DebugSideTable::Entry::Constant> constants_;
+ std::vector<DebugSideTable::Entry::Value> values_;
};
// Adds a new entry, and returns a pointer to a builder for modifying that
// entry ({stack_height} includes {num_locals}).
EntryBuilder* NewEntry(int pc_offset, int num_locals, int stack_height,
- LiftoffAssembler::VarState* stack_state) {
+ LiftoffAssembler::VarState* stack_state,
+ AssumeSpilling assume_spilling) {
DCHECK_LE(num_locals, stack_height);
 // Record the type, location and value of each stack slot (locals first).
- int stack_height_without_locals = stack_height - num_locals;
- std::vector<ValueType> stack_types(stack_height_without_locals);
- for (int i = 0; i < stack_height_without_locals; ++i) {
- stack_types[i] = stack_state[num_locals + i].type();
- }
- // Record stack offsets.
- std::vector<int> stack_offsets(stack_height_without_locals);
- for (int i = 0; i < stack_height_without_locals; ++i) {
- stack_offsets[i] = stack_state[num_locals + i].offset();
- }
- // Record all constants on the locals and stack.
- std::vector<DebugSideTable::Entry::Constant> constants;
- for (int idx = 0; idx < stack_height; ++idx) {
- auto& slot = stack_state[idx];
- if (slot.is_const()) constants.push_back({idx, slot.i32_const()});
- }
- entries_.emplace_back(pc_offset, std::move(stack_types),
- std::move(stack_offsets), std::move(constants));
+ std::vector<DebugSideTable::Entry::Value> values(stack_height);
+ for (int i = 0; i < stack_height; ++i) {
+ const auto& slot = stack_state[i];
+ values[i].type = slot.type();
+ values[i].stack_offset = slot.offset();
+ switch (slot.loc()) {
+ case kIntConst:
+ values[i].kind = DebugSideTable::Entry::kConstant;
+ values[i].i32_const = slot.i32_const();
+ break;
+ case kRegister:
+ DCHECK_NE(kDidSpill, assume_spilling);
+ if (assume_spilling == kAllowRegisters) {
+ values[i].kind = DebugSideTable::Entry::kRegister;
+ values[i].reg_code = slot.reg().liftoff_code();
+ break;
+ }
+ DCHECK_EQ(kAssumeSpilling, assume_spilling);
+ V8_FALLTHROUGH;
+ case kStack:
+ values[i].kind = DebugSideTable::Entry::kStack;
+ values[i].stack_offset = slot.offset();
+ break;
+ }
+ }
+ entries_.emplace_back(pc_offset, std::move(values));
return &entries_.back();
}
- void AddLocalType(ValueType type, int stack_offset) {
- local_types_.push_back(type);
- local_stack_offsets_.push_back(stack_offset);
+ void SetNumLocals(int num_locals) {
+ DCHECK_EQ(-1, num_locals_);
+ DCHECK_LE(0, num_locals);
+ num_locals_ = num_locals;
}
- DebugSideTable GenerateDebugSideTable() {
- std::vector<DebugSideTable::Entry> table_entries;
- table_entries.reserve(entries_.size());
- for (auto& entry : entries_) table_entries.push_back(entry.ToTableEntry());
- std::sort(table_entries.begin(), table_entries.end(),
+ std::unique_ptr<DebugSideTable> GenerateDebugSideTable() {
+ DCHECK_LE(0, num_locals_);
+ std::vector<DebugSideTable::Entry> entries;
+ entries.reserve(entries_.size());
+ for (auto& entry : entries_) entries.push_back(entry.ToTableEntry());
+ std::sort(entries.begin(), entries.end(),
[](DebugSideTable::Entry& a, DebugSideTable::Entry& b) {
return a.pc_offset() < b.pc_offset();
});
- return DebugSideTable{std::move(local_types_),
- std::move(local_stack_offsets_),
- std::move(table_entries)};
+ return std::make_unique<DebugSideTable>(num_locals_, std::move(entries));
}
private:
- std::vector<ValueType> local_types_;
- std::vector<int> local_stack_offsets_;
+ int num_locals_ = -1;
std::list<EntryBuilder> entries_;
};
@@ -287,7 +300,8 @@ class LiftoffCompiler {
CompilationEnv* env, Zone* compilation_zone,
std::unique_ptr<AssemblerBuffer> buffer,
DebugSideTableBuilder* debug_sidetable_builder,
- Vector<int> breakpoints = {})
+ Vector<int> breakpoints = {},
+ Vector<int> extra_source_pos = {})
: asm_(std::move(buffer)),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
@@ -296,7 +310,16 @@ class LiftoffCompiler {
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_),
next_breakpoint_ptr_(breakpoints.begin()),
- next_breakpoint_end_(breakpoints.end()) {}
+ next_breakpoint_end_(breakpoints.end()),
+ next_extra_source_pos_ptr_(extra_source_pos.begin()),
+ next_extra_source_pos_end_(extra_source_pos.end()) {
+ if (breakpoints.empty()) {
+ next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
+ }
+ if (extra_source_pos.empty()) {
+ next_extra_source_pos_ptr_ = next_extra_source_pos_end_ = nullptr;
+ }
+ }
bool did_bailout() const { return bailout_reason_ != kSuccess; }
LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
@@ -310,10 +333,9 @@ class LiftoffCompiler {
return source_position_table_builder_.ToSourcePositionTableVector();
}
- OwnedVector<trap_handler::ProtectedInstructionData> GetProtectedInstructions()
- const {
- return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
- protected_instructions_);
+ OwnedVector<uint8_t> GetProtectedInstructionsData() const {
+ return OwnedVector<uint8_t>::Of(
+ Vector<const uint8_t>::cast(VectorOf(protected_instructions_)));
}
uint32_t GetTotalFrameSlotCount() const {
@@ -338,16 +360,16 @@ class LiftoffCompiler {
}
LiftoffBailoutReason BailoutReasonForType(ValueType type) {
- switch (type) {
- case kWasmS128:
+ switch (type.kind()) {
+ case ValueType::kS128:
return kSimd;
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
return kAnyRef;
- case kWasmExnRef:
+ case ValueType::kExnRef:
return kExceptionHandling;
- case kWasmBottom:
+ case ValueType::kBottom:
return kMultiValue;
default:
return kOtherReason;
@@ -368,7 +390,7 @@ class LiftoffCompiler {
}
LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
EmbeddedVector<char, 128> buffer;
- SNPrintF(buffer, "%s %s", ValueTypes::TypeName(type), context);
+ SNPrintF(buffer, "%s %s", type.type_name(), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -476,10 +498,11 @@ class LiftoffCompiler {
}
void StackCheck(WasmCodePosition position) {
+ DEBUG_CODE_COMMENT("stack check");
if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) return;
- out_of_line_code_.push_back(
- OutOfLineCode::StackCheck(position, __ cache_state()->used_registers,
- RegisterDebugSideTableEntry()));
+ out_of_line_code_.push_back(OutOfLineCode::StackCheck(
+ position, __ cache_state()->used_registers,
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
OutOfLineCode& ool = out_of_line_code_.back();
Register limit_address = __ GetUnusedRegister(kGpReg).gp();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
@@ -526,6 +549,9 @@ class LiftoffCompiler {
uint32_t num_params =
static_cast<uint32_t>(decoder->sig_->parameter_count());
+ __ CodeEntry();
+
+ DEBUG_CODE_COMMENT("enter frame");
__ EnterFrame(StackFrame::WASM_COMPILED);
__ set_has_frame(true);
pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
@@ -538,6 +564,7 @@ class LiftoffCompiler {
if (DidAssemblerBailout(decoder)) return;
// Process parameters.
+ if (num_params) DEBUG_CODE_COMMENT("process parameters");
__ SpillInstance(instance_reg);
// Input 0 is the code target, 1 is the instance. First parameter at 2.
uint32_t input_idx = kInstanceParameterIndex + 1;
@@ -548,6 +575,7 @@ class LiftoffCompiler {
DCHECK_EQ(input_idx, descriptor_->InputCount());
// Initialize locals beyond parameters.
+ if (num_params < __ num_locals()) DEBUG_CODE_COMMENT("init locals");
if (SpillLocalsInitially(decoder, num_params)) {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
@@ -566,16 +594,37 @@ class LiftoffCompiler {
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
- // Register local types and stack offsets for the debug side table.
if (V8_UNLIKELY(debug_sidetable_builder_)) {
- for (uint32_t i = 0; i < __ num_locals(); ++i) {
- debug_sidetable_builder_->AddLocalType(
- __ local_type(i), __ cache_state()->stack_state[i].offset());
- }
+ debug_sidetable_builder_->SetNumLocals(__ num_locals());
}
+
// The function-prologue stack check is associated with position 0, which
// is never a position of any instruction in the function.
StackCheck(0);
+
+ // If we are generating debug code, do check the "hook on function call"
+ // flag. If set, trigger a break.
+ if (V8_UNLIKELY(env_->debug)) {
+ // If there is a breakpoint set on the first instruction (== start of the
+ // function), then skip the check for "hook on function call", since we
+ // will unconditionally break there anyway.
+ bool has_breakpoint = next_breakpoint_ptr_ != nullptr &&
+ (*next_breakpoint_ptr_ == 0 ||
+ *next_breakpoint_ptr_ == decoder->position());
+ if (!has_breakpoint) {
+ DEBUG_CODE_COMMENT("check hook on function call");
+ Register flag = __ GetUnusedRegister(kGpReg).gp();
+ LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
+ kSystemPointerSize);
+ Label no_break;
+ __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U,
+ {});
+ // Unary "equal" means "equals zero".
+ __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
+ EmitBreakpoint(decoder);
+ __ bind(&no_break);
+ }
+ }
}
void GenerateOutOfLineCode(OutOfLineCode* ool) {
@@ -597,6 +646,7 @@ class LiftoffCompiler {
// In this mode, we never generate stack checks.
DCHECK(!is_stack_check);
__ CallTrapCallbackForTesting();
+ DEBUG_CODE_COMMENT("leave frame");
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->StackParameterCount()));
@@ -606,7 +656,7 @@ class LiftoffCompiler {
if (!ool->regs_to_save.is_empty()) __ PushRegisters(ool->regs_to_save);
source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(ool->position), false);
+ __ pc_offset(), SourcePosition(ool->position), true);
__ CallRuntimeStub(ool->stub);
DCHECK_EQ(!debug_sidetable_builder_, !ool->debug_sidetable_entry_builder);
if (V8_UNLIKELY(ool->debug_sidetable_entry_builder)) {
@@ -623,8 +673,6 @@ class LiftoffCompiler {
}
void FinishFunction(FullDecoder* decoder) {
- // All breakpoints (if any) must be emitted by now.
- DCHECK_NULL(next_breakpoint_ptr_);
if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(&ool);
@@ -645,22 +693,52 @@ class LiftoffCompiler {
}
void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
- if (V8_UNLIKELY(next_breakpoint_ptr_) &&
- *next_breakpoint_ptr_ == decoder->position()) {
- ++next_breakpoint_ptr_;
- if (next_breakpoint_ptr_ == next_breakpoint_end_) {
- next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
+ bool breakpoint = false;
+ if (V8_UNLIKELY(next_breakpoint_ptr_)) {
+ if (*next_breakpoint_ptr_ == 0) {
+ // A single breakpoint at offset 0 indicates stepping.
+ DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
+ if (WasmOpcodes::IsBreakable(opcode)) {
+ breakpoint = true;
+ EmitBreakpoint(decoder);
+ }
+ } else {
+ while (next_breakpoint_ptr_ != next_breakpoint_end_ &&
+ *next_breakpoint_ptr_ < decoder->position()) {
+ // Skip unreachable breakpoints.
+ ++next_breakpoint_ptr_;
+ }
+ if (next_breakpoint_ptr_ == next_breakpoint_end_) {
+ next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
+ } else if (*next_breakpoint_ptr_ == decoder->position()) {
+ DCHECK(WasmOpcodes::IsBreakable(opcode));
+ breakpoint = true;
+ EmitBreakpoint(decoder);
+ }
}
- EmitBreakpoint();
}
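+ // Summary of the breakpoint encoding handled above: {breakpoints} is a
+ // sorted list of code offsets, consumed in order, and the singleton list
+ // {0} is the special "stepping" request: break at every breakable opcode.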
+ // Potentially generate the source position used to OSR to this instruction.
+ MaybeGenerateExtraSourcePos(decoder, !breakpoint);
TraceCacheState(decoder);
+#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
+ if (WasmOpcodes::IsPrefixOpcode(opcode) &&
+ decoder->pc() + 1 < decoder->end()) {
+ byte op_index = *(decoder->pc() + 1);
+ opcode = static_cast<WasmOpcode>(opcode << 8 | op_index);
+ }
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
+#endif
}
- void EmitBreakpoint() {
+ void EmitBreakpoint(FullDecoder* decoder) {
DEBUG_CODE_COMMENT("breakpoint");
- // TODO(clemensb): Actually emit a breakpoint.
+ DCHECK(env_->debug);
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+ __ CallRuntimeStub(WasmCode::kWasmDebugBreak);
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kAllowRegisters);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
}
void Block(FullDecoder* decoder, Control* block) {}
@@ -780,7 +858,7 @@ class LiftoffCompiler {
enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
- void GenerateCCall(const LiftoffRegister* result_regs, FunctionSig* sig,
+ void GenerateCCall(const LiftoffRegister* result_regs, const FunctionSig* sig,
ValueType out_argument_type,
const LiftoffRegister* arg_regs,
ExternalReference ext_ref) {
@@ -790,17 +868,17 @@ class LiftoffCompiler {
// Store arguments on our stack, then align the stack for calling to C.
int param_bytes = 0;
for (ValueType param_type : sig->parameters()) {
- param_bytes += ValueTypes::MemSize(param_type);
+ param_bytes += param_type.element_size_bytes();
}
int out_arg_bytes = out_argument_type == kWasmStmt
? 0
- : ValueTypes::MemSize(out_argument_type);
+ : out_argument_type.element_size_bytes();
int stack_bytes = std::max(param_bytes, out_arg_bytes);
__ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes,
ext_ref);
}
- template <ValueType src_type, ValueType result_type, class EmitFn>
+ template <ValueType::Kind src_type, ValueType::Kind result_type, class EmitFn>
void EmitUnOp(EmitFn fn) {
constexpr RegClass src_rc = reg_class_for(src_type);
constexpr RegClass result_rc = reg_class_for(result_type);
@@ -809,25 +887,25 @@ class LiftoffCompiler {
? __ GetUnusedRegister(result_rc, {src})
: __ GetUnusedRegister(result_rc);
fn(dst, src);
- __ PushRegister(result_type, dst);
+ __ PushRegister(ValueType(result_type), dst);
}
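+
+ // Note on the template parameter change throughout this file: {ValueType}
+ // is now a class (wrapping a {Kind} enum) rather than a plain enum, so it
+ // can no longer be used as a non-type template parameter. The templates
+ // therefore take {ValueType::Kind} and rewrap it where a full {ValueType}
+ // is needed, e.g. {ValueType(result_type)} above.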
- template <ValueType type>
+ template <ValueType::Kind type>
void EmitFloatUnOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
- ValueType sig_reps[] = {type};
+ ValueType sig_reps[] = {ValueType(type)};
FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, type, &src, ext_ref);
+ GenerateCCall(&dst, &sig, ValueType(type), &src, ext_ref);
};
EmitUnOp<type, type>(emit_with_c_fallback);
}
enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
- template <ValueType dst_type, ValueType src_type,
+ template <ValueType::Kind dst_type, ValueType::Kind src_type,
TypeConversionTrapping can_trap>
void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
WasmCodePosition trap_position) {
@@ -846,54 +924,55 @@ class LiftoffCompiler {
ExternalReference ext_ref = fallback_fn();
if (can_trap) {
// External references for potentially trapping conversions return int.
- ValueType sig_reps[] = {kWasmI32, src_type};
+ ValueType sig_reps[] = {kWasmI32, ValueType(src_type)};
FunctionSig sig(1, 1, sig_reps);
LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst};
- GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
+ GenerateCCall(dst_regs, &sig, ValueType(dst_type), &src, ext_ref);
__ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
} else {
- ValueType sig_reps[] = {src_type};
+ ValueType sig_reps[] = {ValueType(src_type)};
FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
+ GenerateCCall(&dst, &sig, ValueType(dst_type), &src, ext_ref);
}
}
- __ PushRegister(dst_type, dst);
+ __ PushRegister(ValueType(dst_type), dst);
}
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
case kExpr##opcode: \
- EmitUnOp<kWasmI32, kWasmI32>( \
+ EmitUnOp<ValueType::kI32, ValueType::kI32>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst.gp(), src.gp()); \
}); \
break;
#define CASE_I64_UNOP(opcode, fn) \
case kExpr##opcode: \
- EmitUnOp<kWasmI64, kWasmI64>( \
+ EmitUnOp<ValueType::kI64, ValueType::kI64>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst, src); \
}); \
break;
#define CASE_FLOAT_UNOP(opcode, type, fn) \
case kExpr##opcode: \
- EmitUnOp<kWasm##type, kWasm##type>( \
+ EmitUnOp<ValueType::k##type, ValueType::k##type>( \
[=](LiftoffRegister dst, LiftoffRegister src) { \
__ emit_##fn(dst.fp(), src.fp()); \
}); \
break;
-#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
- case kExpr##opcode: \
- EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \
- &ExternalReference::wasm_##fn); \
+#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
+ case kExpr##opcode: \
+ EmitFloatUnOpWithCFallback<ValueType::k##type>( \
+ &LiftoffAssembler::emit_##fn, &ExternalReference::wasm_##fn); \
break;
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
case kExpr##opcode: \
- EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>( \
- kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0); \
+ EmitTypeConversion<ValueType::k##dst_type, ValueType::k##src_type, \
+ can_trap>(kExpr##opcode, ext_ref, \
+ can_trap ? decoder->position() : 0); \
break;
switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz)
@@ -959,19 +1038,19 @@ class LiftoffCompiler {
outstanding_op_ = kExprI32Eqz;
break;
}
- EmitUnOp<kWasmI32, kWasmI32>(
+ EmitUnOp<ValueType::kI32, ValueType::kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
__ emit_i32_eqz(dst.gp(), src.gp());
});
break;
case kExprI64Eqz:
- EmitUnOp<kWasmI64, kWasmI32>(
+ EmitUnOp<ValueType::kI64, ValueType::kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
__ emit_i64_eqz(dst.gp(), src);
});
break;
case kExprI32Popcnt:
- EmitUnOp<kWasmI32, kWasmI32>(
+ EmitUnOp<ValueType::kI32, ValueType::kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
@@ -981,7 +1060,7 @@ class LiftoffCompiler {
});
break;
case kExprI64Popcnt:
- EmitUnOp<kWasmI64, kWasmI64>(
+ EmitUnOp<ValueType::kI64, ValueType::kI64>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i64_popcnt(dst, src)) return;
// The c function returns i32. We will zero-extend later.
@@ -1015,8 +1094,8 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION
}
- template <ValueType src_type, ValueType result_type, typename EmitFn,
- typename EmitFnImm>
+ template <ValueType::Kind src_type, ValueType::Kind result_type,
+ typename EmitFn, typename EmitFnImm>
void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass result_rc = reg_class_for(result_type);
@@ -1033,7 +1112,7 @@ class LiftoffCompiler {
: __ GetUnusedRegister(result_rc);
fnImm(dst, lhs, imm);
- __ PushRegister(result_type, dst);
+ __ PushRegister(ValueType(result_type), dst);
} else {
// The RHS was not an immediate.
LiftoffRegister rhs = __ PopToRegister();
@@ -1042,11 +1121,12 @@ class LiftoffCompiler {
? __ GetUnusedRegister(result_rc, {lhs, rhs})
: __ GetUnusedRegister(result_rc);
fn(dst, lhs, rhs);
- __ PushRegister(result_type, dst);
+ __ PushRegister(ValueType(result_type), dst);
}
}
- template <ValueType src_type, ValueType result_type, typename EmitFn>
+ template <ValueType::Kind src_type, ValueType::Kind result_type,
+ typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass result_rc = reg_class_for(result_type);
@@ -1056,7 +1136,7 @@ class LiftoffCompiler {
? __ GetUnusedRegister(result_rc, {lhs, rhs})
: __ GetUnusedRegister(result_rc);
fn(dst, lhs, rhs);
- __ PushRegister(result_type, dst);
+ __ PushRegister(ValueType(result_type), dst);
}
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1087,13 +1167,13 @@ class LiftoffCompiler {
const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
case kExpr##opcode: \
- return EmitBinOp<kWasmI32, kWasmI32>( \
+ return EmitBinOp<ValueType::kI32, ValueType::kI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
});
#define CASE_I32_BINOPI(opcode, fn) \
case kExpr##opcode: \
- return EmitBinOpImm<kWasmI32, kWasmI32>( \
+ return EmitBinOpImm<ValueType::kI32, ValueType::kI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
}, \
@@ -1102,13 +1182,13 @@ class LiftoffCompiler {
});
#define CASE_I64_BINOP(opcode, fn) \
case kExpr##opcode: \
- return EmitBinOp<kWasmI64, kWasmI64>( \
+ return EmitBinOp<ValueType::kI64, ValueType::kI64>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst, lhs, rhs); \
});
#define CASE_I64_BINOPI(opcode, fn) \
case kExpr##opcode: \
- return EmitBinOpImm<kWasmI64, kWasmI64>( \
+ return EmitBinOpImm<ValueType::kI64, ValueType::kI64>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst, lhs, rhs); \
}, \
@@ -1117,7 +1197,7 @@ class LiftoffCompiler {
});
#define CASE_FLOAT_BINOP(opcode, type, fn) \
case kExpr##opcode: \
- return EmitBinOp<kWasm##type, kWasm##type>( \
+ return EmitBinOp<ValueType::k##type, ValueType::k##type>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
});
@@ -1129,32 +1209,32 @@ class LiftoffCompiler {
outstanding_op_ = kExpr##opcode; \
break; \
} \
- return EmitBinOp<kWasmI32, kWasmI32>( \
+ return EmitBinOp<ValueType::kI32, ValueType::kI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
constexpr Condition cond = GetCompareCondition(kExpr##opcode); \
__ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
});
#define CASE_I64_CMPOP(opcode, cond) \
case kExpr##opcode: \
- return EmitBinOp<kWasmI64, kWasmI32>( \
+ return EmitBinOp<ValueType::kI64, ValueType::kI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_i64_set_cond(cond, dst.gp(), lhs, rhs); \
});
#define CASE_F32_CMPOP(opcode, cond) \
case kExpr##opcode: \
- return EmitBinOp<kWasmF32, kWasmI32>( \
+ return EmitBinOp<ValueType::kF32, ValueType::kI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
});
#define CASE_F64_CMPOP(opcode, cond) \
case kExpr##opcode: \
- return EmitBinOp<kWasmF64, kWasmI32>( \
+ return EmitBinOp<ValueType::kF64, ValueType::kI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_f64_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
});
#define CASE_I64_SHIFTOP(opcode, fn) \
case kExpr##opcode: \
- return EmitBinOpImm<kWasmI64, kWasmI64>( \
+ return EmitBinOpImm<ValueType::kI64, ValueType::kI64>( \
[=](LiftoffRegister dst, LiftoffRegister src, \
LiftoffRegister amount) { \
__ emit_##fn(dst, src, \
@@ -1165,7 +1245,7 @@ class LiftoffCompiler {
});
#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
case kExpr##opcode: \
- return EmitBinOp<kWasm##type, kWasm##type>( \
+ return EmitBinOp<ValueType::k##type, ValueType::k##type>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(); \
@@ -1245,9 +1325,10 @@ class LiftoffCompiler {
CASE_FLOAT_BINOP(F64Max, F64, f64_max)
CASE_FLOAT_BINOP(F64CopySign, F64, f64_copysign)
case kExprI32DivS:
- EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+ EmitBinOp<ValueType::kI32, ValueType::kI32>([this, decoder](
+ LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
WasmCodePosition position = decoder->position();
AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
@@ -1261,36 +1342,37 @@ class LiftoffCompiler {
});
break;
case kExprI32DivU:
- EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Label* div_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
- __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
- });
+ EmitBinOp<ValueType::kI32, ValueType::kI32>(
+ [this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label* div_by_zero = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
+ __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
+ });
break;
case kExprI32RemS:
- EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
- __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
- });
+ EmitBinOp<ValueType::kI32, ValueType::kI32>(
+ [this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label* rem_by_zero = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
+ __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
+ });
break;
case kExprI32RemU:
- EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
- __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
- });
+ EmitBinOp<ValueType::kI32, ValueType::kI32>(
+ [this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label* rem_by_zero = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
+ __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
+ });
break;
case kExprI64DivS:
- EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+ EmitBinOp<ValueType::kI64, ValueType::kI64>([this, decoder](
+ LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
WasmCodePosition position = decoder->position();
AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
@@ -1308,9 +1390,10 @@ class LiftoffCompiler {
});
break;
case kExprI64DivU:
- EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+ EmitBinOp<ValueType::kI64, ValueType::kI64>([this, decoder](
+ LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Label* div_by_zero = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
@@ -1320,21 +1403,22 @@ class LiftoffCompiler {
});
break;
case kExprI64RemS:
- EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- Label* rem_by_zero = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
- if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
- ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
- EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
- }
- });
+ EmitBinOp<ValueType::kI64, ValueType::kI64>(
+ [this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label* rem_by_zero = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
+ if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
+ ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
+ EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
+ }
+ });
break;
case kExprI64RemU:
- EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+ EmitBinOp<ValueType::kI64, ValueType::kI64>([this, decoder](
+ LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
@@ -1411,6 +1495,7 @@ class LiftoffCompiler {
return unsupported(decoder, kMultiValue, "multi-return");
}
if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
+ DEBUG_CODE_COMMENT("leave frame");
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->StackParameterCount()));
@@ -1709,13 +1794,10 @@ class LiftoffCompiler {
Label* AddOutOfLineTrap(WasmCodePosition position,
WasmCode::RuntimeStubId stub, uint32_t pc = 0) {
DCHECK(FLAG_wasm_bounds_checks);
- // The pc is needed for memory OOB trap with trap handler enabled. Other
- // callers should not even compute it.
- DCHECK_EQ(pc != 0, stub == WasmCode::kThrowWasmTrapMemOutOfBounds &&
- env_->use_trap_handler);
- out_of_line_code_.push_back(
- OutOfLineCode::Trap(stub, position, pc, RegisterDebugSideTableEntry()));
+ out_of_line_code_.push_back(OutOfLineCode::Trap(
+ stub, position, pc,
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
return out_of_line_code_.back().label.get();
}
@@ -1847,7 +1929,7 @@ class LiftoffCompiler {
}
source_position_table_builder_.AddPosition(__ pc_offset(),
- SourcePosition(position), false);
+ SourcePosition(position), true);
__ CallRuntimeStub(WasmCode::kWasmTraceMemory);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
@@ -1971,14 +2053,13 @@ class LiftoffCompiler {
WasmMemoryGrowDescriptor descriptor;
DCHECK_EQ(0, descriptor.GetStackParameterCount());
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
- DCHECK_EQ(ValueTypes::MachineTypeFor(kWasmI32),
- descriptor.GetParameterType(0));
+ DCHECK_EQ(kWasmI32.machine_type(), descriptor.GetParameterType(0));
Register param_reg = descriptor.GetRegisterParameter(0);
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
__ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
- RegisterDebugSideTableEntry();
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
if (kReturnRegister0 != result.gp()) {
@@ -1988,12 +2069,13 @@ class LiftoffCompiler {
__ PushRegister(kWasmI32, result);
}
- DebugSideTableBuilder::EntryBuilder* RegisterDebugSideTableEntry() {
+ DebugSideTableBuilder::EntryBuilder* RegisterDebugSideTableEntry(
+ DebugSideTableBuilder::AssumeSpilling assume_spilling) {
if (V8_LIKELY(!debug_sidetable_builder_)) return nullptr;
int stack_height = static_cast<int>(__ cache_state()->stack_height());
return debug_sidetable_builder_->NewEntry(
__ pc_offset(), __ num_locals(), stack_height,
- __ cache_state()->stack_state.begin());
+ __ cache_state()->stack_state.begin(), assume_spilling);
}
void CallDirect(FullDecoder* decoder,
@@ -2013,6 +2095,11 @@ class LiftoffCompiler {
call_descriptor =
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+ // Place the source position before any stack manipulation, since this will
+ // be used for OSR in debugging.
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), true);
+
if (imm.index < env_->module->num_imported_functions) {
// A direct call to an imported function.
LiftoffRegList pinned;
@@ -2035,25 +2122,21 @@ class LiftoffCompiler {
Register* explicit_instance = &imported_function_ref;
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
- source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(decoder->position()), false);
-
__ CallIndirect(imm.sig, call_descriptor, target);
} else {
// A direct call within this module just gets the current instance.
__ PrepareCall(imm.sig, call_descriptor);
- source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(decoder->position()), false);
-
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
__ CallNativeWasmCode(addr);
}
- RegisterDebugSideTableEntry();
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+ MaybeGenerateExtraSourcePos(decoder);
+
__ FinishCall(imm.sig, call_descriptor);
}
@@ -2072,6 +2155,11 @@ class LiftoffCompiler {
return;
}
+ // Place the source position before any stack manipulation, since this will
+ // be used for OSR in debugging.
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), true);
+
// Pop the index.
Register index = __ PopToRegister().gp();
// If that register is still being used after popping, we move it to another
@@ -2172,9 +2260,6 @@ class LiftoffCompiler {
__ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
pinned);
- source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(decoder->position()), false);
-
auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
call_descriptor =
@@ -2184,9 +2269,11 @@ class LiftoffCompiler {
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
__ CallIndirect(imm.sig, call_descriptor, target);
- RegisterDebugSideTableEntry();
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+ MaybeGenerateExtraSourcePos(decoder);
+
__ FinishCall(imm.sig, call_descriptor);
}
@@ -2207,22 +2294,240 @@ class LiftoffCompiler {
return unsupported(decoder, kSimd, "simd");
}
switch (opcode) {
+ case wasm::kExprF64x2Splat:
+ EmitUnOp<ValueType::kF64, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_f64x2_splat(dst, src);
+ });
+ break;
+ case wasm::kExprF64x2Add:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_f64x2_add(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprF64x2Sub:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_f64x2_sub(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprF64x2Mul:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_f64x2_mul(dst, lhs, rhs);
+ });
+ break;
case wasm::kExprF32x4Splat:
- EmitUnOp<kWasmF32, kWasmS128>(
+ EmitUnOp<ValueType::kF32, ValueType::kS128>(
[=](LiftoffRegister dst, LiftoffRegister src) {
__ emit_f32x4_splat(dst, src);
});
break;
+ case wasm::kExprF32x4Add:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_f32x4_add(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprF32x4Sub:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_f32x4_sub(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprF32x4Mul:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_f32x4_mul(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI64x2Splat:
+ EmitUnOp<ValueType::kI64, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i64x2_splat(dst, src);
+ });
+ break;
+ case wasm::kExprI64x2Add:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i64x2_add(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI64x2Sub:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i64x2_sub(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI64x2Mul:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i64x2_mul(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI32x4Splat:
+ EmitUnOp<ValueType::kI32, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i32x4_splat(dst, src);
+ });
+ break;
+ case wasm::kExprI32x4Add:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i32x4_add(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI32x4Sub:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i32x4_sub(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI32x4Mul:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i32x4_mul(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI16x8Splat:
+ EmitUnOp<ValueType::kI32, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i16x8_splat(dst, src);
+ });
+ break;
+ case wasm::kExprI16x8Add:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i16x8_add(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI16x8Sub:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i16x8_sub(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI16x8Mul:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i16x8_mul(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI8x16Splat:
+ EmitUnOp<ValueType::kI32, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i8x16_splat(dst, src);
+ });
+ break;
+ case wasm::kExprI8x16Add:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i8x16_add(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI8x16Sub:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i8x16_sub(dst, lhs, rhs);
+ });
+ break;
+ case wasm::kExprI8x16Mul:
+ EmitBinOp<ValueType::kS128, ValueType::kS128>(
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
+ __ emit_i8x16_mul(dst, lhs, rhs);
+ });
+ break;
default:
unsupported(decoder, kSimd, "simd");
}
}
+ template <ValueType::Kind src_type, ValueType::Kind result_type,
+ typename EmitFn>
+ void EmitSimdExtractLaneOp(EmitFn fn,
+ const SimdLaneImmediate<validate>& imm) {
+ static constexpr RegClass src_rc = reg_class_for(src_type);
+ static constexpr RegClass result_rc = reg_class_for(result_type);
+ LiftoffRegister lhs = __ PopToRegister();
+ LiftoffRegister dst = src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {lhs})
+ : __ GetUnusedRegister(result_rc);
+ fn(dst, lhs, imm.lane);
+ __ PushRegister(ValueType(result_type), dst);
+ }
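+  // (When {src_rc} == {result_rc}, e.g. extracting an f32 lane from an s128
+  // operand, where both live in fp registers, {lhs} is passed to
+  // {GetUnusedRegister} as a reuse hint, so the result may simply overwrite
+  // the just-popped input register.)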
+
+ template <ValueType::Kind src2_type, typename EmitFn>
+ void EmitSimdReplaceLaneOp(EmitFn fn,
+ const SimdLaneImmediate<validate>& imm) {
+ static constexpr RegClass src1_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass src2_rc = reg_class_for(src2_type);
+ static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+    // On backends which need an fp pair, src1_rc and result_rc end up being
+    // kFpRegPair, which is != kFpReg, but we still want to pin src2 when it
+    // is kFpReg, since it can overlap with those pairs.
+ static constexpr bool pin_src2 = kNeedS128RegPair && src2_rc == kFpReg;
+
+ // Does not work for arm
+ LiftoffRegister src2 = __ PopToRegister();
+    LiftoffRegister src1 = (src1_rc == src2_rc || pin_src2)
+                               ? __ PopToRegister(LiftoffRegList::ForRegs(src2))
+                               : __ PopToRegister();
+ LiftoffRegister dst =
+ (src2_rc == result_rc || pin_src2)
+ ? __ GetUnusedRegister(result_rc, {src1},
+ LiftoffRegList::ForRegs(src2))
+ : __ GetUnusedRegister(result_rc, {src1});
+ fn(dst, src1, src2, imm.lane);
+ __ PushRegister(kWasmS128, dst);
+ }
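+  // (Worked example for {pin_src2}: on arm, an s128 value occupies a Q
+  // register, which aliases a pair of D registers, while an f32/f64 {src2}
+  // lives in a single S/D register that can overlap one half of such a pair.
+  // Without pinning, popping {src1} or allocating {dst} could hand back a
+  // register aliasing {src2}.)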
+
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
const Vector<Value> inputs, Value* result) {
- unsupported(decoder, kSimd, "simd");
+ if (!CpuFeatures::SupportsWasmSimd128()) {
+ return unsupported(decoder, kSimd, "simd");
+ }
+ switch (opcode) {
+#define CASE_SIMD_EXTRACT_LANE_OP(opcode, type, fn) \
+ case wasm::kExpr##opcode: \
+ EmitSimdExtractLaneOp<ValueType::kS128, ValueType::k##type>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \
+ __ emit_##fn(dst, lhs, imm_lane_idx); \
+ }, \
+ imm); \
+ break;
+ CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane)
+ CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane)
+ CASE_SIMD_EXTRACT_LANE_OP(I64x2ExtractLane, I64, i64x2_extract_lane)
+ CASE_SIMD_EXTRACT_LANE_OP(I32x4ExtractLane, I32, i32x4_extract_lane)
+ CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneU, I32, i16x8_extract_lane_u)
+ CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneS, I32, i16x8_extract_lane_s)
+ CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneU, I32, i8x16_extract_lane_u)
+ CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneS, I32, i8x16_extract_lane_s)
+#undef CASE_SIMD_EXTRACT_LANE_OP
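+      // For illustration, the first entry above expands to:
+      //   case wasm::kExprF64x2ExtractLane:
+      //     EmitSimdExtractLaneOp<ValueType::kS128, ValueType::kF64>(
+      //         [=](LiftoffRegister dst, LiftoffRegister lhs,
+      //             uint8_t imm_lane_idx) {
+      //           __ emit_f64x2_extract_lane(dst, lhs, imm_lane_idx);
+      //         },
+      //         imm);
+      //     break;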
+#define CASE_SIMD_REPLACE_LANE_OP(opcode, type, fn) \
+ case wasm::kExpr##opcode: \
+ EmitSimdReplaceLaneOp<ValueType::k##type>( \
+ [=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
+ uint8_t imm_lane_idx) { \
+ __ emit_##fn(dst, src1, src2, imm_lane_idx); \
+ }, \
+ imm); \
+ break;
+ CASE_SIMD_REPLACE_LANE_OP(F64x2ReplaceLane, F64, f64x2_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(F32x4ReplaceLane, F32, f32x4_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(I64x2ReplaceLane, I64, i64x2_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(I32x4ReplaceLane, I32, i32x4_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(I16x8ReplaceLane, I32, i16x8_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(I8x16ReplaceLane, I32, i8x16_replace_lane)
+#undef CASE_SIMD_REPLACE_LANE_OP
+ default:
+ unsupported(decoder, kSimd, "simd");
+ }
}
+
void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
@@ -2326,6 +2631,154 @@ class LiftoffCompiler {
__ PushRegister(result_type, result);
}
+ void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
+ const MemoryAccessImmediate<validate>& imm) {
+#ifdef V8_TARGET_ARCH_IA32
+ // With the current implementation we do not have enough registers on ia32
+ // to even get to the platform-specific code. Therefore we bailout early.
+ unsupported(decoder, kAtomics, "AtomicCompareExchange");
+ return;
+#else
+ ValueType result_type = type.value_type();
+ LiftoffRegList pinned;
+ LiftoffRegister new_value = pinned.set(__ PopToRegister());
+ LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
+ Register index = pinned.set(__ PopToRegister(pinned)).gp();
+ if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
+ kDoForceCheck)) {
+ return;
+ }
+ AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
+
+ uint32_t offset = imm.offset;
+ index = AddMemoryMasking(index, &offset, &pinned);
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LiftoffRegister result =
+ pinned.set(__ GetUnusedRegister(reg_class_for(result_type), pinned));
+
+ __ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
+ type);
+ __ PushRegister(result_type, result);
+#endif
+ }
+
+ template <typename BuiltinDescriptor>
+ compiler::CallDescriptor* GetBuiltinCallDescriptor(Zone* zone) {
+ BuiltinDescriptor interface_descriptor;
+ return compiler::Linkage::GetStubCallDescriptor(
+ zone, // zone
+ interface_descriptor, // descriptor
+ interface_descriptor.GetStackParameterCount(), // stack parameter count
+ compiler::CallDescriptor::kNoFlags, // flags
+ compiler::Operator::kNoProperties, // properties
+ StubCallMode::kCallWasmRuntimeStub); // stub call mode
+ }
+
+ void AtomicWait(FullDecoder* decoder, ValueType type,
+ const MemoryAccessImmediate<validate>& imm) {
+ LiftoffRegList pinned;
+ Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp();
+ if (BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset,
+ index_reg, pinned, kDoForceCheck)) {
+ return;
+ }
+ AlignmentCheckMem(decoder, type.element_size_bytes(), imm.offset, index_reg,
+ pinned);
+
+ uint32_t offset = imm.offset;
+ index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
+ if (offset != 0) __ emit_i32_add(index_reg, index_reg, offset);
+
+ LiftoffAssembler::VarState timeout =
+ __ cache_state()->stack_state.end()[-1];
+ LiftoffAssembler::VarState expected_value =
+ __ cache_state()->stack_state.end()[-2];
+ LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
+
+ // We have to set the correct register for the index. It may have changed
+ // above in {AddMemoryMasking}.
+ index.MakeRegister(LiftoffRegister(index_reg));
+
+ WasmCode::RuntimeStubId target;
+ compiler::CallDescriptor* call_descriptor;
+ if (type == kWasmI32) {
+ if (kNeedI64RegPair) {
+ target = WasmCode::kWasmI32AtomicWait32;
+ call_descriptor =
+ GetBuiltinCallDescriptor<WasmI32AtomicWait32Descriptor>(
+ compilation_zone_);
+ } else {
+ target = WasmCode::kWasmI32AtomicWait64;
+ call_descriptor =
+ GetBuiltinCallDescriptor<WasmI32AtomicWait64Descriptor>(
+ compilation_zone_);
+ }
+ } else {
+ if (kNeedI64RegPair) {
+ target = WasmCode::kWasmI64AtomicWait32;
+ call_descriptor =
+ GetBuiltinCallDescriptor<WasmI64AtomicWait32Descriptor>(
+ compilation_zone_);
+ } else {
+ target = WasmCode::kWasmI64AtomicWait64;
+ call_descriptor =
+ GetBuiltinCallDescriptor<WasmI64AtomicWait64Descriptor>(
+ compilation_zone_);
+ }
+ }
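+    // (The split above: on targets where an i64 occupies a gp register pair
+    // ({kNeedI64RegPair}), 64-bit arguments such as the timeout are passed as
+    // two 32-bit halves, hence the dedicated *Wait32 stub descriptors; 64-bit
+    // targets pass them in a single register via the *Wait64 variants.)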
+
+ ValueType sig_reps[] = {kWasmI32, type, kWasmI64};
+ FunctionSig sig(0, 3, sig_reps);
+
+ __ PrepareBuiltinCall(&sig, call_descriptor,
+ {index, expected_value, timeout});
+ __ CallRuntimeStub(target);
+
+ // Pop parameters from the value stack.
+ __ cache_state()->stack_state.pop_back(3);
+
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+
+ __ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0));
+ }
+
+ void AtomicNotify(FullDecoder* decoder,
+ const MemoryAccessImmediate<validate>& imm) {
+ LiftoffRegList pinned;
+ LiftoffRegister count = pinned.set(__ PopToRegister());
+ Register index = pinned.set(__ PopToRegister(pinned)).gp();
+ if (BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
+ index, pinned, kDoForceCheck)) {
+ return;
+ }
+ AlignmentCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset, index,
+ pinned);
+
+ uint32_t offset = imm.offset;
+ index = AddMemoryMasking(index, &offset, &pinned);
+ if (offset) __ emit_i32_add(index, index, offset);
+
+ // TODO(ahaas): Use PrepareCall to prepare parameters.
+ __ SpillAllRegisters();
+
+ WasmAtomicNotifyDescriptor descriptor;
+ DCHECK_EQ(0, descriptor.GetStackParameterCount());
+ DCHECK_EQ(2, descriptor.GetRegisterParameterCount());
+ LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+ {LiftoffRegister(descriptor.GetRegisterParameter(0)),
+ LiftoffRegister(index), kWasmI32},
+ {LiftoffRegister(descriptor.GetRegisterParameter(1)), count, kWasmI32}};
+ __ ParallelRegisterMove(ArrayVector(reg_moves));
+
+ __ CallRuntimeStub(WasmCode::kWasmAtomicNotify);
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+
+ __ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0));
+ }
+
#define ATOMIC_STORE_LIST(V) \
V(I32AtomicStore, kI32Store) \
V(I64AtomicStore, kI64Store) \
@@ -2344,42 +2797,58 @@ class LiftoffCompiler {
V(I64AtomicLoad16U, kI64Load16U) \
V(I64AtomicLoad32U, kI64Load32U)
-#define ATOMIC_BINOP_INSTRUCTION_LIST(V) \
- V(Add, I32AtomicAdd, kI32Store) \
- V(Add, I64AtomicAdd, kI64Store) \
- V(Add, I32AtomicAdd8U, kI32Store8) \
- V(Add, I32AtomicAdd16U, kI32Store16) \
- V(Add, I64AtomicAdd8U, kI64Store8) \
- V(Add, I64AtomicAdd16U, kI64Store16) \
- V(Add, I64AtomicAdd32U, kI64Store32) \
- V(Sub, I32AtomicSub, kI32Store) \
- V(Sub, I64AtomicSub, kI64Store) \
- V(Sub, I32AtomicSub8U, kI32Store8) \
- V(Sub, I32AtomicSub16U, kI32Store16) \
- V(Sub, I64AtomicSub8U, kI64Store8) \
- V(Sub, I64AtomicSub16U, kI64Store16) \
- V(Sub, I64AtomicSub32U, kI64Store32) \
- V(And, I32AtomicAnd, kI32Store) \
- V(And, I64AtomicAnd, kI64Store) \
- V(And, I32AtomicAnd8U, kI32Store8) \
- V(And, I32AtomicAnd16U, kI32Store16) \
- V(And, I64AtomicAnd8U, kI64Store8) \
- V(And, I64AtomicAnd16U, kI64Store16) \
- V(And, I64AtomicAnd32U, kI64Store32) \
- V(Or, I32AtomicOr, kI32Store) \
- V(Or, I64AtomicOr, kI64Store) \
- V(Or, I32AtomicOr8U, kI32Store8) \
- V(Or, I32AtomicOr16U, kI32Store16) \
- V(Or, I64AtomicOr8U, kI64Store8) \
- V(Or, I64AtomicOr16U, kI64Store16) \
- V(Or, I64AtomicOr32U, kI64Store32) \
- V(Xor, I32AtomicXor, kI32Store) \
- V(Xor, I64AtomicXor, kI64Store) \
- V(Xor, I32AtomicXor8U, kI32Store8) \
- V(Xor, I32AtomicXor16U, kI32Store16) \
- V(Xor, I64AtomicXor8U, kI64Store8) \
- V(Xor, I64AtomicXor16U, kI64Store16) \
- V(Xor, I64AtomicXor32U, kI64Store32)
+#define ATOMIC_BINOP_INSTRUCTION_LIST(V) \
+ V(Add, I32AtomicAdd, kI32Store) \
+ V(Add, I64AtomicAdd, kI64Store) \
+ V(Add, I32AtomicAdd8U, kI32Store8) \
+ V(Add, I32AtomicAdd16U, kI32Store16) \
+ V(Add, I64AtomicAdd8U, kI64Store8) \
+ V(Add, I64AtomicAdd16U, kI64Store16) \
+ V(Add, I64AtomicAdd32U, kI64Store32) \
+ V(Sub, I32AtomicSub, kI32Store) \
+ V(Sub, I64AtomicSub, kI64Store) \
+ V(Sub, I32AtomicSub8U, kI32Store8) \
+ V(Sub, I32AtomicSub16U, kI32Store16) \
+ V(Sub, I64AtomicSub8U, kI64Store8) \
+ V(Sub, I64AtomicSub16U, kI64Store16) \
+ V(Sub, I64AtomicSub32U, kI64Store32) \
+ V(And, I32AtomicAnd, kI32Store) \
+ V(And, I64AtomicAnd, kI64Store) \
+ V(And, I32AtomicAnd8U, kI32Store8) \
+ V(And, I32AtomicAnd16U, kI32Store16) \
+ V(And, I64AtomicAnd8U, kI64Store8) \
+ V(And, I64AtomicAnd16U, kI64Store16) \
+ V(And, I64AtomicAnd32U, kI64Store32) \
+ V(Or, I32AtomicOr, kI32Store) \
+ V(Or, I64AtomicOr, kI64Store) \
+ V(Or, I32AtomicOr8U, kI32Store8) \
+ V(Or, I32AtomicOr16U, kI32Store16) \
+ V(Or, I64AtomicOr8U, kI64Store8) \
+ V(Or, I64AtomicOr16U, kI64Store16) \
+ V(Or, I64AtomicOr32U, kI64Store32) \
+ V(Xor, I32AtomicXor, kI32Store) \
+ V(Xor, I64AtomicXor, kI64Store) \
+ V(Xor, I32AtomicXor8U, kI32Store8) \
+ V(Xor, I32AtomicXor16U, kI32Store16) \
+ V(Xor, I64AtomicXor8U, kI64Store8) \
+ V(Xor, I64AtomicXor16U, kI64Store16) \
+ V(Xor, I64AtomicXor32U, kI64Store32) \
+ V(Exchange, I32AtomicExchange, kI32Store) \
+ V(Exchange, I64AtomicExchange, kI64Store) \
+ V(Exchange, I32AtomicExchange8U, kI32Store8) \
+ V(Exchange, I32AtomicExchange16U, kI32Store16) \
+ V(Exchange, I64AtomicExchange8U, kI64Store8) \
+ V(Exchange, I64AtomicExchange16U, kI64Store16) \
+ V(Exchange, I64AtomicExchange32U, kI64Store32)
+
+#define ATOMIC_COMPARE_EXCHANGE_LIST(V) \
+ V(I32AtomicCompareExchange, kI32Store) \
+ V(I64AtomicCompareExchange, kI64Store) \
+ V(I32AtomicCompareExchange8U, kI32Store8) \
+ V(I32AtomicCompareExchange16U, kI32Store16) \
+ V(I64AtomicCompareExchange8U, kI64Store8) \
+ V(I64AtomicCompareExchange16U, kI64Store16) \
+ V(I64AtomicCompareExchange32U, kI64Store32)
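+
+// For illustration: each V(name, type) entry above expands, via the
+// ATOMIC_COMPARE_EXCHANGE_OP helper in {AtomicOp} below, to a switch case of
+// the form
+//   case wasm::kExprI32AtomicCompareExchange:
+//     AtomicCompareExchange(decoder, StoreType::kI32Store, imm);
+//     break;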
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
@@ -2407,6 +2876,24 @@ class LiftoffCompiler {
ATOMIC_BINOP_INSTRUCTION_LIST(ATOMIC_BINOP_OP)
#undef ATOMIC_BINOP_OP
+
+#define ATOMIC_COMPARE_EXCHANGE_OP(name, type) \
+ case wasm::kExpr##name: \
+ AtomicCompareExchange(decoder, StoreType::type, imm); \
+ break;
+
+ ATOMIC_COMPARE_EXCHANGE_LIST(ATOMIC_COMPARE_EXCHANGE_OP)
+#undef ATOMIC_COMPARE_EXCHANGE_OP
+
+ case kExprI32AtomicWait:
+ AtomicWait(decoder, kWasmI32, imm);
+ break;
+ case kExprI64AtomicWait:
+ AtomicWait(decoder, kWasmI64, imm);
+ break;
+ case kExprAtomicNotify:
+ AtomicNotify(decoder, imm);
+ break;
default:
unsupported(decoder, kAtomics, "atomicop");
}
@@ -2415,53 +2902,284 @@ class LiftoffCompiler {
#undef ATOMIC_STORE_LIST
#undef ATOMIC_LOAD_LIST
#undef ATOMIC_BINOP_INSTRUCTION_LIST
+#undef ATOMIC_COMPARE_EXCHANGE_LIST
+
+ void AtomicFence(FullDecoder* decoder) { __ AtomicFence(); }
- void AtomicFence(FullDecoder* decoder) {
- unsupported(decoder, kAtomics, "atomic.fence");
- }
void MemoryInit(FullDecoder* decoder,
- const MemoryInitImmediate<validate>& imm, const Value& dst,
- const Value& src, const Value& size) {
- unsupported(decoder, kBulkMemory, "memory.init");
+ const MemoryInitImmediate<validate>& imm, const Value&,
+ const Value&, const Value&) {
+ LiftoffRegList pinned;
+ LiftoffRegister size = pinned.set(__ PopToRegister());
+ LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
+
+ Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ __ FillInstanceInto(instance);
+
+ LiftoffRegister segment_index =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(segment_index, WasmValue(imm.data_segment_index));
+
+ ExternalReference ext_ref = ExternalReference::wasm_memory_init();
+ ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig sig(1, 5, sig_reps);
+ LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
+ segment_index, size};
+ // We don't need the instance anymore after the call. We can use the
+ // register for the result.
+ LiftoffRegister result(instance);
+ GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
+ __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
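+    // (The single-register form of {emit_cond_jump} compares against zero:
+    // the C fallback signals an out-of-bounds access with a zero result,
+    // which takes the trap label here. The same pattern recurs in
+    // {MemoryCopy} and {MemoryFill} below.)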
}
+
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
- unsupported(decoder, kBulkMemory, "data.drop");
+ LiftoffRegList pinned;
+
+ Register seg_size_array =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, kSystemPointerSize);
+
+ LiftoffRegister seg_index =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ // Scale the seg_index for the array access.
+ __ LoadConstant(seg_index,
+ WasmValue(imm.index << kWasmI32.element_size_log2()));
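+    // (E.g. {imm.index} == 3 yields 3 << 2 == 12, the byte offset of the
+    // fourth uint32 entry in the segment-sizes array.)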
+
+ // Set the length of the segment to '0' to drop it.
+ LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(null_reg, WasmValue(0));
+ __ Store(seg_size_array, seg_index.gp(), 0, null_reg, StoreType::kI32Store,
+ pinned);
}
+
void MemoryCopy(FullDecoder* decoder,
- const MemoryCopyImmediate<validate>& imm, const Value& dst,
- const Value& src, const Value& size) {
- unsupported(decoder, kBulkMemory, "memory.copy");
+ const MemoryCopyImmediate<validate>& imm, const Value&,
+ const Value&, const Value&) {
+ LiftoffRegList pinned;
+ LiftoffRegister size = pinned.set(__ PopToRegister());
+ LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
+ Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ __ FillInstanceInto(instance);
+ ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
+ ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32,
+ kWasmI32};
+ FunctionSig sig(1, 4, sig_reps);
+ LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
+ // We don't need the instance anymore after the call. We can use the
+ // register for the result.
+ LiftoffRegister result(instance);
+ GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
+ __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
}
+
void MemoryFill(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm, const Value& dst,
- const Value& value, const Value& size) {
- unsupported(decoder, kBulkMemory, "memory.fill");
+ const MemoryIndexImmediate<validate>& imm, const Value&,
+ const Value&, const Value&) {
+ LiftoffRegList pinned;
+ LiftoffRegister size = pinned.set(__ PopToRegister());
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
+ Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ __ FillInstanceInto(instance);
+ ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
+ ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32,
+ kWasmI32};
+ FunctionSig sig(1, 4, sig_reps);
+ LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
+ // We don't need the instance anymore after the call. We can use the
+ // register for the result.
+ LiftoffRegister result(instance);
+ GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
+ __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
}
+
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
Vector<Value> args) {
- unsupported(decoder, kBulkMemory, "table.init");
+ LiftoffRegList pinned;
+ LiftoffRegister table_index_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
+ WasmValue table_index_val(
+ static_cast<uint32_t>(Smi::FromInt(imm.table.index).ptr()));
+ WasmValue segment_index_val(
+ static_cast<uint32_t>(Smi::FromInt(imm.elem_segment_index).ptr()));
+#else
+ WasmValue table_index_val(
+ static_cast<uint64_t>(Smi::FromInt(imm.table.index).ptr()));
+ WasmValue segment_index_val(
+ static_cast<uint64_t>(Smi::FromInt(imm.elem_segment_index).ptr()));
+#endif
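+    // (The cast width differs because a Smi is pointer-sized: on 32-bit and
+    // pointer-compressed builds it is the 31-bit payload shifted left by one,
+    // while on plain 64-bit builds the payload occupies the upper 32 bits;
+    // e.g. Smi::FromInt(1).ptr() is 2 resp. 1 << 32.)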
+ __ LoadConstant(table_index_reg, table_index_val);
+ LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
+ 0);
+
+ LiftoffRegister segment_index_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(segment_index_reg, segment_index_val);
+ LiftoffAssembler::VarState segment_index(kPointerValueType,
+ segment_index_reg, 0);
+
+ LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1];
+ LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2];
+ LiftoffAssembler::VarState dst = __ cache_state()->stack_state.end()[-3];
+
+ WasmCode::RuntimeStubId target = WasmCode::kWasmTableInit;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmTableInitDescriptor>(compilation_zone_);
+
+ ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32,
+ table_index_val.type(), segment_index_val.type()};
+ FunctionSig sig(0, 5, sig_reps);
+
+ __ PrepareBuiltinCall(&sig, call_descriptor,
+ {dst, src, size, table_index, segment_index});
+ __ CallRuntimeStub(target);
+
+ // Pop parameters from the value stack.
+ __ cache_state()->stack_state.pop_back(3);
+
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
}
+
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
- unsupported(decoder, kBulkMemory, "elem.drop");
+ LiftoffRegList pinned;
+ Register seg_size_array =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(seg_size_array, DroppedElemSegments,
+ kSystemPointerSize);
+
+ LiftoffRegister seg_index =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(seg_index, WasmValue(imm.index));
+
+    // Mark the segment as dropped by storing a nonzero value in the
+    // dropped-elem-segments list.
+ LiftoffRegister one_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(one_reg, WasmValue(1));
+ __ Store(seg_size_array, seg_index.gp(), 0, one_reg, StoreType::kI32Store,
+ pinned);
}
+
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- unsupported(decoder, kBulkMemory, "table.copy");
+ LiftoffRegList pinned;
+
+#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
+ WasmValue table_dst_index_val(
+ static_cast<uint32_t>(Smi::FromInt(imm.table_dst.index).ptr()));
+ WasmValue table_src_index_val(
+ static_cast<uint32_t>(Smi::FromInt(imm.table_src.index).ptr()));
+#else
+ WasmValue table_dst_index_val(
+ static_cast<uint64_t>(Smi::FromInt(imm.table_dst.index).ptr()));
+ WasmValue table_src_index_val(
+ static_cast<uint64_t>(Smi::FromInt(imm.table_src.index).ptr()));
+#endif
+
+ LiftoffRegister table_dst_index_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(table_dst_index_reg, table_dst_index_val);
+ LiftoffAssembler::VarState table_dst_index(kPointerValueType,
+ table_dst_index_reg, 0);
+
+ LiftoffRegister table_src_index_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(table_src_index_reg, table_src_index_val);
+ LiftoffAssembler::VarState table_src_index(kPointerValueType,
+ table_src_index_reg, 0);
+
+ LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1];
+ LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2];
+ LiftoffAssembler::VarState dst = __ cache_state()->stack_state.end()[-3];
+
+ WasmCode::RuntimeStubId target = WasmCode::kWasmTableCopy;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(compilation_zone_);
+
+ ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32,
+ table_dst_index_val.type(),
+ table_src_index_val.type()};
+ FunctionSig sig(0, 5, sig_reps);
+
+ __ PrepareBuiltinCall(&sig, call_descriptor,
+ {dst, src, size, table_dst_index, table_src_index});
+ __ CallRuntimeStub(target);
+
+ // Pop parameters from the value stack.
+ __ cache_state()->stack_state.pop_back(3);
+
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
}
+
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& value, const Value& delta, Value* result) {
unsupported(decoder, kAnyRef, "table.grow");
}
+
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value* result) {
unsupported(decoder, kAnyRef, "table.size");
}
+
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& start, const Value& value, const Value& count) {
unsupported(decoder, kAnyRef, "table.fill");
}
private:
+  // Emit additional source positions for return addresses. The debugger uses
+  // these to OSR frames into code compiled with a different set of
+  // breakpoints.
+ void MaybeGenerateExtraSourcePos(Decoder* decoder,
+ bool emit_breakpoint_position = false) {
+ if (V8_LIKELY(next_extra_source_pos_ptr_ == nullptr)) return;
+ int position = static_cast<int>(decoder->position());
+ while (*next_extra_source_pos_ptr_ < position) {
+ ++next_extra_source_pos_ptr_;
+ if (next_extra_source_pos_ptr_ == next_extra_source_pos_end_) {
+ next_extra_source_pos_ptr_ = next_extra_source_pos_end_ = nullptr;
+ return;
+ }
+ }
+ if (*next_extra_source_pos_ptr_ != position) return;
+ if (emit_breakpoint_position) {
+ // Removing a breakpoint while paused on that breakpoint will OSR the
+ // return address as follows:
+ // pos instr
+ // 0 foo
+ // 1 call WasmDebugBreak
+ // 1 bar // top frame return address
+ // becomes:
+ // pos instr
+ // 0 foo
+ // 1 nop // top frame return address
+ // bar
+ // {WasmCompiledFrame::position} would then return "0" as the source
+ // position of the top frame instead of "1". This is fixed by explicitly
+ // emitting the missing position before the return address, with a nop so
+ // that code offsets do not collide.
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+ __ nop();
+ }
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), true);
+    // Add a nop here, so that the following code starts at a different PC
+    // and does not collide with the source position recorded above.
+ // TODO(thibaudm/clemens): Remove this.
+ __ nop();
+ }
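+  // (The walk above relies on {extra_source_pos} being sorted ascending: with
+  // recorded positions {3, 7} and the decoder at position 5, the pointer is
+  // advanced past 3 and stops at 7 without emitting anything.)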
+
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
LiftoffAssembler asm_;
@@ -2487,8 +3205,13 @@ class LiftoffCompiler {
uint32_t pc_offset_stack_frame_construction_ = 0;
   // For emitting breakpoints, we store a pointer to the position of the
   // next breakpoint, and a pointer past the end of the list as an end marker.
+ // A single breakpoint at offset 0 indicates that we should prepare the
+ // function for stepping by flooding it with breakpoints.
int* next_breakpoint_ptr_ = nullptr;
int* next_breakpoint_end_ = nullptr;
+ // Use a similar approach to generate additional source positions.
+ int* next_extra_source_pos_ptr_ = nullptr;
+ int* next_extra_source_pos_end_ = nullptr;
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
@@ -2517,7 +3240,9 @@ class LiftoffCompiler {
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator* allocator, CompilationEnv* env,
const FunctionBody& func_body, int func_index, Counters* counters,
- WasmFeatures* detected, Vector<int> breakpoints) {
+ WasmFeatures* detected, Vector<int> breakpoints,
+ std::unique_ptr<DebugSideTable>* debug_sidetable,
+ Vector<int> extra_source_pos) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteLiftoffCompilation", "func_index", func_index,
@@ -2535,11 +3260,16 @@ WasmCompilationResult ExecuteLiftoffCompilation(
// generation.
std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
wasm::WasmInstructionBuffer::New(128 + code_size_estimate * 4 / 3);
- DebugSideTableBuilder* const kNoDebugSideTable = nullptr;
+ std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder;
+ // If we are emitting breakpoints, we should also emit the debug side table.
+ DCHECK_IMPLIES(!breakpoints.empty(), debug_sidetable != nullptr);
+ if (debug_sidetable) {
+ debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
+ }
WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, detected, func_body,
call_descriptor, env, &zone, instruction_buffer->CreateView(),
- kNoDebugSideTable, breakpoints);
+ debug_sidetable_builder.get(), breakpoints, extra_source_pos);
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
@@ -2556,30 +3286,34 @@ WasmCompilationResult ExecuteLiftoffCompilation(
counters->liftoff_bailout_reasons()->AddSample(
static_cast<int>(compiler->bailout_reason()));
if (compiler->did_bailout()) {
- // Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
- return WasmCompilationResult{};
+ } else {
+ counters->liftoff_compiled_functions()->Increment();
}
-
- counters->liftoff_compiled_functions()->Increment();
}
+ if (compiler->did_bailout()) return WasmCompilationResult{};
+
WasmCompilationResult result;
compiler->GetCode(&result.code_desc);
result.instr_buffer = instruction_buffer->ReleaseBuffer();
result.source_positions = compiler->GetSourcePositionTable();
- result.protected_instructions = compiler->GetProtectedInstructions();
+ result.protected_instructions_data = compiler->GetProtectedInstructionsData();
result.frame_slot_count = compiler->GetTotalFrameSlotCount();
result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
+ result.func_index = func_index;
result.result_tier = ExecutionTier::kLiftoff;
+ if (debug_sidetable) {
+ *debug_sidetable = debug_sidetable_builder->GenerateDebugSideTable();
+ }
DCHECK(result.succeeded());
return result;
}
-DebugSideTable GenerateLiftoffDebugSideTable(AccountingAllocator* allocator,
- CompilationEnv* env,
- const FunctionBody& func_body) {
+std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
+ AccountingAllocator* allocator, CompilationEnv* env,
+ const FunctionBody& func_body) {
Zone zone(allocator, "LiftoffDebugSideTableZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
DebugSideTableBuilder debug_sidetable_builder;
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index fba92146e1..863fa7ee07 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -54,9 +54,11 @@ enum LiftoffBailoutReason : int8_t {
V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
- Counters*, WasmFeatures* detected_features, Vector<int> breakpoints = {});
+ Counters*, WasmFeatures* detected_features, Vector<int> breakpoints = {},
+ std::unique_ptr<DebugSideTable>* = nullptr,
+ Vector<int> extra_source_pos = {});
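+
+// A hypothetical caller sketch (for illustration only; names are invented):
+// request a debug side table and flood the function with breakpoints by
+// passing a single breakpoint at offset 0:
+//   std::unique_ptr<DebugSideTable> side_table;
+//   int breakpoints[] = {0};
+//   WasmCompilationResult result = ExecuteLiftoffCompilation(
+//       allocator, env, func_body, func_index, counters, detected,
+//       ArrayVector(breakpoints), &side_table);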
-V8_EXPORT_PRIVATE DebugSideTable GenerateLiftoffDebugSideTable(
+V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
AccountingAllocator*, CompilationEnv*, const FunctionBody&);
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index f2d42af7b1..16ad652ade 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -53,22 +53,26 @@ static inline constexpr bool needs_fp_reg_pair(ValueType type) {
return kNeedS128RegPair && type == kWasmS128;
}
-static inline constexpr RegClass reg_class_for(ValueType type) {
- switch (type) {
- case kWasmF32:
- case kWasmF64:
+static inline constexpr RegClass reg_class_for(ValueType::Kind kind) {
+ switch (kind) {
+ case ValueType::kF32:
+ case ValueType::kF64:
return kFpReg;
- case kWasmI32:
+ case ValueType::kI32:
return kGpReg;
- case kWasmI64:
+ case ValueType::kI64:
return kNeedI64RegPair ? kGpRegPair : kGpReg;
- case kWasmS128:
+ case ValueType::kS128:
return kNeedS128RegPair ? kFpRegPair : kFpReg;
default:
return kNoReg; // unsupported type
}
}
+static inline constexpr RegClass reg_class_for(ValueType type) {
+ return reg_class_for(type.kind());
+}
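+// (E.g. reg_class_for(kWasmI64) yields kGpRegPair on 32-bit targets, where
+// kNeedI64RegPair holds, and kGpReg elsewhere; see the switch above.)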
+
// Description of LiftoffRegister code encoding.
// This example uses the ARM architecture, which as of writing has:
// - 9 GP registers, requiring 4 bits
@@ -155,11 +159,17 @@ class LiftoffRegister {
DCHECK_EQ(reg, fp());
}
- static LiftoffRegister from_liftoff_code(uint32_t code) {
- DCHECK_LE(0, code);
- DCHECK_GT(kAfterMaxLiftoffRegCode, code);
- DCHECK_EQ(code, static_cast<storage_t>(code));
- return LiftoffRegister(code);
+ static LiftoffRegister from_liftoff_code(int code) {
+ LiftoffRegister reg{static_cast<storage_t>(code)};
+ // Check that the code is correct by round-tripping through the
+ // reg-class-specific constructor.
+ DCHECK(
+ (reg.is_gp() && code == LiftoffRegister{reg.gp()}.liftoff_code()) ||
+ (reg.is_fp() && code == LiftoffRegister{reg.fp()}.liftoff_code()) ||
+ (reg.is_gp_pair() &&
+ code == ForPair(reg.low_gp(), reg.high_gp()).liftoff_code()) ||
+ (reg.is_fp_pair() && code == ForFpPair(reg.low_fp()).liftoff_code()));
+ return reg;
}
static LiftoffRegister from_code(RegClass rc, int code) {
@@ -253,8 +263,8 @@ class LiftoffRegister {
}
int liftoff_code() const {
- DCHECK(is_gp() || is_fp());
- return code_;
+ STATIC_ASSERT(sizeof(int) >= sizeof(storage_t));
+ return static_cast<int>(code_);
}
RegClass reg_class() const {
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 32215b3df5..8461e0435f 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -59,20 +59,20 @@ inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
int32_t offset, ValueType type) {
MemOperand src(base, offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->lw(dst.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->lw(dst.low_gp(),
MemOperand(base, offset + liftoff::kLowWordOffset));
assm->lw(dst.high_gp(),
MemOperand(base, offset + liftoff::kHighWordOffset));
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->lwc1(dst.fp(), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->Ldc1(dst.fp(), src);
break;
default:
@@ -83,20 +83,20 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
LiftoffRegister src, ValueType type) {
MemOperand dst(base, offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->Usw(src.gp(), dst);
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->Usw(src.low_gp(),
MemOperand(base, offset + liftoff::kLowWordOffset));
assm->Usw(src.high_gp(),
MemOperand(base, offset + liftoff::kHighWordOffset));
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->Uswc1(src.fp(), dst, t8);
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->Usdc1(src.fp(), dst, t8);
break;
default:
@@ -105,18 +105,18 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->push(reg.gp());
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->Push(reg.high_gp(), reg.low_gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->addiu(sp, sp, -sizeof(float));
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->addiu(sp, sp, -sizeof(double));
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
@@ -298,17 +298,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type) {
- case kWasmS128:
- return ValueTypes::ElementSizeInBytes(type);
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return type.element_size_bytes();
default:
return kStackSlotSize;
}
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- switch (type) {
- case kWasmS128:
+ switch (type.kind()) {
+ case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
@@ -318,11 +318,11 @@ bool LiftoffAssembler::NeedsAlignment(ValueType type) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case kWasmI64: {
+ case ValueType::kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -330,10 +330,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::li(reg.high_gp(), Operand(high_word));
break;
}
- case kWasmF32:
+ case ValueType::kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case kWasmF64:
+ case ValueType::kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -568,6 +568,21 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicXor");
}
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister value, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
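+// {sync} emits the MIPS SYNC instruction, a full hardware memory barrier,
+// which serves here as the implementation of wasm's sequentially consistent
+// atomic.fence.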
+void LiftoffAssembler::AtomicFence() { sync(); }
+
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -597,18 +612,18 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
sw(reg.gp(), dst);
break;
- case kWasmI64:
+ case ValueType::kI64:
sw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case kWasmF32:
+ case ValueType::kF32:
swc1(reg.fp(), dst);
break;
- case kWasmF64:
+ case ValueType::kF64:
TurboAssembler::Sdc1(reg.fp(), dst);
break;
default:
@@ -619,14 +634,14 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (value.type()) {
- case kWasmI32: {
+ switch (value.type().kind()) {
+ case ValueType::kI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg);
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
- case kWasmI64: {
+ case ValueType::kI64: {
LiftoffRegister tmp = GetUnusedRegister(kGpRegPair);
int32_t low_word = value.to_i64();
@@ -647,18 +662,18 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
lw(reg.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
lw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
lw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case kWasmF32:
+ case ValueType::kF32:
lwc1(reg.fp(), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
TurboAssembler::Ldc1(reg.fp(), src);
break;
default:
@@ -1517,12 +1532,328 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
   // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
   // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
   // upcoming 3A4000 supports MSA.
- bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+  // CPU, the Loongson 3A3000, does not support MSA (SIMD128), but the
+  // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_mul");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -1595,7 +1926,7 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
@@ -1605,7 +1936,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
liftoff::Store(this, sp, arg_bytes, *args++, param_type);
- arg_bytes += ValueTypes::MemSize(param_type);
+ arg_bytes += param_type.element_size_bytes();
}
DCHECK_LE(arg_bytes, stack_bytes);
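The loop above packs the C-call arguments into the stack region reserved by the caller, advancing the offset by each parameter's byte size; element_size_bytes() replaces the old ValueTypes::MemSize free function now that ValueType is a class. A worked layout for a hypothetical (i32, f64) signature:

// Hypothetical signature (i32, f64) -> i32:
//   param 0: kI32, 4 bytes -> stored at sp + 0
//   param 1: kF64, 8 bytes -> stored at sp + 4
//   arg_bytes = 12, which must satisfy DCHECK_LE(arg_bytes, stack_bytes).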
@@ -1642,7 +1973,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
Call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -1668,14 +1999,12 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addiu(sp, sp, size);
}
-void LiftoffAssembler::DebugBreak() { stop(); }
-
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
- if (src.type() == kWasmF64) {
+ if (src.type().kind() == ValueType::kF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
@@ -1687,7 +2016,7 @@ void LiftoffStackSlots::Construct() {
break;
}
case LiftoffAssembler::VarState::kRegister:
- if (src.type() == kWasmI64) {
+ if (src.type().kind() == ValueType::kI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
kWasmI32);
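On mips32 an i64 or f64 value spans two 32-bit halves, so Construct pushes each half separately; the type tests now go through kind() because ValueType has become a class wrapping an enum rather than a bare enum, a migration repeated throughout this patch. A minimal sketch of the shape this patch assumes (the real class in src/wasm/value-type.h carries more state):

#include <cstdint>

class ValueType {
 public:
  enum Kind : uint8_t { kI32, kI64, kF32, kF64, kS128 };
  constexpr explicit ValueType(Kind kind) : kind_(kind) {}
  constexpr Kind kind() const { return kind_; }
  constexpr int element_size_bytes() const {
    return kind_ == kS128 ? 16 : (kind_ == kI64 || kind_ == kF64) ? 8 : 4;
  }
 private:
  Kind kind_;
};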
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 09dd5105bf..a3f8d60680 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -48,17 +48,17 @@ inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->lw(dst.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->ld(dst.gp(), src);
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->lwc1(dst.fp(), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->Ldc1(dst.fp(), src);
break;
default:
@@ -69,17 +69,17 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
LiftoffRegister src, ValueType type) {
MemOperand dst(base, offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->Usw(src.gp(), dst);
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->Usd(src.gp(), dst);
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->Uswc1(src.fp(), dst, t8);
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->Usdc1(src.fp(), dst, t8);
break;
default:
@@ -88,19 +88,19 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->sw(reg.gp(), MemOperand(sp, 0));
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->push(reg.gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
@@ -256,17 +256,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type) {
- case kWasmS128:
- return ValueTypes::ElementSizeInBytes(type);
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return type.element_size_bytes();
default:
return kStackSlotSize;
}
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- switch (type) {
- case kWasmS128:
+ switch (type.kind()) {
+ case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
@@ -276,17 +276,17 @@ bool LiftoffAssembler::NeedsAlignment(ValueType type) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case kWasmI64:
+ case ValueType::kI64:
TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
break;
- case kWasmF32:
+ case ValueType::kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case kWasmF64:
+ case ValueType::kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -484,6 +484,21 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicXor");
}
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister value, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { sync(); }
+
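Unlike the read-modify-write atomics above, which still bail out, AtomicFence is implementable directly: the MIPS sync instruction is a full barrier, ordering all earlier memory accesses before all later ones. Its portable C++ counterpart:

#include <atomic>

// What a wasm atomic.fence must guarantee; sync (MIPS) and mfence (x64)
// are the hardware forms of this sequentially consistent fence.
void FenceEquivalent() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}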
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -514,17 +529,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
Sw(reg.gp(), dst);
break;
- case kWasmI64:
+ case ValueType::kI64:
Sd(reg.gp(), dst);
break;
- case kWasmF32:
+ case ValueType::kF32:
Swc1(reg.fp(), dst);
break;
- case kWasmF64:
+ case ValueType::kF64:
TurboAssembler::Sdc1(reg.fp(), dst);
break;
default:
@@ -535,14 +550,14 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (value.type()) {
- case kWasmI32: {
+ switch (value.type().kind()) {
+ case ValueType::kI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg);
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
- case kWasmI64: {
+ case ValueType::kI64: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg);
TurboAssembler::li(tmp.gp(), value.to_i64());
sd(tmp.gp(), dst);
@@ -557,17 +572,17 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
Lw(reg.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
Ld(reg.gp(), src);
break;
- case kWasmF32:
+ case ValueType::kF32:
Lwc1(reg.fp(), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
TurboAssembler::Ldc1(reg.fp(), src);
break;
default:
@@ -1309,12 +1324,328 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
 // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
 // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
 // upcoming 3A4000 supports MSA.
- bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // TODO(mips): Support this on Loongson 3A4000. Currently, the main MIPS
+ // CPU, the Loongson 3A3000, does not support MSA (simd128), but the
+ // upcoming 3A4000 supports MSA.
+ bailout(kSimd, "emit_i8x16_mul");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -1387,7 +1718,7 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
@@ -1397,7 +1728,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
liftoff::Store(this, sp, arg_bytes, *args++, param_type);
- arg_bytes += ValueTypes::MemSize(param_type);
+ arg_bytes += param_type.element_size_bytes();
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -1434,7 +1765,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
Call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -1460,8 +1791,6 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
daddiu(sp, sp, size);
}
-void LiftoffAssembler::DebugBreak() { stop(); }
-
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 89294877ff..3b5a5f3a40 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -66,17 +66,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type) {
- case kWasmS128:
- return ValueTypes::ElementSizeInBytes(type);
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return type.element_size_bytes();
default:
return kStackSlotSize;
}
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- switch (type) {
- case kWasmS128:
+ switch (type.kind()) {
+ case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
@@ -170,6 +170,21 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicXor");
}
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister value, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { sync(); }
+
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -517,11 +532,216 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2splat");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2extractlane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2replacelane");
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2mul");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
}
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4extractlane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4replacelane");
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4mul");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2splat");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2extractlane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2replacelane");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2mul");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4extractlane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4replacelane");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4mul");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8splat");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8add");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8sub");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8mul");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8replacelane");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16splat");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16extractlane_u");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16replacelane");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16extractlane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16add");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16sub");
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16mul");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
bailout(kUnsupportedArchitecture, "StackCheck");
}
@@ -546,7 +766,7 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
-void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
@@ -558,7 +778,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
}
-void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
bailout(kUnsupportedArchitecture, "CallIndirect");
@@ -576,8 +796,6 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
-void LiftoffAssembler::DebugBreak() { stop(); }
-
void LiftoffStackSlots::Construct() {
asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index f18c60a1fa..e311677e79 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -65,17 +65,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type) {
- case kWasmS128:
- return ValueTypes::ElementSizeInBytes(type);
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return type.element_size_bytes();
default:
return kStackSlotSize;
}
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- switch (type) {
- case kWasmS128:
+ switch (type.kind()) {
+ case ValueType::kS128:
return true;
default:
// No alignment because all other types are kStackSlotSize.
@@ -169,6 +169,21 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicXor");
}
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister value, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { bailout(kAtomics, "AtomicFence"); }
+
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -521,11 +536,216 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2splat");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2extractlane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2replacelane");
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2mul");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
}
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4extractlane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4replacelane");
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4mul");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2splat");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2extractlane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2replacelane");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2mul");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4extractlane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4replacelane");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4mul");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8splat");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8add");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8sub");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8mul");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8replacelane");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16splat");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16extractlane_u");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16extractlane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16replacelane");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16add");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16sub");
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16mul");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
bailout(kUnsupportedArchitecture, "StackCheck");
}
@@ -550,7 +770,7 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
-void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
@@ -562,7 +782,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
}
-void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
bailout(kUnsupportedArchitecture, "CallIndirect");
@@ -580,8 +800,6 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
-void LiftoffAssembler::DebugBreak() { stop(); }
-
void LiftoffStackSlots::Construct() {
asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index e760f275fb..6c58625536 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -58,19 +58,22 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->movl(dst.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->movq(dst.gp(), src);
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->Movss(dst.fp(), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->Movsd(dst.fp(), src);
break;
+ case ValueType::kS128:
+ assm->Movdqu(dst.fp(), src);
+ break;
default:
UNREACHABLE();
}
@@ -78,17 +81,17 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
assm->movl(dst, src.gp());
break;
- case kWasmI64:
+ case ValueType::kI64:
assm->movq(dst, src.gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->Movss(dst, src.fp());
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->Movsd(dst, src.fp());
break;
default:
@@ -97,19 +100,23 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type) {
- case kWasmI32:
- case kWasmI64:
+ switch (type.kind()) {
+ case ValueType::kI32:
+ case ValueType::kI64:
assm->pushq(reg.gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
assm->AllocateStackSpace(kSystemPointerSize);
assm->Movss(Operand(rsp, 0), reg.fp());
break;
- case kWasmF64:
+ case ValueType::kF64:
assm->AllocateStackSpace(kSystemPointerSize);
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
+ case ValueType::kS128:
+ assm->AllocateStackSpace(kSystemPointerSize * 2);
+ assm->Movdqu(Operand(rsp, 0), reg.fp());
+ break;
default:
UNREACHABLE();
}
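A 128-bit value needs two 8-byte stack slots, hence kSystemPointerSize * 2; Movdqu is the unaligned store form, used on the assumption that only pointer-size stack alignment is guaranteed at this point. In layout terms:

// After AllocateStackSpace(16):
//   [rsp + 0 .. rsp + 15] <- Movdqu stores the full XMM register
// movdqu tolerates an 8-byte-aligned rsp; movdqa would fault if rsp were
// not 16-byte aligned.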
@@ -178,32 +185,32 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- return ValueTypes::ElementSizeInBytes(type);
+ return type.element_size_bytes();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) { return false; }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
if (value.to_i32() == 0 && RelocInfo::IsNone(rmode)) {
xorl(reg.gp(), reg.gp());
} else {
movl(reg.gp(), Immediate(value.to_i32(), rmode));
}
break;
- case kWasmI64:
+ case ValueType::kI64:
if (RelocInfo::IsNone(rmode)) {
TurboAssembler::Set(reg.gp(), value.to_i64());
} else {
movq(reg.gp(), Immediate64(value.to_i64(), rmode));
}
break;
- case kWasmF32:
+ case ValueType::kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case kWasmF64:
+ case ValueType::kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -453,32 +460,41 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
}
namespace liftoff {
+#define __ lasm->
+// Spills {reg}, then checks whether any of the registers pointed to by
+// {possible_uses} aliases {reg}. If so, it allocates a replacement register,
+// moves the content of {reg} into it, and rewrites every aliasing entry in
+// {possible_uses} to point at the replacement.
+inline void ClearRegister(LiftoffAssembler* lasm, Register reg,
+ std::initializer_list<Register*> possible_uses,
+ LiftoffRegList pinned) {
+ liftoff::SpillRegisters(lasm, reg);
+ Register replacement = no_reg;
+ for (Register* use : possible_uses) {
+ if (reg != *use) continue;
+ if (replacement == no_reg) {
+ replacement = __ GetUnusedRegister(kGpReg, pinned).gp();
+ __ movq(replacement, reg);
+ }
+ // We cannot leave this loop early. There may be multiple uses of {reg}.
+ *use = replacement;
+ }
+}
+
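ClearRegister factors out the register-eviction dance that AtomicBinop previously inlined and that AtomicCompareExchange below needs as well. A usage sketch, mirroring the call sites in this patch:

// Make rax available for cmpxchg: rax is spilled, and any operand that
// currently lives in rax is redirected to a fresh register holding its value.
LiftoffRegList pinned =
    LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg);
liftoff::ClearRegister(lasm, rax, {&dst_addr, &offset_reg, &value_reg},
                       pinned);
// Postcondition: none of dst_addr, offset_reg, value_reg aliases rax.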
inline void AtomicBinop(LiftoffAssembler* lasm,
void (Assembler::*opl)(Register, Register),
void (Assembler::*opq)(Register, Register),
Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
-#define __ lasm->
DCHECK(!__ cache_state()->is_used(value));
+ Register value_reg = value.gp();
// The cmpxchg instruction uses rax to store the old value of the
// compare-exchange primitive. Therefore we have to spill the register and
// move any use to another register.
- liftoff::SpillRegisters(lasm, rax);
- Register value_reg = value.gp();
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg);
- if (pinned.has(rax)) {
- Register replacement =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- for (Register* reg : {&dst_addr, &offset_reg, &value_reg}) {
- if (*reg == rax) {
- *reg = replacement;
- }
- }
- __ movq(replacement, rax);
- }
-
+ ClearRegister(lasm, rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
if (__ emit_debug_code() && offset_reg != no_reg) {
__ AssertZeroExtended(offset_reg);
}
@@ -566,6 +582,99 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
offset_reg, offset_imm, value, type);
}
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister value, StoreType type) {
+ DCHECK(!cache_state()->is_used(value));
+ if (emit_debug_code() && offset_reg != no_reg) {
+ AssertZeroExtended(offset_reg);
+ }
+ Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ xchgb(value.gp(), dst_op);
+ movzxbq(value.gp(), value.gp());
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ xchgw(value.gp(), dst_op);
+ movzxwq(value.gp(), value.gp());
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ xchgl(value.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ xchgq(value.gp(), dst_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
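xchg with a memory operand is implicitly locked on x86, so no explicit lock prefix is emitted; the movzx after the narrow forms restores the zero-extended register state wasm expects. A portable analogue of the 8-bit case (the function name is illustrative, not V8 API):

#include <atomic>
#include <cstdint>

// Atomically store 'value' and return the previous contents, zero-extended:
// the semantics of xchgb followed by movzxbq above.
uint64_t ExchangeU8(std::atomic<uint8_t>* addr, uint8_t value) {
  return addr->exchange(value, std::memory_order_seq_cst);
}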
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ Register value_reg = new_value.gp();
+ // The cmpxchg instruction uses rax to store the old value of the
+ // compare-exchange primitive. Therefore we have to spill the register and
+ // move any use to another register.
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, value_reg);
+ liftoff::ClearRegister(this, rax, {&dst_addr, &offset_reg, &value_reg},
+ pinned);
+ if (expected.gp() != rax) {
+ movq(rax, expected.gp());
+ }
+
+ if (emit_debug_code() && offset_reg != no_reg) {
+ AssertZeroExtended(offset_reg);
+ }
+ Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+
+ lock();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ cmpxchgb(dst_op, value_reg);
+ movzxbq(result.gp(), rax);
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ cmpxchgw(dst_op, value_reg);
+ movzxwq(result.gp(), rax);
+ break;
+ }
+ case StoreType::kI32Store: {
+ cmpxchgl(dst_op, value_reg);
+ if (result.gp() != rax) {
+ movl(result.gp(), rax);
+ }
+ break;
+ }
+ case StoreType::kI64Store32: {
+ cmpxchgl(dst_op, value_reg);
+ // Zero extension.
+ movl(result.gp(), rax);
+ break;
+ }
+ case StoreType::kI64Store: {
+ cmpxchgq(dst_op, value_reg);
+ if (result.gp() != rax) {
+ movq(result.gp(), rax);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::AtomicFence() { mfence(); }
+
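cmpxchg, unlike xchg, does need the explicit lock() emitted above, and it hard-wires rax as the expected/old-value register, which is why rax had to be cleared first; AtomicFence maps to mfence, the x64 full barrier. A portable analogue of the 32-bit compare-exchange path (illustrative name, not V8 API):

#include <atomic>
#include <cstdint>

// wasm atomic.rmw.cmpxchg semantics: the old value is returned whether or
// not the swap happened, matching "old value lands in rax" above.
uint32_t CompareExchangeU32(std::atomic<uint32_t>* addr, uint32_t expected,
                            uint32_t new_value) {
  addr->compare_exchange_strong(expected, new_value,
                                std::memory_order_seq_cst);
  return expected;  // updated to the observed old value on failure
}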
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -578,11 +687,11 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset);
- if (ValueTypes::ElementSizeLog2Of(type) == 2) {
+ if (type.element_size_log2() == 2) {
movl(kScratchRegister, src);
movl(dst, kScratchRegister);
} else {
- DCHECK_EQ(3, ValueTypes::ElementSizeLog2Of(type));
+ DCHECK_EQ(3, type.element_size_log2());
movq(kScratchRegister, src);
movq(dst, kScratchRegister);
}
@@ -614,20 +723,20 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
movl(dst, reg.gp());
break;
- case kWasmI64:
+ case ValueType::kI64:
movq(dst, reg.gp());
break;
- case kWasmF32:
+ case ValueType::kF32:
Movss(dst, reg.fp());
break;
- case kWasmF64:
+ case ValueType::kF64:
Movsd(dst, reg.fp());
break;
- case kWasmS128:
+ case ValueType::kS128:
Movdqu(dst, reg.fp());
break;
default:
@@ -638,11 +747,11 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
movl(dst, Immediate(value.to_i32()));
break;
- case kWasmI64: {
+ case ValueType::kI64: {
if (is_int32(value.to_i64())) {
// Sign extend low word.
movq(dst, Immediate(static_cast<int32_t>(value.to_i64())));
@@ -664,20 +773,20 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
Operand src = liftoff::GetStackSlot(offset);
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
movl(reg.gp(), src);
break;
- case kWasmI64:
+ case ValueType::kI64:
movq(reg.gp(), src);
break;
- case kWasmF32:
+ case ValueType::kF32:
Movss(reg.fp(), src);
break;
- case kWasmF64:
+ case ValueType::kF64:
Movsd(reg.fp(), src);
break;
- case kWasmS128:
+ case ValueType::kS128:
Movdqu(reg.fp(), src);
break;
default:
@@ -923,16 +1032,16 @@ void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, int32_t imm) {
}
namespace liftoff {
-template <ValueType type>
+template <ValueType::Kind type>
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount,
void (Assembler::*emit_shift)(Register)) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
- assm->Move(kScratchRegister, src, type);
- if (amount != rcx) assm->Move(rcx, amount, type);
+ assm->Move(kScratchRegister, src, ValueType(type));
+ if (amount != rcx) assm->Move(rcx, amount, ValueType(type));
(assm->*emit_shift)(kScratchRegister);
- assm->Move(rcx, kScratchRegister, type);
+ assm->Move(rcx, kScratchRegister, ValueType(type));
return;
}
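The template parameter changes from ValueType to ValueType::Kind because a class type cannot be a non-type template parameter in the pre-C++20 dialect V8 is built with; the enum can, and the full ValueType is rebuilt at each use. A stripped-down illustration:

// An enum works as a non-type template parameter; the class wrapper does not
// (before C++20), so callers pass the Kind and re-wrap it:
template <ValueType::Kind kind>
void EmitWithType(LiftoffAssembler* assm, Register dst, Register src) {
  assm->Move(dst, src, ValueType(kind));
}
// e.g. EmitWithType<ValueType::kI32>(assm, dst, src);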
@@ -944,11 +1053,11 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
src == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx);
if (src == rcx) src = kScratchRegister;
- assm->Move(rcx, amount, type);
+ assm->Move(rcx, amount, ValueType(type));
}
// Do the actual shift.
- if (dst != src) assm->Move(dst, src, type);
+ if (dst != src) assm->Move(dst, src, ValueType(type));
(assm->*emit_shift)(dst);
// Restore rcx if needed.
@@ -958,8 +1067,8 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
- &Assembler::shll_cl);
+ liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
+ &Assembler::shll_cl);
}
void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
@@ -970,8 +1079,8 @@ void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
- &Assembler::sarl_cl);
+ liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
+ &Assembler::sarl_cl);
}
void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
@@ -982,8 +1091,8 @@ void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
- &Assembler::shrl_cl);
+ liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
+ &Assembler::shrl_cl);
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
@@ -1114,8 +1223,8 @@ void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::shlq_cl);
+ liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::shlq_cl);
}
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
@@ -1126,8 +1235,8 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::sarq_cl);
+ liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::sarq_cl);
}
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
@@ -1138,8 +1247,8 @@ void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::shrq_cl);
+ liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::shrq_cl);
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
@@ -1681,11 +1790,11 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
if (rhs != no_reg) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
cmpl(lhs, rhs);
break;
- case kWasmI64:
+ case ValueType::kI64:
cmpq(lhs, rhs);
break;
default:
@@ -1765,6 +1874,102 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
rhs);
}
+// TODO(fanchenk): Distinguish mov* if data bypass delay matters.
+namespace liftoff {
+template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
+ void (Assembler::*sse_op)(XMMRegister, XMMRegister)>
+void EmitSimdCommutativeBinOp(
+ LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs, base::Optional<CpuFeature> feature = base::nullopt) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx_op)(dst.fp(), lhs.fp(), rhs.fp());
+ return;
+ }
+
+ base::Optional<CpuFeatureScope> sse_scope;
+ if (feature.has_value()) sse_scope.emplace(assm, *feature);
+
+ if (dst.fp() == rhs.fp()) {
+ (assm->*sse_op)(dst.fp(), lhs.fp());
+ } else {
+ if (dst.fp() != lhs.fp()) (assm->movaps)(dst.fp(), lhs.fp());
+ (assm->*sse_op)(dst.fp(), rhs.fp());
+ }
+}
+
+template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
+ void (Assembler::*sse_op)(XMMRegister, XMMRegister)>
+void EmitSimdSub(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx_op)(dst.fp(), lhs.fp(), rhs.fp());
+ } else if (lhs.fp() == rhs.fp()) {
+ assm->pxor(dst.fp(), dst.fp());
+ } else if (dst.fp() == rhs.fp()) {
+ assm->movaps(kScratchDoubleReg, rhs.fp());
+ assm->movaps(dst.fp(), lhs.fp());
+ (assm->*sse_op)(dst.fp(), kScratchDoubleReg);
+ } else {
+ if (dst.fp() != lhs.fp()) assm->movaps(dst.fp(), lhs.fp());
+ (assm->*sse_op)(dst.fp(), rhs.fp());
+ }
+}
+} // namespace liftoff
+
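These helpers encode the aliasing rules of SSE's destructive two-operand forms: the AVX three-operand encoding never clobbers an input, while under SSE a commutative op can be computed as dst = rhs op lhs when dst aliases rhs, and subtraction, not being commutative, needs the scratch-register shuffle instead. Note that the lhs == rhs shortcut emits pxor, an all-zero result: exact for integer x - x, though a floating x - x with NaN lanes would mathematically yield NaN rather than zero. A hypothetical further instantiation of the commutative helper (paddb/vpaddb are assumed to be exposed by the x64 Assembler like the other packed ops used in this patch):

// Hypothetical, not in this patch: i8x16 add through the same helper.
void EmitI8x16Add(LiftoffAssembler* assm, LiftoffRegister dst,
                  LiftoffRegister lhs, LiftoffRegister rhs) {
  liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
      assm, dst, lhs, rhs);
}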
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movddup(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrq(kScratchRegister, lhs.fp(), static_cast<int8_t>(imm_lane_idx));
+ Movq(dst.fp(), kScratchRegister);
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ if (imm_lane_idx == 0) {
+ vpblendw(dst.fp(), src1.fp(), src2.fp(), 0b00001111);
+ } else {
+ vmovlhps(dst.fp(), src1.fp(), src2.fp());
+ }
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ if (imm_lane_idx == 0) {
+ pblendw(dst.fp(), src2.fp(), 0b00001111);
+ } else {
+ movlhps(dst.fp(), src2.fp());
+ }
+ }
+}
+
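A note on the immediates in emit_f64x2_replace_lane (standard SSE/AVX semantics): pblendw's mask selects 16-bit words from the second operand, so 0b00001111 replaces the low four words, i.e. the low 64-bit lane (lane 0), while movlhps/vmovlhps copies the low 64 bits of its second source into the high lane (lane 1).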
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddpd, &Assembler::addpd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vsubpd, &Assembler::subpd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulpd, &Assembler::mulpd>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() != src.fp()) {
@@ -1773,6 +1978,318 @@ void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
Shufps(dst.fp(), src.fp(), static_cast<byte>(0));
}
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vinsertps(dst.fp(), src1.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ insertps(dst.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ }
+}
+
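Similarly, emit_f32x4_replace_lane assembles the insertps immediate from its documented fields: bits 7:6 select the source lane of src2, bits 5:4 the destination lane, and bits 3:0 a zero mask. A small check of the encoding used above:

    // Shifting the lane index into bits 5:4 writes src2's lane 0 into lane
    // imm_lane_idx of dst, with no zeroing.
    static_assert(((3 << 4) & 0x30) == 0b110000, "lane 3 lands in bits 5:4");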
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddps, &Assembler::addps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vsubps, &Assembler::subps>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulps, &Assembler::mulps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movq(dst.fp(), src.gp());
+ Movddup(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrq(dst.gp(), lhs.fp(), static_cast<int8_t>(imm_lane_idx));
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrq(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrq(dst.fp(), src2.gp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddq, &Assembler::paddq>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vpsubq, &Assembler::psubq>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister tmp1 =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ LiftoffRegister tmp2 =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
+ Movaps(tmp1.fp(), lhs.fp());
+ Movaps(tmp2.fp(), rhs.fp());
+ // Multiply high dword of each qword of left with right.
+ Psrlq(tmp1.fp(), 32);
+ Pmuludq(tmp1.fp(), rhs.fp());
+ // Multiply high dword of each qword of right with left.
+ Psrlq(tmp2.fp(), 32);
+ Pmuludq(tmp2.fp(), lhs.fp());
+ Paddq(tmp2.fp(), tmp1.fp());
+ Psllq(tmp2.fp(), 32);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpmuludq(dst.fp(), lhs.fp(), rhs.fp());
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ pmuludq(dst.fp(), rhs.fp());
+ }
+ Paddq(dst.fp(), tmp2.fp());
+}
+
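emit_i64x2_mul builds a 64x64->64 multiply per lane out of 32x32->64 pieces, since SSE has no packed 64-bit multiply. Writing a = aH*2^32 + aL and b = bH*2^32 + bL, the lane result is a*b mod 2^64 = aL*bL + ((aH*bL + aL*bH) << 32); the Psrlq/Pmuludq pairs compute the two cross terms and the final pmuludq the low term. A scalar check of the identity (a sketch, not V8 code):

    #include <cstdint>
    uint64_t Mul64Via32(uint64_t a, uint64_t b) {
      uint64_t aL = static_cast<uint32_t>(a), aH = a >> 32;
      uint64_t bL = static_cast<uint32_t>(b), bH = b >> 32;
      uint64_t cross = (aH * bL + aL * bH) << 32;  // Psrlq + Pmuludq + Paddq + Psllq
      return aL * bL + cross;                      // pmuludq + final Paddq
    }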
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrd(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrd(dst.fp(), src2.gp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vpsubd, &Assembler::psubd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmulld, &Assembler::pmulld>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pshuflw(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+ Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+ movsxwl(dst.gp(), dst.gp());
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrw(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrw(dst.fp(), src2.gp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vpsubw, &Assembler::psubw>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmullw, &Assembler::pmullw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ Pshufb(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrb(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrb(dst.gp(), lhs.fp(), imm_lane_idx);
+ movsxbl(dst.gp(), dst.gp());
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrb(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrb(dst.fp(), src2.gp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdSub<&Assembler::vpsubb, &Assembler::psubb>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister tmp =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ vpsrlw(tmp.fp(), lhs.fp(), 8);
+ vpsrlw(kScratchDoubleReg, rhs.fp(), 8);
+ // t = I16x8Mul(t, s)
+ //    => __PP __PP ...  __PP  __PP
+ vpmullw(tmp.fp(), tmp.fp(), kScratchDoubleReg);
+ // s = left * 256
+ vpsllw(kScratchDoubleReg, lhs.fp(), 8);
+ // dst = I16x8Mul(left * 256, right)
+ //    => pp__ pp__ ...  pp__  pp__
+ vpmullw(dst.fp(), kScratchDoubleReg, rhs.fp());
+ // dst = I16x8Shr(dst, 8)
+ //    => 00pp 00pp ...  00pp  00pp
+ vpsrlw(dst.fp(), dst.fp(), 8);
+ // t = I16x8Shl(t, 8)
+ //    => PP00 PP00 ...  PP00  PP00
+ vpsllw(tmp.fp(), tmp.fp(), 8);
+ // dst = I16x8Or(dst, t)
+ //    => PPpp PPpp ...  PPpp  PPpp
+ vpor(dst.fp(), dst.fp(), tmp.fp());
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ movaps(tmp.fp(), dst.fp());
+ movaps(kScratchDoubleReg, rhs.fp());
+ psrlw(tmp.fp(), 8);
+ psrlw(kScratchDoubleReg, 8);
+ // dst = left * 256
+ psllw(dst.fp(), 8);
+ // t = I16x8Mul(t, s)
+ //    => __PP __PP ...  __PP  __PP
+ pmullw(tmp.fp(), kScratchDoubleReg);
+ // dst = I16x8Mul(left * 256, right)
+ //    => pp__ pp__ ...  pp__  pp__
+ pmullw(dst.fp(), rhs.fp());
+ // t = I16x8Shl(t, 8)
+ //    => PP00 PP00 ...  PP00  PP00
+ psllw(tmp.fp(), 8);
+ // dst = I16x8Shr(dst, 8)
+ //    => 00pp 00pp ...  00pp  00pp
+ psrlw(dst.fp(), 8);
+ // dst = I16x8Or(dst, t)
+ //    => PPpp PPpp ...  PPpp  PPpp
+ por(dst.fp(), tmp.fp());
+ }
+}
+
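emit_i8x16_mul works around the missing 8x8->8 packed multiply by computing each pair of bytes in 16-bit arithmetic and recombining, exactly as the PP/pp diagrams above sketch. A scalar model of one 16-bit lane (illustrative, not V8 code):

    #include <cstdint>
    uint16_t MulI8x16Lane(uint16_t left, uint16_t right) {
      uint16_t t = static_cast<uint16_t>((left >> 8) * (right >> 8));   // psrlw x2 + pmullw
      uint16_t d = static_cast<uint16_t>(
          static_cast<uint16_t>(left << 8) * right);                    // psllw + pmullw
      d = static_cast<uint16_t>(d >> 8);                                // psrlw 8
      t = static_cast<uint16_t>(t << 8);                                // psllw 8
      return static_cast<uint16_t>(t | d);                              // por
    }

The low product d is exact modulo 256 because multiplying left*256 by right pushes all cross terms above bit 15, leaving only the low-byte product in the high byte before the final shift.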
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
cmpq(rsp, Operand(limit_address, 0));
j(below_equal, ool_code);
@@ -1833,7 +2350,7 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
-void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
@@ -1843,7 +2360,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_type);
- arg_bytes += ValueTypes::MemSize(param_type);
+ arg_bytes += param_type.element_size_bytes();
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -1879,7 +2396,7 @@ void LiftoffAssembler::CallNativeWasmCode(Address addr) {
near_call(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -1908,8 +2425,6 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addq(rsp, Immediate(size));
}
-void LiftoffAssembler::DebugBreak() { int3(); }
-
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
@@ -1920,6 +2435,11 @@ void LiftoffStackSlots::Construct() {
// extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister);
+ } else if (src.type() == kWasmS128) {
+ // Since offsets are subtracted from sp, we need a smaller offset to
+ // push the top of an s128 value.
+ asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8));
+ asm_->pushq(liftoff::GetStackSlot(slot.src_offset_));
} else {
// For all other types, just push the whole (8-byte) stack slot.
// This is also ok for f32 values (even though we copy 4 uninitialized
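The new kWasmS128 case splits the 16-byte slot into two 8-byte pushes; because frame offsets are subtracted from the stack pointer, offset - 8 addresses the upper half, which must be pushed first so the value stays contiguous and correctly ordered at the new stack top. A hypothetical scalar illustration (names invented, not V8 code):

    #include <cstdint>
    #include <cstring>
    void PushS128(uint8_t*& sp, const uint8_t* slot /* 16 bytes, little-endian */) {
      sp -= 8; std::memcpy(sp, slot + 8, 8);  // pushq GetStackSlot(offset - 8)
      sp -= 8; std::memcpy(sp, slot, 8);      // pushq GetStackSlot(offset)
    }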
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 8fdf180182..d098a5f57f 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -62,18 +62,18 @@ auto ReadLebU64(const byte_t** pos) -> uint64_t {
}
ValKind V8ValueTypeToWasm(i::wasm::ValueType v8_valtype) {
- switch (v8_valtype) {
- case i::wasm::kWasmI32:
+ switch (v8_valtype.kind()) {
+ case i::wasm::ValueType::kI32:
return I32;
- case i::wasm::kWasmI64:
+ case i::wasm::ValueType::kI64:
return I64;
- case i::wasm::kWasmF32:
+ case i::wasm::ValueType::kF32:
return F32;
- case i::wasm::kWasmF64:
+ case i::wasm::ValueType::kF64:
return F64;
- case i::wasm::kWasmFuncRef:
+ case i::wasm::ValueType::kFuncRef:
return FUNCREF;
- case i::wasm::kWasmAnyRef:
+ case i::wasm::ValueType::kAnyRef:
return ANYREF;
default:
// TODO(wasm+): support new value types
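This hunk shows the mechanical pattern repeated throughout the commit: wasm's ValueType changed from a plain enum to a class, so call sites now switch on type.kind() against ValueType::k* constants instead of comparing the type object against kWasm* values directly. A minimal self-contained model of the new shape (not the real V8 class):

    struct ValueType {
      enum Kind { kI32, kI64 } kind_;
      Kind kind() const { return kind_; }
    };
    const char* KindName(ValueType type) {
      switch (type.kind()) {
        case ValueType::kI32: return "i32";
        case ValueType::kI64: return "i64";
      }
      return "?";
    }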
@@ -1212,7 +1212,7 @@ namespace {
class SignatureHelper : public i::AllStatic {
public:
// Use an invalid type as a marker separating params and results.
- static const i::wasm::ValueType kMarker = i::wasm::kWasmStmt;
+ static constexpr i::wasm::ValueType kMarker = i::wasm::kWasmStmt;
static i::Handle<i::PodArray<i::wasm::ValueType>> Serialize(
i::Isolate* isolate, FuncType* type) {
@@ -1271,6 +1271,10 @@ class SignatureHelper : public i::AllStatic {
}
};
+// Explicit instantiation makes the linker happy for component builds of
+// wasm_api_tests.
+constexpr i::wasm::ValueType SignatureHelper::kMarker;
+
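The out-of-line definition added above is a pre-C++17 requirement rather than a V8-specific idiom: a static constexpr data member that is ODR-used (for example, when its address is needed across a component/shared-library boundary) still needs a namespace-scope definition. A self-contained illustration:

    struct Helper {
      static constexpr int kMarker = -1;  // in-class declaration + initializer
    };
    constexpr int Helper::kMarker;        // required before C++17; redundant since
                                          // C++17, where the member is implicitly inline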
auto make_func(Store* store_abs, FuncData* data) -> own<Func> {
auto store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
@@ -1322,7 +1326,7 @@ auto Func::param_arity() const -> size_t {
DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*func));
i::Handle<i::WasmExportedFunction> function =
i::Handle<i::WasmExportedFunction>::cast(func);
- i::wasm::FunctionSig* sig =
+ const i::wasm::FunctionSig* sig =
function->instance().module()->functions[function->function_index()].sig;
return sig->parameter_count();
}
@@ -1335,7 +1339,7 @@ auto Func::result_arity() const -> size_t {
DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*func));
i::Handle<i::WasmExportedFunction> function =
i::Handle<i::WasmExportedFunction>::cast(func);
- i::wasm::FunctionSig* sig =
+ const i::wasm::FunctionSig* sig =
function->instance().module()->functions[function->function_index()].sig;
return sig->return_count();
}
@@ -1375,7 +1379,7 @@ i::Address CallTargetFromCache(i::Object cached_call_target) {
void PrepareFunctionData(i::Isolate* isolate,
i::Handle<i::WasmExportedFunctionData> function_data,
- i::wasm::FunctionSig* sig) {
+ const i::wasm::FunctionSig* sig) {
// If the data is already populated, return immediately.
if (!function_data->c_wrapper_code().IsSmi()) return;
// Compile wrapper code.
@@ -1393,29 +1397,29 @@ void PrepareFunctionData(i::Isolate* isolate,
function_data->set_wasm_call_target(*call_target);
}
-void PushArgs(i::wasm::FunctionSig* sig, const Val args[],
+void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
i::wasm::CWasmArgumentsPacker* packer, StoreImpl* store) {
for (size_t i = 0; i < sig->parameter_count(); i++) {
i::wasm::ValueType type = sig->GetParam(i);
- switch (type) {
- case i::wasm::kWasmI32:
+ switch (type.kind()) {
+ case i::wasm::ValueType::kI32:
packer->Push(args[i].i32());
break;
- case i::wasm::kWasmI64:
+ case i::wasm::ValueType::kI64:
packer->Push(args[i].i64());
break;
- case i::wasm::kWasmF32:
+ case i::wasm::ValueType::kF32:
packer->Push(args[i].f32());
break;
- case i::wasm::kWasmF64:
+ case i::wasm::ValueType::kF64:
packer->Push(args[i].f64());
break;
- case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmFuncRef:
- case i::wasm::kWasmNullRef:
+ case i::wasm::ValueType::kAnyRef:
+ case i::wasm::ValueType::kFuncRef:
+ case i::wasm::ValueType::kNullRef:
packer->Push(WasmRefToV8(store->i_isolate(), args[i].ref())->ptr());
break;
- case i::wasm::kWasmExnRef:
+ case i::wasm::ValueType::kExnRef:
// TODO(jkummerow): Implement these.
UNIMPLEMENTED();
break;
@@ -1425,34 +1429,34 @@ void PushArgs(i::wasm::FunctionSig* sig, const Val args[],
}
}
-void PopArgs(i::wasm::FunctionSig* sig, Val results[],
+void PopArgs(const i::wasm::FunctionSig* sig, Val results[],
i::wasm::CWasmArgumentsPacker* packer, StoreImpl* store) {
packer->Reset();
for (size_t i = 0; i < sig->return_count(); i++) {
i::wasm::ValueType type = sig->GetReturn(i);
- switch (type) {
- case i::wasm::kWasmI32:
+ switch (type.kind()) {
+ case i::wasm::ValueType::kI32:
results[i] = Val(packer->Pop<int32_t>());
break;
- case i::wasm::kWasmI64:
+ case i::wasm::ValueType::kI64:
results[i] = Val(packer->Pop<int64_t>());
break;
- case i::wasm::kWasmF32:
+ case i::wasm::ValueType::kF32:
results[i] = Val(packer->Pop<float>());
break;
- case i::wasm::kWasmF64:
+ case i::wasm::ValueType::kF64:
results[i] = Val(packer->Pop<double>());
break;
- case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmFuncRef:
- case i::wasm::kWasmNullRef: {
+ case i::wasm::ValueType::kAnyRef:
+ case i::wasm::ValueType::kFuncRef:
+ case i::wasm::ValueType::kNullRef: {
i::Address raw = packer->Pop<i::Address>();
i::Handle<i::Object> obj(i::Object(raw), store->i_isolate());
DCHECK_IMPLIES(type == i::wasm::kWasmNullRef, obj->IsNull());
results[i] = Val(V8RefValueToWasm(store, obj));
break;
}
- case i::wasm::kWasmExnRef:
+ case i::wasm::ValueType::kExnRef:
// TODO(jkummerow): Implement these.
UNIMPLEMENTED();
break;
@@ -1512,7 +1516,8 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
i::Handle<i::WasmInstanceObject> instance(function_data->instance(), isolate);
int function_index = function_data->function_index();
// Caching {sig} would give a ~10% reduction in overhead.
- i::wasm::FunctionSig* sig = instance->module()->functions[function_index].sig;
+ const i::wasm::FunctionSig* sig =
+ instance->module()->functions[function_index].sig;
PrepareFunctionData(isolate, function_data, sig);
i::Handle<i::Code> wrapper_code = i::Handle<i::Code>(
i::Code::cast(function_data->c_wrapper_code()), isolate);
@@ -1693,17 +1698,17 @@ auto Global::type() const -> own<GlobalType> {
auto Global::get() const -> Val {
i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
- switch (v8_global->type()) {
- case i::wasm::kWasmI32:
+ switch (v8_global->type().kind()) {
+ case i::wasm::ValueType::kI32:
return Val(v8_global->GetI32());
- case i::wasm::kWasmI64:
+ case i::wasm::ValueType::kI64:
return Val(v8_global->GetI64());
- case i::wasm::kWasmF32:
+ case i::wasm::ValueType::kF32:
return Val(v8_global->GetF32());
- case i::wasm::kWasmF64:
+ case i::wasm::ValueType::kF64:
return Val(v8_global->GetF64());
- case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmFuncRef: {
+ case i::wasm::ValueType::kAnyRef:
+ case i::wasm::ValueType::kFuncRef: {
StoreImpl* store = impl(this)->store();
i::HandleScope scope(store->i_isolate());
return Val(V8RefValueToWasm(store, v8_global->GetRef()));
@@ -1810,11 +1815,11 @@ auto Table::type() const -> own<TableType> {
uint32_t max;
if (!table->maximum_length().ToUint32(&max)) max = 0xFFFFFFFFu;
ValKind kind;
- switch (table->type()) {
- case i::wasm::kWasmFuncRef:
+ switch (table->type().kind()) {
+ case i::wasm::ValueType::kFuncRef:
kind = FUNCREF;
break;
- case i::wasm::kWasmAnyRef:
+ case i::wasm::ValueType::kAnyRef:
kind = ANYREF;
break;
default:
@@ -1879,11 +1884,13 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory> {
const Limits& limits = type->limits();
uint32_t minimum = limits.min;
- if (minimum > i::wasm::max_mem_pages()) return nullptr;
+ // The max_initial_mem_pages limit is only spec'ed for JS embeddings,
+ // so we'll directly use the maximum pages limit here.
+ if (minimum > i::wasm::kSpecMaxWasmMaximumMemoryPages) return nullptr;
uint32_t maximum = limits.max;
if (maximum != Limits(0).max) {
if (maximum < minimum) return nullptr;
- if (maximum > i::wasm::kSpecMaxWasmMemoryPages) return nullptr;
+ if (maximum > i::wasm::kSpecMaxWasmMaximumMemoryPages) return nullptr;
}
// TODO(wasm+): Support shared memory.
i::SharedFlag shared = i::SharedFlag::kNotShared;
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index 629ebe913e..be60dfd519 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -60,11 +60,15 @@ struct CompilationEnv {
const LowerSimd lower_simd;
+ // Whether the debugger is active.
+ const bool debug;
+
constexpr CompilationEnv(const WasmModule* module,
UseTrapHandler use_trap_handler,
RuntimeExceptionSupport runtime_exception_support,
const WasmFeatures& enabled_features,
- LowerSimd lower_simd = kNoLowerSimd)
+ LowerSimd lower_simd = kNoLowerSimd,
+ bool debug = false)
: module(module),
use_trap_handler(use_trap_handler),
runtime_exception_support(runtime_exception_support),
@@ -72,10 +76,11 @@ struct CompilationEnv {
: 0),
max_memory_size((module && module->has_maximum_pages
? module->maximum_pages
- : max_mem_pages()) *
+ : max_initial_mem_pages()) *
uint64_t{kWasmPageSize}),
enabled_features(enabled_features),
- lower_simd(lower_simd) {}
+ lower_simd(lower_simd),
+ debug(debug) {}
};
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index f6c88d0416..3c0c0493b0 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -266,6 +266,7 @@ class Decoder {
return offset - buffer_offset_;
}
const byte* end() const { return end_; }
+ void set_end(const byte* end) { end_ = end; }
// Check if the byte at {offset} from the current pc equals {expected}.
bool lookahead(int offset, byte expected) {
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 496598b18f..9752d4ef0c 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -277,7 +277,7 @@ struct BlockTypeImmediate {
uint32_t length = 1;
ValueType type = kWasmStmt;
uint32_t sig_index = 0;
- FunctionSig* sig = nullptr;
+ const FunctionSig* sig = nullptr;
inline BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
const byte* pc) {
@@ -377,7 +377,7 @@ template <Decoder::ValidateFlag validate>
struct CallIndirectImmediate {
uint32_t table_index;
uint32_t sig_index;
- FunctionSig* sig = nullptr;
+ const FunctionSig* sig = nullptr;
uint32_t length = 0;
inline CallIndirectImmediate(const WasmFeatures enabled, Decoder* decoder,
const byte* pc) {
@@ -397,7 +397,7 @@ struct CallIndirectImmediate {
template <Decoder::ValidateFlag validate>
struct CallFunctionImmediate {
uint32_t index;
- FunctionSig* sig = nullptr;
+ const FunctionSig* sig = nullptr;
uint32_t length;
inline CallFunctionImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u32v<validate>(pc + 1, &length, "function index");
@@ -782,7 +782,7 @@ template <Decoder::ValidateFlag validate>
class WasmDecoder : public Decoder {
public:
WasmDecoder(const WasmModule* module, const WasmFeatures& enabled,
- WasmFeatures* detected, FunctionSig* sig, const byte* start,
+ WasmFeatures* detected, const FunctionSig* sig, const byte* start,
const byte* end, uint32_t buffer_offset = 0)
: Decoder(start, end, buffer_offset),
module_(module),
@@ -793,7 +793,7 @@ class WasmDecoder : public Decoder {
const WasmModule* module_;
const WasmFeatures enabled_;
WasmFeatures* detected_;
- FunctionSig* sig_;
+ const FunctionSig* sig_;
ZoneVector<ValueType>* local_types_;
@@ -989,7 +989,7 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool CanReturnCall(FunctionSig* target_sig) {
+ inline bool CanReturnCall(const FunctionSig* target_sig) {
if (target_sig == nullptr) return false;
size_t num_returns = sig_->return_count();
if (num_returns != target_sig->return_count()) return false;
@@ -1133,11 +1133,15 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, FunctionIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->functions.size())) {
+ if (!module_) return true;
+ if (!VALIDATE(imm.index < module_->functions.size())) {
errorf(pc, "invalid function index: %u", imm.index);
return false;
}
+ if (!VALIDATE(module_->functions[imm.index].declared)) {
+ this->errorf(pc, "undeclared reference to function #%u", imm.index);
+ return false;
+ }
return true;
}
@@ -1191,8 +1195,16 @@ class WasmDecoder : public Decoder {
imm.elem_segment_index);
return false;
}
- if (!Validate(pc_ + imm.length - imm.table.length - 1, imm.table))
+ if (!Validate(pc_ + imm.length - imm.table.length - 1, imm.table)) {
+ return false;
+ }
+ ValueType elem_type = module_->elem_segments[imm.elem_segment_index].type;
+ if (!VALIDATE(
+ elem_type.IsSubTypeOf(module_->tables[imm.table.index].type))) {
+ errorf(pc_ + 2, "table %u is not a super-type of %s", imm.table.index,
+ elem_type.type_name());
return false;
+ }
return true;
}
@@ -1208,6 +1220,13 @@ class WasmDecoder : public Decoder {
inline bool Validate(TableCopyImmediate<validate>& imm) {
if (!Validate(pc_ + 1, imm.table_src)) return false;
if (!Validate(pc_ + 2, imm.table_dst)) return false;
+ ValueType src_type = module_->tables[imm.table_src.index].type;
+ if (!VALIDATE(
+ src_type.IsSubTypeOf(module_->tables[imm.table_dst.index].type))) {
+ errorf(pc_ + 2, "table %u is not a super-type of %s", imm.table_dst.index,
+ src_type.type_name());
+ return false;
+ }
return true;
}
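Both new checks enforce the same direction of wasm's reference subtyping for bulk table operations: the element type flowing in (an element segment for table.init, the source table for table.copy) must be a subtype of the destination table's type, so for example funcref entries may flow into an anyref table but not the reverse.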
@@ -1418,7 +1437,7 @@ class WasmDecoder : public Decoder {
std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
// Handle "simple" opcodes with a fixed signature first.
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!sig) sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) return {sig->parameter_count(), sig->return_count()};
@@ -1927,7 +1946,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto fval = Pop();
auto tval = Pop(0, fval.type);
ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
- if (ValueTypes::IsSubType(type, kWasmAnyRef)) {
+ if (type.IsSubTypeOf(kWasmAnyRef)) {
this->error(
"select without type is only valid for value type inputs");
break;
@@ -2367,7 +2386,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
default: {
// Deal with special asmjs opcodes.
if (this->module_ != nullptr && is_asmjs_module(this->module_)) {
- FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
+ const FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) {
BuildSimpleOperator(opcode, sig);
}
@@ -2409,7 +2428,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
opcode = static_cast<WasmOpcode>(opcode << 8 | *(val.pc + 1));
}
- TRACE_PART(" %c@%d:%s", ValueTypes::ShortNameOf(val.type),
+ TRACE_PART(" %c@%d:%s", val.type.short_name(),
static_cast<int>(val.pc - this->start_),
WasmOpcodes::OpcodeName(opcode));
// If the decoder failed, don't try to decode the immediates, as this
@@ -2479,7 +2498,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
// Pops arguments as required by signature.
- V8_INLINE ArgVector PopArgs(FunctionSig* sig) {
+ V8_INLINE ArgVector PopArgs(const FunctionSig* sig) {
int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
ArgVector args(count);
for (int i = count - 1; i >= 0; --i) {
@@ -2488,7 +2507,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return args;
}
- ValueType GetReturnType(FunctionSig* sig) {
+ ValueType GetReturnType(const FunctionSig* sig) {
DCHECK_GE(1, sig->return_count());
return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
}
@@ -2531,7 +2550,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + 1, type.size_log_2());
auto index = Pop(0, kWasmI32);
- auto* result = Push(ValueType::kWasmS128);
+ auto* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(LoadTransform, type, transform, imm, index,
result);
return imm.length;
@@ -2583,15 +2602,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (this->enabled_.has_anyref()) {
// The expected type is the biggest common sub type of all targets.
(*result_types)[i] =
- ValueTypes::CommonSubType((*result_types)[i], (*merge)[i].type);
+ ValueType::CommonSubType((*result_types)[i], (*merge)[i].type);
} else {
// All targets must have the same signature.
if ((*result_types)[i] != (*merge)[i].type) {
this->errorf(pos,
"inconsistent type in br_table target %u (previous "
"was %s, this one is %s)",
- index, ValueTypes::TypeName((*result_types)[i]),
- ValueTypes::TypeName((*merge)[i].type));
+ index, (*result_types)[i].type_name(),
+ (*merge)[i].type.type_name());
return false;
}
}
@@ -2601,7 +2620,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool TypeCheckBrTable(const std::vector<ValueType>& result_types) {
int br_arity = static_cast<int>(result_types.size());
- if (V8_LIKELY(control_.back().reachable())) {
+ if (V8_LIKELY(!control_.back().unreachable())) {
int available =
static_cast<int>(stack_.size()) - control_.back().stack_depth;
// There have to be enough values on the stack.
@@ -2616,11 +2635,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Type-check the topmost br_arity values on the stack.
for (int i = 0; i < br_arity; ++i) {
Value& val = stack_values[i];
- if (!ValueTypes::IsSubType(val.type, result_types[i])) {
+ if (!val.type.IsSubTypeOf(result_types[i])) {
this->errorf(this->pc_,
"type error in merge[%u] (expected %s, got %s)", i,
- ValueTypes::TypeName(result_types[i]),
- ValueTypes::TypeName(val.type));
+ result_types[i].type_name(), val.type.type_name());
return false;
}
}
@@ -2760,7 +2778,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
LoadTransformationKind::kExtend);
break;
default: {
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (!FLAG_wasm_simd_post_mvp &&
+ WasmOpcodes::IsSimdPostMvpOpcode(opcode)) {
+ this->error(
+ "simd opcode not available, enable with --wasm-simd-post-mvp");
+ break;
+ }
+ const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
this->error("invalid simd opcode");
break;
@@ -2777,7 +2801,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t DecodeAtomicOpcode(WasmOpcode opcode) {
uint32_t len = 0;
ValueType ret_type;
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
this->error("invalid atomic opcode");
return 0;
@@ -2825,7 +2849,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
unsigned DecodeNumericOpcode(WasmOpcode opcode) {
unsigned len = 0;
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig != nullptr) {
switch (opcode) {
case kExprI32SConvertSatF32:
@@ -2967,7 +2991,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK_EQ(c->stack_depth + merge->arity, stack_.size());
}
- Value* PushReturns(FunctionSig* sig) {
+ Value* PushReturns(const FunctionSig* sig) {
size_t return_count = sig->return_count();
if (return_count == 0) return nullptr;
size_t old_size = stack_.size();
@@ -2979,12 +3003,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
V8_INLINE Value Pop(int index, ValueType expected) {
auto val = Pop();
- if (!VALIDATE(ValueTypes::IsSubType(val.type, expected) ||
- val.type == kWasmBottom || expected == kWasmBottom)) {
+ if (!VALIDATE(val.type.IsSubTypeOf(expected) || val.type == kWasmBottom ||
+ expected == kWasmBottom)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
- SafeOpcodeNameAt(this->pc_), index,
- ValueTypes::TypeName(expected), SafeOpcodeNameAt(val.pc),
- ValueTypes::TypeName(val.type));
+ SafeOpcodeNameAt(this->pc_), index, expected.type_name(),
+ SafeOpcodeNameAt(val.pc), val.type.type_name());
}
return val;
}
@@ -3044,10 +3067,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
- if (!ValueTypes::IsSubType(val.type, old.type)) {
+ if (!val.type.IsSubTypeOf(old.type)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, ValueTypes::TypeName(old.type),
- ValueTypes::TypeName(val.type));
+ i, old.type.type_name(), val.type.type_name());
return false;
}
}
@@ -3062,10 +3084,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < c->start_merge.arity; ++i) {
Value& start = c->start_merge[i];
Value& end = c->end_merge[i];
- if (!ValueTypes::IsSubType(start.type, end.type)) {
+ if (!start.type.IsSubTypeOf(end.type)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, ValueTypes::TypeName(end.type),
- ValueTypes::TypeName(start.type));
+ i, end.type.type_name(), start.type.type_name());
return false;
}
}
@@ -3167,11 +3188,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (int i = 0; i < num_returns; ++i) {
auto& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
- if (!ValueTypes::IsSubType(val.type, expected_type)) {
+ if (!val.type.IsSubTypeOf(expected_type)) {
this->errorf(this->pc_,
"type error in return[%u] (expected %s, got %s)", i,
- ValueTypes::TypeName(expected_type),
- ValueTypes::TypeName(val.type));
+ expected_type.type_name(), val.type.type_name());
return false;
}
}
@@ -3188,11 +3208,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (WasmOpcodes::IsAnyRefOpcode(opcode)) {
RET_ON_PROTOTYPE_OPCODE(anyref);
}
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ const FunctionSig* sig = WasmOpcodes::Signature(opcode);
BuildSimpleOperator(opcode, sig);
}
- void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+ void BuildSimpleOperator(WasmOpcode opcode, const FunctionSig* sig) {
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 3113d1665b..63788bcc8d 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -59,8 +59,8 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
}
std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
- FunctionSig* sig, const byte* pc,
- const byte* end) {
+ const FunctionSig* sig,
+ const byte* pc, const byte* end) {
WasmFeatures unused_detected_features = WasmFeatures::None();
WasmDecoder<Decoder::kNoValidate> decoder(
module, WasmFeatures::All(), &unused_detected_features, sig, pc, end);
@@ -135,12 +135,12 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
if (decls.type_list[pos] == type) {
++count;
} else {
- os << " " << count << " " << ValueTypes::TypeName(type);
+ os << " " << count << " " << type.type_name();
type = decls.type_list[pos];
count = 1;
}
}
- os << " " << count << " " << ValueTypes::TypeName(type);
+ os << " " << count << " " << type.type_name();
}
os << std::endl;
if (line_numbers) line_numbers->push_back(kNoByteCode);
@@ -210,10 +210,12 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
}
+ os << " // " << WasmOpcodes::OpcodeName(opcode);
+
switch (opcode) {
case kExprElse:
case kExprCatch:
- os << " // @" << i.pc_offset();
+ os << " @" << i.pc_offset();
control_depth++;
break;
case kExprLoop:
@@ -222,38 +224,38 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprTry: {
BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
i.pc());
- os << " // @" << i.pc_offset();
+ os << " @" << i.pc_offset();
if (decoder.Complete(imm)) {
for (uint32_t i = 0; i < imm.out_arity(); i++) {
- os << " " << ValueTypes::TypeName(imm.out_type(i));
+ os << " " << imm.out_type(i).type_name();
}
}
control_depth++;
break;
}
case kExprEnd:
- os << " // @" << i.pc_offset();
+ os << " @" << i.pc_offset();
control_depth--;
break;
case kExprBr: {
BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- os << " // depth=" << imm.depth;
+ os << " depth=" << imm.depth;
break;
}
case kExprBrIf: {
BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- os << " // depth=" << imm.depth;
+ os << " depth=" << imm.depth;
break;
}
case kExprBrTable: {
BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- os << " // entries=" << imm.table_count;
+ os << " entries=" << imm.table_count;
break;
}
case kExprCallIndirect: {
CallIndirectImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
i.pc());
- os << " // sig #" << imm.sig_index;
+ os << " sig #" << imm.sig_index;
if (decoder.Complete(i.pc(), imm)) {
os << ": " << *imm.sig;
}
@@ -261,7 +263,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
case kExprCallFunction: {
CallFunctionImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- os << " // function #" << imm.index;
+ os << " function #" << imm.index;
if (decoder.Complete(i.pc(), imm)) {
os << ": " << *imm.sig;
}
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index ef91d8a0e9..f542540233 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -24,12 +24,12 @@ struct WasmModule; // forward declaration of module interface.
// A wrapper around the signature and bytes of a function.
struct FunctionBody {
- FunctionSig* sig; // function signature
- uint32_t offset; // offset in the module bytes, for error reporting
- const byte* start; // start of the function body
- const byte* end; // end of the function body
+ const FunctionSig* sig; // function signature
+ uint32_t offset; // offset in the module bytes, for error reporting
+ const byte* start; // start of the function body
+ const byte* end; // end of the function body
- FunctionBody(FunctionSig* sig, uint32_t offset, const byte* start,
+ FunctionBody(const FunctionSig* sig, uint32_t offset, const byte* start,
const byte* end)
: sig(sig), offset(offset), start(start), end(end) {}
};
@@ -81,8 +81,8 @@ V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
// local stack effect (e.g. BrIf pops 1, Br pops 0). Those opcodes can have
// non-local stack effect though, which are not covered here.
std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
- FunctionSig* sig, const byte* pc,
- const byte* end);
+ const FunctionSig* sig,
+ const byte* pc, const byte* end);
// A simple forward iterator for bytecodes.
class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index f285deca1c..1b6b83a3b1 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -145,7 +145,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
WasmCompilationResult WasmCompilationUnit::ExecuteImportWrapperCompilation(
WasmEngine* engine, CompilationEnv* env) {
- FunctionSig* sig = env->module->functions[func_index_].sig;
+ const FunctionSig* sig = env->module->functions[func_index_].sig;
// Assume the wrapper is going to be a JS function with matching arity at
// instantiation time.
auto kind = compiler::kDefaultImportCallKind;
@@ -265,8 +265,8 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
}
JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
- Isolate* isolate, WasmEngine* wasm_engine, FunctionSig* sig, bool is_import,
- const WasmFeatures& enabled_features)
+ Isolate* isolate, WasmEngine* wasm_engine, const FunctionSig* sig,
+ bool is_import, const WasmFeatures& enabled_features)
: is_import_(is_import),
sig_(sig),
job_(compiler::NewJSToWasmCompilationJob(isolate, wasm_engine, sig,
@@ -293,7 +293,7 @@ Handle<Code> JSToWasmWrapperCompilationUnit::Finalize(Isolate* isolate) {
// static
Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
- Isolate* isolate, FunctionSig* sig, bool is_import) {
+ Isolate* isolate, const FunctionSig* sig, bool is_import) {
// Run the compilation unit synchronously.
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 9d6d40e140..a511f19b76 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -67,8 +67,8 @@ struct WasmCompilationResult {
uint32_t frame_slot_count = 0;
uint32_t tagged_parameter_slots = 0;
OwnedVector<byte> source_positions;
- OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions;
- int func_index = static_cast<int>(kAnonymousFuncIndex);
+ OwnedVector<byte> protected_instructions_data;
+ int func_index = kAnonymousFuncIndex;
ExecutionTier requested_tier;
ExecutionTier result_tier;
Kind kind = kFunction;
@@ -113,7 +113,7 @@ STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
public:
JSToWasmWrapperCompilationUnit(Isolate* isolate, WasmEngine* wasm_engine,
- FunctionSig* sig, bool is_import,
+ const FunctionSig* sig, bool is_import,
const WasmFeatures& enabled_features);
~JSToWasmWrapperCompilationUnit();
@@ -121,15 +121,16 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
Handle<Code> Finalize(Isolate* isolate);
bool is_import() const { return is_import_; }
- FunctionSig* sig() const { return sig_; }
+ const FunctionSig* sig() const { return sig_; }
// Run a compilation unit synchronously.
- static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
+ static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+ const FunctionSig* sig,
bool is_import);
private:
bool is_import_;
- FunctionSig* sig_;
+ const FunctionSig* sig_;
std::unique_ptr<OptimizedCompilationJob> job_;
};
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index e2ddafbf30..5f73f27200 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -397,8 +397,9 @@ class WasmGraphBuildingInterface {
LoadTransformationKind transform,
const MemoryAccessImmediate<validate>& imm,
const Value& index, Value* result) {
- result->node = BUILD(LoadTransform, type.mem_type(), transform, index.node,
- imm.offset, imm.alignment, decoder->position());
+ result->node =
+ BUILD(LoadTransform, type.value_type(), type.mem_type(), transform,
+ index.node, imm.offset, imm.alignment, decoder->position());
}
void StoreMem(FullDecoder* decoder, StoreType type,
@@ -491,7 +492,8 @@ class WasmGraphBuildingInterface {
TFNode* if_no_match = nullptr;
// Get the exception tag and see if it matches the expected one.
- TFNode* caught_tag = BUILD(GetExceptionTag, exception.node);
+ TFNode* caught_tag =
+ BUILD(GetExceptionTag, exception.node, decoder->position());
TFNode* exception_tag = BUILD(LoadExceptionTagFromTable, imm.index);
TFNode* compare = BUILD(ExceptionTagEqual, caught_tag, exception_tag);
BUILD(BranchNoHint, compare, &if_match, &if_no_match);
@@ -694,21 +696,21 @@ class WasmGraphBuildingInterface {
}
TFNode* DefaultValue(ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
return builder_->Int32Constant(0);
- case kWasmI64:
+ case ValueType::kI64:
return builder_->Int64Constant(0);
- case kWasmF32:
+ case ValueType::kF32:
return builder_->Float32Constant(0);
- case kWasmF64:
+ case ValueType::kF64:
return builder_->Float64Constant(0);
- case kWasmS128:
+ case ValueType::kS128:
return builder_->S128Zero();
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef:
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef:
return builder_->RefNull();
default:
UNREACHABLE();
@@ -729,13 +731,12 @@ class WasmGraphBuildingInterface {
Value& val = values[i];
Value& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
- DCHECK(val.type == kWasmBottom ||
- ValueTypes::MachineRepresentationFor(val.type) ==
- ValueTypes::MachineRepresentationFor(old.type));
+ DCHECK(val.type == kWasmBottom || val.type.machine_representation() ==
+ old.type.machine_representation());
old.node = first ? val.node
: builder_->CreateOrMergeIntoPhi(
- ValueTypes::MachineRepresentationFor(old.type),
- target->control, old.node, val.node);
+ old.type.machine_representation(), target->control,
+ old.node, val.node);
}
}
@@ -797,8 +798,8 @@ class WasmGraphBuildingInterface {
// Merge locals.
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
to->locals[i] = builder_->CreateOrMergeIntoPhi(
- ValueTypes::MachineRepresentationFor(decoder->GetLocalType(i)),
- merge, to->locals[i], ssa_env_->locals[i]);
+ decoder->GetLocalType(i).machine_representation(), merge,
+ to->locals[i], ssa_env_->locals[i]);
}
// Merge the instance caches.
builder_->MergeInstanceCacheInto(&to->instance_cache,
@@ -910,7 +911,7 @@ class WasmGraphBuildingInterface {
}
void DoCall(FullDecoder* decoder, uint32_t table_index, TFNode* index_node,
- FunctionSig* sig, uint32_t sig_index, const Value args[],
+ const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
size_t param_count = sig->parameter_count();
size_t return_count = sig->return_count();
@@ -936,8 +937,8 @@ class WasmGraphBuildingInterface {
}
void DoReturnCall(FullDecoder* decoder, uint32_t table_index,
- TFNode* index_node, FunctionSig* sig, uint32_t sig_index,
- const Value args[]) {
+ TFNode* index_node, const FunctionSig* sig,
+ uint32_t sig_index, const Value args[]) {
size_t arg_count = sig->parameter_count();
base::SmallVector<TFNode*, 16> arg_nodes(arg_count + 1);
arg_nodes[0] = index_node;
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index ad73562d26..90cdad4672 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -134,6 +134,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
int start = pc_offset();
+ CodeEntry(); // 0-1 instr
Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // 1-2 instr
Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
@@ -147,6 +148,8 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) {
return false;
}
+ CodeEntry();
+
Jump(target, RelocInfo::NONE);
return true;
}
@@ -157,22 +160,31 @@ void JumpTableAssembler::EmitFarJumpSlot(Address target) {
// will only be called for the very specific runtime slot table, and we want
// to have maximum control over the generated code.
// Do not reuse this code without validating that the same assumptions hold.
+ CodeEntry(); // 0-1 instructions
constexpr Register kTmpReg = x16;
DCHECK(TmpList()->IncludesAliasOf(kTmpReg));
- // Load from [pc + 2 * kInstrSize] to {kTmpReg}, then branch there.
- ldr_pcrel(kTmpReg, 2); // 1 instruction
- br(kTmpReg); // 1 instruction
- dq(target); // 8 bytes (== 2 instructions)
+ int kOffset = ENABLE_CONTROL_FLOW_INTEGRITY_BOOL ? 3 : 2;
+ // Load from [pc + kOffset * kInstrSize] to {kTmpReg}, then branch there.
+ ldr_pcrel(kTmpReg, kOffset); // 1 instruction
+ br(kTmpReg); // 1 instruction
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ nop(); // To keep the target below aligned to kSystemPointerSize.
+#endif
+ dq(target); // 8 bytes (== 2 instructions)
STATIC_ASSERT(2 * kInstrSize == kSystemPointerSize);
- STATIC_ASSERT(kFarJumpTableSlotSize == 4 * kInstrSize);
+ const int kSlotCount = ENABLE_CONTROL_FLOW_INTEGRITY_BOOL ? 6 : 4;
+ STATIC_ASSERT(kFarJumpTableSlotSize == kSlotCount * kInstrSize);
}
// static
void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ // See {EmitFarJumpSlot} for the offset of the target (16 bytes with
+ // CFI enabled, 8 bytes otherwise).
+ int kTargetOffset =
+ ENABLE_CONTROL_FLOW_INTEGRITY_BOOL ? 4 * kInstrSize : 2 * kInstrSize;
// The slot needs to be pointer-size aligned so we can atomically update it.
- DCHECK(IsAligned(slot, kSystemPointerSize));
- // Offset of the target is at 8 bytes, see {EmitFarJumpSlot}.
- reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize)
+ DCHECK(IsAligned(slot + kTargetOffset, kSystemPointerSize));
+ reinterpret_cast<std::atomic<Address>*>(slot + kTargetOffset)
->store(target, std::memory_order_relaxed);
// The data update is guaranteed to be atomic since it's properly aligned
// and stores a single machine word. This update will eventually be observed
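The arm64 far-jump slot layout implied by these changes (and by the constants in jump-table-assembler.h below, with kInstrSize == 4) works out as follows: with control-flow integrity enabled the slot is bti + ldr + br + nop + an 8-byte target, i.e. 6 * 4 = 24 bytes with the target at offset 4 * kInstrSize == 16, which the added nop keeps pointer-aligned for the atomic store in PatchFarJumpSlot; without CFI it stays ldr + br + target, 4 * 4 = 16 bytes with the target at offset 8.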
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 4aeee29e26..253f0bc018 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -20,9 +20,8 @@ namespace wasm {
// other purposes:
// - the far stub table contains one entry per wasm runtime stub (see
// {WasmCode::RuntimeStubId}, which jumps to the corresponding embedded
-// builtin, plus (if {FLAG_wasm_far_jump_table} is enabled and not the full
-// address space can be reached via the jump table) one entry per wasm
-// function.
+// builtin, plus (if the full address space cannot be reached via the jump
+// table) one entry per wasm function.
// - the lazy compile table contains one entry per wasm function which jumps to
// the common {WasmCompileLazy} builtin and passes the function index that was
// invoked.
@@ -186,7 +185,12 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
static constexpr int kFarJumpTableSlotSize = 2 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 5 * kInstrSize;
-#elif V8_TARGET_ARCH_ARM64
+#elif V8_TARGET_ARCH_ARM64 && V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ static constexpr int kJumpTableLineSize = 2 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 2 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 4 * kInstrSize;
+#elif V8_TARGET_ARCH_ARM64 && !V8_ENABLE_CONTROL_FLOW_INTEGRITY
static constexpr int kJumpTableLineSize = 1 * kInstrSize;
static constexpr int kJumpTableSlotSize = 1 * kInstrSize;
static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index ba1ebffe2c..37a210086b 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -29,7 +29,7 @@ size_t LocalDeclEncoder::Emit(byte* buffer) const {
LEBHelper::write_u32v(&pos, static_cast<uint32_t>(local_decls.size()));
for (auto& local_decl : local_decls) {
LEBHelper::write_u32v(&pos, local_decl.first);
- *pos = ValueTypes::ValueTypeCodeFor(local_decl.second);
+ *pos = local_decl.second.value_type_code();
++pos;
}
DCHECK_EQ(Size(), pos - buffer);
diff --git a/deps/v8/src/wasm/local-decl-encoder.h b/deps/v8/src/wasm/local-decl-encoder.h
index 6fd2314d2d..5bce05871c 100644
--- a/deps/v8/src/wasm/local-decl-encoder.h
+++ b/deps/v8/src/wasm/local-decl-encoder.h
@@ -14,11 +14,10 @@ namespace v8 {
namespace internal {
namespace wasm {
-// A helper for encoding local declarations prepended to the body of a
-// function.
+// A helper for encoding local declarations prepended to the body of a function.
class V8_EXPORT_PRIVATE LocalDeclEncoder {
public:
- explicit LocalDeclEncoder(Zone* zone, FunctionSig* s = nullptr)
+ explicit LocalDeclEncoder(Zone* zone, const FunctionSig* s = nullptr)
: sig(s), local_decls(zone), total(0) {}
// Prepend local declarations by creating a new buffer and copying data
@@ -34,11 +33,11 @@ class V8_EXPORT_PRIVATE LocalDeclEncoder {
size_t Size() const;
bool has_sig() const { return sig != nullptr; }
- FunctionSig* get_sig() const { return sig; }
- void set_sig(FunctionSig* s) { sig = s; }
+ const FunctionSig* get_sig() const { return sig; }
+ void set_sig(const FunctionSig* s) { sig = s; }
private:
- FunctionSig* sig;
+ const FunctionSig* sig;
ZoneVector<std::pair<uint32_t, ValueType>> local_decls;
size_t total;
};
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index ca5c785247..369dcfd9f7 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -381,8 +381,13 @@ class CompilationStateImpl {
// is invoked which triggers background compilation.
void InitializeCompilationProgress(bool lazy_module, int num_wrappers);
- // Initialize compilation progress for recompilation of the whole module.
- void InitializeRecompilationProgress(ExecutionTier tier);
+ // Initialize recompilation of the whole module: Set up compilation progress
+ // for recompilation and add the respective compilation units. The callback is
+ // called immediately if no recompilation is needed, or called later
+ // otherwise.
+ void InitializeRecompilation(
+ ExecutionTier tier,
+ CompilationState::callback_t recompilation_finished_callback);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@@ -404,11 +409,8 @@ class CompilationStateImpl {
void FinalizeJSToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray>* export_wrappers_out);
- void OnFinishedUnits(Vector<WasmCode*>);
+ void OnFinishedUnits(Vector<WasmCode*>, Vector<WasmCompilationResult>);
void OnFinishedJSToWasmWrapperUnits(int num);
- void TriggerCallbacks(bool completes_baseline_compilation,
- bool completes_top_tier_compilation,
- bool completes_recompilation = false);
void OnBackgroundTaskStopped(int task_id, const WasmFeatures& detected);
void UpdateDetectedFeatures(const WasmFeatures& detected);
@@ -481,6 +483,11 @@ class CompilationStateImpl {
}
private:
+ // Trigger callbacks according to the internal counters below
+ // (outstanding_...), plus the given events.
+ // Hold the {callbacks_mutex_} when calling this method.
+ void TriggerCallbacks(base::EnumSet<CompilationEvent> additional_events = {});
+
NativeModule* const native_module_;
const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
const CompileMode compile_mode_;
@@ -541,11 +548,15 @@ class CompilationStateImpl {
// Callback functions to be called on compilation events.
std::vector<CompilationState::callback_t> callbacks_;
+ // Events that already happened.
+ base::EnumSet<CompilationEvent> finished_events_;
+
int outstanding_baseline_units_ = 0;
int outstanding_top_tier_functions_ = 0;
std::vector<uint8_t> compilation_progress_;
int outstanding_recompilation_functions_ = 0;
+ ExecutionTier recompilation_tier_;
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -553,6 +564,7 @@ class CompilationStateImpl {
using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
using ReachedTierField = base::BitField8<ExecutionTier, 4, 2>;
+ using ReachedRecompilationTierField = base::BitField8<ExecutionTier, 6, 2>;
};
CompilationStateImpl* Impl(CompilationState* compilation_state) {
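The {BitField8} aliases above pack four 2-bit tier fields into a single per-function progress byte; the new {ReachedRecompilationTierField} occupies the last free bit pair (bits 6-7). A minimal stand-in sketch, assuming a simplified BitField8 and illustrative tier values:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for v8::base::BitField8: packs a value of type T
// into bits [kShift, kShift + kSize) of a uint8_t.
template <typename T, int kShift, int kSize>
struct BitField8 {
  static constexpr uint8_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint8_t encode(T value) {
    return static_cast<uint8_t>(static_cast<unsigned>(value) << kShift);
  }
  static constexpr uint8_t update(uint8_t previous, T value) {
    return static_cast<uint8_t>((previous & ~kMask) | encode(value));
  }
  static constexpr T decode(uint8_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

// Illustrative tier values only; two bits suffice for the real enum as well.
enum class Tier : uint8_t { kNone = 0, kLiftoff = 1, kTurbofan = 2 };

using RequiredBaselineTier = BitField8<Tier, 0, 2>;
using RequiredTopTier = BitField8<Tier, 2, 2>;
using ReachedTier = BitField8<Tier, 4, 2>;
using ReachedRecompilationTier = BitField8<Tier, 6, 2>;  // bits 6-7, last pair

int main() {
  uint8_t progress = 0;  // one byte of per-function compilation progress
  progress = RequiredBaselineTier::update(progress, Tier::kLiftoff);
  progress = RequiredTopTier::update(progress, Tier::kTurbofan);
  progress = ReachedRecompilationTier::update(progress, Tier::kLiftoff);
  assert(RequiredBaselineTier::decode(progress) == Tier::kLiftoff);
  assert(ReachedRecompilationTier::decode(progress) == Tier::kLiftoff);
  return 0;
}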
@@ -761,8 +773,9 @@ class CompilationUnitBuilder {
tiering_units_.emplace_back(func_index, tiers.top_tier);
}
- void AddBaselineUnit(int func_index) {
- baseline_units_.emplace_back(func_index, ExecutionTier::kLiftoff);
+ void AddRecompilationUnit(int func_index, ExecutionTier tier) {
+ // For recompilation, just treat all units like baseline units.
+ baseline_units_.emplace_back(func_index, tier);
}
bool Commit() {
@@ -821,13 +834,8 @@ DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index,
FunctionBody body{func->sig, func->code.offset(), code.begin(), code.end()};
DecodeResult result;
- auto time_counter =
- SELECT_WASM_COUNTER(counters, module->origin, wasm_decode, function_time);
- TimedHistogramScope wasm_decode_function_time_scope(time_counter);
WasmFeatures detected;
- result = VerifyWasmCode(allocator, enabled_features, module, &detected, body);
-
- return result;
+ return VerifyWasmCode(allocator, enabled_features, module, &detected, body);
}
enum OnlyLazyFunctions : bool {
@@ -879,12 +887,8 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
Counters* counters = isolate->counters();
DCHECK(!native_module->lazy_compile_frozen());
- HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
NativeModuleModificationScope native_module_modification_scope(native_module);
- base::ElapsedTimer compilation_timer;
- compilation_timer.Start();
-
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
CompilationStateImpl* compilation_state =
@@ -924,14 +928,8 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
- double func_kb = 1e-3 * func->code.length();
- double compilation_seconds = compilation_timer.Elapsed().InSecondsF();
-
counters->wasm_lazily_compiled_functions()->Increment();
- int throughput_sample = static_cast<int>(func_kb / compilation_seconds);
- counters->wasm_lazy_compilation_throughput()->AddSample(throughput_sample);
-
const bool lazy_module = IsLazyModule(module);
if (GetCompileStrategy(module, enabled_features, func_index, lazy_module) ==
CompileStrategy::kLazy &&
@@ -970,7 +968,7 @@ bool ExecuteJSToWasmWrapperCompilationUnits(
++num_processed_wrappers;
}
} while (wrapper_unit);
- {
+ if (num_processed_wrappers > 0) {
BackgroundCompileScope compile_scope(token);
if (compile_scope.cancelled()) return false;
compile_scope.compilation_state()->OnFinishedJSToWasmWrapperUnits(
@@ -991,7 +989,7 @@ bool ExecuteCompilationUnits(
TRACE_COMPILE("Compiling (task %d)...\n", task_id);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ExecuteCompilationUnits");
- // Execute JS to WASM wrapper units first, so that they are ready to be
+ // Execute JS to Wasm wrapper units first, so that they are ready to be
// finalized by the main thread when the kFinishedBaselineCompilation event is
// triggered.
if (!ExecuteJSToWasmWrapperCompilationUnits(token)) {
@@ -1069,7 +1067,8 @@ bool ExecuteCompilationUnits(
DCHECK_LE(0, func_index);
DCHECK_LT(func_index, native_module->num_functions());
if (func_index < num_imported_functions) {
- FunctionSig* sig = native_module->module()->functions[func_index].sig;
+ const FunctionSig* sig =
+ native_module->module()->functions[func_index].sig;
WasmImportWrapperCache::CacheKey key(compiler::kDefaultImportCallKind,
sig);
// If two imported functions have the same key, only one of them should
@@ -1083,7 +1082,8 @@ bool ExecuteCompilationUnits(
native_module->engine()->LogCode(VectorOf(code_vector));
- compile_scope->compilation_state()->OnFinishedUnits(VectorOf(code_vector));
+ compile_scope->compilation_state()->OnFinishedUnits(
+ VectorOf(code_vector), VectorOf(results_to_publish));
results_to_publish.clear();
};
@@ -1165,7 +1165,7 @@ int AddImportWrapperUnits(NativeModule* native_module,
keys;
int num_imported_functions = native_module->num_imported_functions();
for (int func_index = 0; func_index < num_imported_functions; func_index++) {
- FunctionSig* sig = native_module->module()->functions[func_index].sig;
+ const FunctionSig* sig = native_module->module()->functions[func_index].sig;
if (!IsJSCompatibleSignature(sig, native_module->enabled_features())) {
continue;
}
@@ -1188,10 +1188,15 @@ void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
CompilationUnitBuilder builder(native_module);
auto* module = native_module->module();
+ const bool prefer_liftoff = native_module->IsTieredDown();
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
+ if (prefer_liftoff) {
+ builder.AddRecompilationUnit(func_index, ExecutionTier::kLiftoff);
+ continue;
+ }
CompileStrategy strategy = GetCompileStrategy(
module, native_module->enabled_features(), func_index, lazy_module);
if (strategy == CompileStrategy::kLazy) {
@@ -1213,19 +1218,6 @@ void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
builder.Commit();
}
-void AddBaselineCompilationUnits(NativeModule* native_module) {
- CompilationUnitBuilder builder(native_module);
- auto* module = native_module->module();
-
- uint32_t start = module->num_imported_functions;
- uint32_t end = start + module->num_declared_functions;
- for (uint32_t func_index = start; func_index < end; func_index++) {
- builder.AddBaselineUnit(func_index);
- }
-
- builder.Commit();
-}
-
bool MayCompriseLazyFunctions(const WasmModule* module,
const WasmFeatures& enabled_features,
bool lazy_module) {
@@ -1377,8 +1369,8 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
Handle<FixedArray>* export_wrappers_out) {
const WasmModule* wasm_module = module.get();
std::shared_ptr<NativeModule> native_module =
- isolate->wasm_engine()->MaybeGetNativeModule(wasm_module->origin,
- wire_bytes.module_bytes());
+ isolate->wasm_engine()->MaybeGetNativeModule(
+ wasm_module->origin, wire_bytes.module_bytes(), isolate);
if (native_module) {
// TODO(thibaudm): Look into sharing export wrappers.
CompileJsToWasmWrappers(isolate, wasm_module, export_wrappers_out);
@@ -1401,14 +1393,18 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get(),
uses_liftoff);
native_module = isolate->wasm_engine()->NewNativeModule(
- isolate, enabled, std::move(module), code_size_estimate);
+ isolate, enabled, module, code_size_estimate);
native_module->SetWireBytes(std::move(wire_bytes_copy));
CompileNativeModule(isolate, thrower, wasm_module, native_module.get());
- isolate->wasm_engine()->UpdateNativeModuleCache(native_module,
- thrower->error());
+ bool cache_hit = !isolate->wasm_engine()->UpdateNativeModuleCache(
+ thrower->error(), &native_module, isolate);
if (thrower->error()) return {};
+ if (cache_hit) {
+ CompileJsToWasmWrappers(isolate, wasm_module, export_wrappers_out);
+ return native_module;
+ }
Impl(native_module->compilation_state())
->FinalizeJSToWasmWrappers(isolate, native_module->module(),
export_wrappers_out);
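The reworked flow above queries the engine's native module cache before compiling and calls {UpdateNativeModuleCache} afterwards, adopting a conflicting module if another thread won the race. A hypothetical sketch of that protocol (a simplified single-map cache, not the actual WasmEngine API):

#include <map>
#include <memory>
#include <mutex>
#include <string>

struct NativeModule {
  std::string wire_bytes;  // stands in for the full module state
};

class NativeModuleCache {
 public:
  // Look up before compiling; returns null on a miss.
  std::shared_ptr<NativeModule> MaybeGet(const std::string& wire_bytes) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = map_.find(wire_bytes);
    return it == map_.end() ? nullptr : it->second.lock();
  }
  // After compiling: returns true if {*module} stays as-is (inserted, or
  // erased on error); returns false on a conflict, replacing {*module} with
  // the module another thread cached first (a "cache hit").
  bool Update(bool error, std::shared_ptr<NativeModule>* module) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (error) {
      map_.erase((*module)->wire_bytes);
      return true;
    }
    std::weak_ptr<NativeModule>& slot = map_[(*module)->wire_bytes];
    if (std::shared_ptr<NativeModule> existing = slot.lock()) {
      *module = existing;
      return false;
    }
    slot = *module;
    return true;
  }

 private:
  std::mutex mutex_;
  std::map<std::string, std::weak_ptr<NativeModule>> map_;
};

int main() {
  NativeModuleCache cache;
  auto module = std::make_shared<NativeModule>(NativeModule{"module-bytes"});
  bool inserted = cache.Update(/*error=*/false, &module);
  bool hit = cache.MaybeGet("module-bytes") == module;
  return (inserted && hit) ? 0 : 1;
}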
@@ -1424,33 +1420,33 @@ void RecompileNativeModule(Isolate* isolate, NativeModule* native_module,
// Install a callback to notify us once background recompilation finished.
auto recompilation_finished_semaphore = std::make_shared<base::Semaphore>(0);
auto* compilation_state = Impl(native_module->compilation_state());
- DCHECK_EQ(tier, ExecutionTier::kLiftoff);
+ DCHECK(tier == ExecutionTier::kTurbofan || tier == ExecutionTier::kLiftoff);
// The callback captures a shared ptr to the semaphore.
- compilation_state->AddCallback(
- [recompilation_finished_semaphore](CompilationEvent event) {
+ // Initialize the compilation units and kick off background compile tasks.
+ compilation_state->InitializeRecompilation(
+ tier, [recompilation_finished_semaphore](CompilationEvent event) {
if (event == CompilationEvent::kFinishedRecompilation) {
recompilation_finished_semaphore->Signal();
}
});
- // Initialize the compilation units and kick off background compile tasks.
- compilation_state->InitializeRecompilationProgress(tier);
- AddBaselineCompilationUnits(native_module);
-
- // The main thread contributes to the compilation, except if we need
- // deterministic compilation; in that case, the single background task will
- // execute all compilation.
- if (!NeedsDeterministicCompile()) {
- while (ExecuteCompilationUnits(
- compilation_state->background_compile_token(), isolate->counters(),
- kMainThreadTaskId, kBaselineOnly)) {
- // Continue executing compilation units.
+ // For tier down only.
+ if (tier == ExecutionTier::kLiftoff) {
+ // The main thread contributes to the compilation, except if we need
+ // deterministic compilation; in that case, the single background task will
+ // execute all compilation.
+ if (!NeedsDeterministicCompile()) {
+ while (ExecuteCompilationUnits(
+ compilation_state->background_compile_token(), isolate->counters(),
+ kMainThreadTaskId, kBaselineOnly)) {
+ // Continue executing compilation units.
+ }
}
- }
- // Now wait until baseline recompilation finished.
- recompilation_finished_semaphore->Wait();
- DCHECK(!compilation_state->failed());
+ // Now wait until baseline recompilation finished.
+ recompilation_finished_semaphore->Wait();
+ DCHECK(!compilation_state->failed());
+ }
}
AsyncCompileJob::AsyncCompileJob(
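{RecompileNativeModule} now hands the completion callback to {InitializeRecompilation} and, for tier-down, blocks on a semaphore until {kFinishedRecompilation} fires. A small sketch of the same wait pattern using standard primitives (the Semaphore below is a stand-in for {base::Semaphore}):

#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <thread>

// Stand-in for base::Semaphore, built from standard primitives.
class Semaphore {
 public:
  void Signal() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      ++count_;
    }
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

enum class CompilationEvent {
  kFinishedBaselineCompilation,
  kFinishedRecompilation,
};

int main() {
  auto recompilation_finished = std::make_shared<Semaphore>();
  // The callback captures a shared_ptr, mirroring the pattern above.
  std::function<void(CompilationEvent)> callback =
      [recompilation_finished](CompilationEvent event) {
        if (event == CompilationEvent::kFinishedRecompilation) {
          recompilation_finished->Signal();
        }
      };
  // A background "compilation task" eventually fires the event.
  std::thread background(
      [callback] { callback(CompilationEvent::kFinishedRecompilation); });
  recompilation_finished->Wait();  // Block until recompilation finished.
  background.join();
  return 0;
}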
@@ -1488,7 +1484,11 @@ void AsyncCompileJob::Abort() {
class AsyncStreamingProcessor final : public StreamingProcessor {
public:
- explicit AsyncStreamingProcessor(AsyncCompileJob* job);
+ explicit AsyncStreamingProcessor(AsyncCompileJob* job,
+ std::shared_ptr<Counters> counters,
+ AccountingAllocator* allocator);
+
+ ~AsyncStreamingProcessor();
bool ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) override;
@@ -1525,12 +1525,21 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
WasmEngine* wasm_engine_;
std::unique_ptr<CompilationUnitBuilder> compilation_unit_builder_;
int num_functions_ = 0;
+ bool prefix_cache_hit_ = false;
+ bool before_code_section_ = true;
+ std::shared_ptr<Counters> async_counters_;
+ AccountingAllocator* allocator_;
+
+ // Running hash of the wire bytes up to and including the code section size,
+ // but excluding the code section body itself. Used by the
+ // {NativeModuleCache} to detect potential duplicate modules.
+ size_t prefix_hash_;
};
std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
DCHECK_NULL(stream_);
- stream_.reset(
- new StreamingDecoder(std::make_unique<AsyncStreamingProcessor>(this)));
+ stream_.reset(new StreamingDecoder(std::make_unique<AsyncStreamingProcessor>(
+ this, isolate_->async_counters(), isolate_->allocator())));
return stream_;
}
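The new {prefix_hash_} member is accumulated incrementally: the module header is hashed in {ProcessModuleHeader}, every pre-code section is combined in {ProcessSection}, and the code section size is folded in by {ProcessCodeSectionHeader}. A sketch of that accumulation, with hypothetical stand-ins for {base::hash_combine} and {NativeModuleCache::WireBytesHash}:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins; the real hashing lives in NativeModuleCache and
// base::hash_combine.
size_t WireBytesHash(const std::vector<uint8_t>& bytes) {
  size_t hash = 0;
  for (uint8_t b : bytes) hash = hash * 31 + b;  // illustrative only
  return hash;
}
size_t hash_combine(size_t seed, size_t value) {
  return seed ^ (value + 0x9e3779b9u + (seed << 6) + (seed >> 2));
}

int main() {
  std::vector<uint8_t> module_header = {0x00, 0x61, 0x73, 0x6d, 1, 0, 0, 0};
  std::vector<uint8_t> type_section = {0x60, 0x00, 0x00};
  uint32_t code_section_length = 1234;

  // ProcessModuleHeader: seed with the header bytes.
  size_t prefix_hash = WireBytesHash(module_header);
  // ProcessSection: fold in every section before the code section.
  prefix_hash = hash_combine(prefix_hash, WireBytesHash(type_section));
  // ProcessCodeSectionHeader: fold in the code section size, not its body.
  prefix_hash =
      hash_combine(prefix_hash, static_cast<size_t>(code_section_length));

  std::printf("prefix hash: %zu\n", prefix_hash);
  return 0;
}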
@@ -1566,16 +1575,21 @@ void AsyncCompileJob::CreateNativeModule(
// Create the module object and populate with compiled functions and
// information needed at instantiation time.
- // TODO(clemensb): For the same module (same bytes / same hash), we should
- // only have one {WasmModuleObject}. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
- // Create the module object.
native_module_ = isolate_->wasm_engine()->NewNativeModule(
isolate_, enabled_features_, std::move(module), code_size_estimate);
native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
+}
- if (stream_) stream_->NotifyNativeModuleCreated(native_module_);
+bool AsyncCompileJob::GetOrCreateNativeModule(
+ std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
+ native_module_ = isolate_->wasm_engine()->MaybeGetNativeModule(
+ module->origin, wire_bytes_.module_bytes(), isolate_);
+ if (native_module_ == nullptr) {
+ CreateNativeModule(std::move(module), code_size_estimate);
+ return false;
+ }
+ return true;
}
void AsyncCompileJob::PrepareRuntimeObjects() {
@@ -1584,9 +1598,9 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
DCHECK(module_object_.is_null());
const WasmModule* module = native_module_->module();
auto source_url = stream_ ? stream_->url() : Vector<const char>();
- Handle<Script> script =
- CreateWasmScript(isolate_, wire_bytes_, VectorOf(module->source_map_url),
- module->name, source_url);
+ Handle<Script> script = CreateWasmScript(
+ isolate_, native_module_->wire_bytes(), VectorOf(module->source_map_url),
+ module->name, source_url);
Handle<WasmModuleObject> module_object =
WasmModuleObject::New(isolate_, native_module_, script);
@@ -1596,11 +1610,15 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
-void AsyncCompileJob::FinishCompile() {
+void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"AsyncCompileJob::FinishCompile");
bool is_after_deserialization = !module_object_.is_null();
+ auto compilation_state = Impl(native_module_->compilation_state());
if (!is_after_deserialization) {
+ if (stream_) {
+ stream_->NotifyNativeModuleCreated(native_module_);
+ }
PrepareRuntimeObjects();
}
@@ -1627,14 +1645,18 @@ void AsyncCompileJob::FinishCompile() {
isolate_->debug()->OnAfterCompile(script);
}
- auto compilation_state =
- Impl(module_object_->native_module()->compilation_state());
// TODO(bbudge) Allow deserialization without wrapper compilation, so we can
// just compile wrappers here.
if (!is_after_deserialization) {
Handle<FixedArray> export_wrappers;
- compilation_state->FinalizeJSToWasmWrappers(
- isolate_, module_object_->module(), &export_wrappers);
+ if (is_after_cache_hit) {
+ // TODO(thibaudm): Look into sharing wrappers.
+ CompileJsToWasmWrappers(isolate_, module_object_->module(),
+ &export_wrappers);
+ } else {
+ compilation_state->FinalizeJSToWasmWrappers(
+ isolate_, module_object_->module(), &export_wrappers);
+ }
module_object_->set_export_wrappers(*export_wrappers);
}
// We can only update the feature counts once the entire compile is done.
@@ -1682,12 +1704,16 @@ class AsyncCompileJob::CompilationStateCallback {
case CompilationEvent::kFinishedBaselineCompilation:
DCHECK(!last_event_.has_value());
if (job_->DecrementAndCheckFinisherCount()) {
- // TODO(v8:6847): Also share streaming compilation result.
- if (job_->stream_ == nullptr) {
- job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
- job_->native_module_, false);
- }
- job_->DoSync<CompileFinished>();
+ // Install the native module in the cache, or reuse a conflicting one.
+ // If we get a conflicting module, wait until we are back in the
+ // main thread to update {job_->native_module_} to avoid a data race.
+ std::shared_ptr<NativeModule> native_module = job_->native_module_;
+ bool cache_hit =
+ !job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
+ false, &native_module, job_->isolate_);
+ DCHECK_EQ(cache_hit, native_module != job_->native_module_);
+ job_->DoSync<CompileFinished>(cache_hit ? std::move(native_module)
+ : nullptr);
}
break;
case CompilationEvent::kFinishedTopTierCompilation:
@@ -1698,11 +1724,8 @@ class AsyncCompileJob::CompilationStateCallback {
case CompilationEvent::kFailedCompilation:
DCHECK(!last_event_.has_value());
if (job_->DecrementAndCheckFinisherCount()) {
- // TODO(v8:6847): Also share streaming compilation result.
- if (job_->stream_ == nullptr) {
- job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
- job_->native_module_, true);
- }
+ job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
+ true, &job_->native_module_, job_->isolate_);
job_->DoSync<CompileFailed>();
}
break;
@@ -1959,29 +1982,20 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(2) Prepare and start compile...\n");
- // TODO(v8:6847): Also share streaming compilation result.
- if (job->stream_ == nullptr) {
- auto cached_native_module =
- job->isolate_->wasm_engine()->MaybeGetNativeModule(
- module_->origin, job->wire_bytes_.module_bytes());
- if (cached_native_module != nullptr) {
- job->native_module_ = std::move(cached_native_module);
- job->PrepareRuntimeObjects();
- Handle<FixedArray> export_wrappers;
- CompileJsToWasmWrappers(job->isolate_, job->native_module_->module(),
- &export_wrappers);
- job->module_object_->set_export_wrappers(*export_wrappers);
- job->FinishCompile();
- return;
- }
+ const bool streaming = job->wire_bytes_.length() == 0;
+ if (streaming) {
+ // Streaming compilation already checked for cache hits.
+ job->CreateNativeModule(module_, code_size_estimate_);
+ } else if (job->GetOrCreateNativeModule(std::move(module_),
+ code_size_estimate_)) {
+ job->FinishCompile(true);
+ return;
}
// Make sure all compilation tasks stopped running. Decoding (async step)
// is done.
job->background_task_manager_.CancelAndWait();
- job->CreateNativeModule(module_, code_size_estimate_);
-
CompilationStateImpl* compilation_state =
Impl(job->native_module_->compilation_state());
compilation_state->AddCallback(CompilationStateCallback{job});
@@ -2027,10 +2041,7 @@ class SampleTopTierCodeSizeCallback {
: native_module_(std::move(native_module)) {}
void operator()(CompilationEvent event) {
- // This callback is registered after baseline compilation finished, so the
- // only possible event to follow is {kFinishedTopTierCompilation}.
- if (event == CompilationEvent::kFinishedRecompilation) return;
- DCHECK_EQ(CompilationEvent::kFinishedTopTierCompilation, event);
+ if (event != CompilationEvent::kFinishedTopTierCompilation) return;
if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
native_module->engine()->SampleTopTierCodeSizeInAllIsolates(
native_module);
@@ -2046,20 +2057,30 @@ class SampleTopTierCodeSizeCallback {
// Step 3b (sync): Compilation finished.
//==========================================================================
class AsyncCompileJob::CompileFinished : public CompileStep {
+ public:
+ explicit CompileFinished(std::shared_ptr<NativeModule> cached_native_module)
+ : cached_native_module_(std::move(cached_native_module)) {}
+
private:
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(3b) Compilation finished\n");
- DCHECK(!job->native_module_->compilation_state()->failed());
- // Sample the generated code size when baseline compilation finished.
- job->native_module_->SampleCodeSize(job->isolate_->counters(),
- NativeModule::kAfterBaseline);
- // Also, set a callback to sample the code size after top-tier compilation
- // finished. This callback will *not* keep the NativeModule alive.
- job->native_module_->compilation_state()->AddCallback(
- SampleTopTierCodeSizeCallback{job->native_module_});
+ if (cached_native_module_) {
+ job->native_module_ = cached_native_module_;
+ } else {
+ DCHECK(!job->native_module_->compilation_state()->failed());
+ // Sample the generated code size when baseline compilation finished.
+ job->native_module_->SampleCodeSize(job->isolate_->counters(),
+ NativeModule::kAfterBaseline);
+ // Also, set a callback to sample the code size after top-tier compilation
+ // finished. This callback will *not* keep the NativeModule alive.
+ job->native_module_->compilation_state()->AddCallback(
+ SampleTopTierCodeSizeCallback{job->native_module_});
+ }
// Then finalize and publish the generated module.
- job->FinishCompile();
+ job->FinishCompile(cached_native_module_ != nullptr);
}
+
+ std::shared_ptr<NativeModule> cached_native_module_;
};
void AsyncCompileJob::FinishModule() {
@@ -2068,11 +2089,22 @@ void AsyncCompileJob::FinishModule() {
isolate_->wasm_engine()->RemoveCompileJob(this);
}
-AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
+AsyncStreamingProcessor::AsyncStreamingProcessor(
+ AsyncCompileJob* job, std::shared_ptr<Counters> async_counters,
+ AccountingAllocator* allocator)
: decoder_(job->enabled_features_),
job_(job),
wasm_engine_(job_->isolate_->wasm_engine()),
- compilation_unit_builder_(nullptr) {}
+ compilation_unit_builder_(nullptr),
+ async_counters_(async_counters),
+ allocator_(allocator) {}
+
+AsyncStreamingProcessor::~AsyncStreamingProcessor() {
+ if (job_->native_module_ && job_->native_module_->wire_bytes().empty()) {
+ // Clean up the temporary cache entry.
+ job_->isolate_->wasm_engine()->StreamingCompilationFailed(prefix_hash_);
+ }
+}
void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
const WasmError& error) {
@@ -2109,6 +2141,7 @@ bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
return false;
}
+ prefix_hash_ = NativeModuleCache::WireBytesHash(bytes);
return true;
}
@@ -2123,17 +2156,25 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
CommitCompilationUnits();
compilation_unit_builder_.reset();
}
+ if (before_code_section_) {
+ // Combine section hashes until code section.
+ prefix_hash_ = base::hash_combine(prefix_hash_,
+ NativeModuleCache::WireBytesHash(bytes));
+ }
if (section_code == SectionCode::kUnknownSectionCode) {
- Decoder decoder(bytes, offset);
- section_code = ModuleDecoder::IdentifyUnknownSection(
- &decoder, bytes.begin() + bytes.length());
+ size_t bytes_consumed = ModuleDecoder::IdentifyUnknownSection(
+ &decoder_, bytes, offset, &section_code);
+ if (!decoder_.ok()) {
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
+ return false;
+ }
if (section_code == SectionCode::kUnknownSectionCode) {
// Skip unknown sections that we do not know how to handle.
return true;
}
// Remove the unknown section tag from the payload bytes.
- offset += decoder.position();
- bytes = bytes.SubVector(decoder.position(), bytes.size());
+ offset += bytes_consumed;
+ bytes = bytes.SubVector(bytes_consumed, bytes.size());
}
constexpr bool verify_functions = false;
decoder_.DecodeSection(section_code, bytes, offset, verify_functions);
@@ -2149,6 +2190,8 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
int num_functions, uint32_t offset,
std::shared_ptr<WireBytesStorage> wire_bytes_storage,
int code_section_length) {
+ DCHECK_LE(0, code_section_length);
+ before_code_section_ = false;
TRACE_STREAMING("Start the code section with %d functions...\n",
num_functions);
if (!decoder_.CheckFunctionsCount(static_cast<uint32_t>(num_functions),
@@ -2156,6 +2199,15 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
return false;
}
+
+ prefix_hash_ = base::hash_combine(prefix_hash_,
+ static_cast<uint32_t>(code_section_length));
+ if (!wasm_engine_->GetStreamingCompilationOwnership(prefix_hash_)) {
+ // Known prefix, wait until the end of the stream and check the cache.
+ prefix_cache_hit_ = true;
+ return true;
+ }
+
// Execute the PrepareAndStartCompile step immediately and not in a separate
// task.
int num_imported_functions =
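{GetStreamingCompilationOwnership} lets the first stream with a given prefix hash compile eagerly, while concurrent duplicates set {prefix_cache_hit_} and defer to the cache at the end of the stream; {StreamingCompilationFailed} releases the claim. A hypothetical sketch of such an ownership registry (not the actual WasmEngine implementation):

#include <cstddef>
#include <mutex>
#include <set>

// Hypothetical registry; the real bookkeeping lives in WasmEngine.
class StreamingOwnership {
 public:
  // Returns true only for the first stream claiming this prefix.
  bool TryAcquire(size_t prefix_hash) {
    std::lock_guard<std::mutex> lock(mutex_);
    return owners_.insert(prefix_hash).second;
  }
  // Called when the owning stream fails, so a new stream may claim it.
  void Release(size_t prefix_hash) {
    std::lock_guard<std::mutex> lock(mutex_);
    owners_.erase(prefix_hash);
  }

 private:
  std::mutex mutex_;
  std::set<size_t> owners_;
};

int main() {
  StreamingOwnership ownership;
  bool first = ownership.TryAcquire(0x1234);   // compiles eagerly
  bool second = ownership.TryAcquire(0x1234);  // defers to the cache instead
  return (first && !second) ? 0 : 1;
}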
@@ -2168,6 +2220,8 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
uses_liftoff);
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false, code_size_estimate);
+
+ decoder_.set_code_section(offset, static_cast<uint32_t>(code_section_length));
auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
@@ -2199,14 +2253,12 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
decoder_.DecodeFunctionBody(
num_functions_, static_cast<uint32_t>(bytes.length()), offset, false);
- NativeModule* native_module = job_->native_module_.get();
- const WasmModule* module = native_module->module();
+ const WasmModule* module = decoder_.module();
auto enabled_features = job_->enabled_features_;
uint32_t func_index =
num_functions_ + decoder_.module()->num_imported_functions;
DCHECK_EQ(module->origin, kWasmOrigin);
const bool lazy_module = job_->wasm_lazy_compilation_;
-
CompileStrategy strategy =
GetCompileStrategy(module, enabled_features, func_index, lazy_module);
bool validate_lazily_compiled_function =
@@ -2214,13 +2266,11 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
(strategy == CompileStrategy::kLazy ||
strategy == CompileStrategy::kLazyBaselineEagerTopTier);
if (validate_lazily_compiled_function) {
- Counters* counters = Impl(native_module->compilation_state())->counters();
- AccountingAllocator* allocator = native_module->engine()->allocator();
-
// The native module does not own the wire bytes until {SetWireBytes} is
// called in {OnFinishedStream}. Validation must use {bytes} parameter.
- DecodeResult result = ValidateSingleFunction(
- module, func_index, bytes, counters, allocator, enabled_features);
+ DecodeResult result =
+ ValidateSingleFunction(module, func_index, bytes, async_counters_.get(),
+ allocator_, enabled_features);
if (result.failed()) {
FinishAsyncCompileJobWithError(result.error());
@@ -2228,6 +2278,13 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
}
}
+ // Don't compile yet if we might have a cache hit.
+ if (prefix_cache_hit_) {
+ num_functions_++;
+ return true;
+ }
+
+ NativeModule* native_module = job_->native_module_.get();
if (strategy == CompileStrategy::kLazy) {
native_module->UseLazyStub(func_index);
} else if (strategy == CompileStrategy::kLazyBaselineEagerTopTier) {
@@ -2256,11 +2313,27 @@ void AsyncStreamingProcessor::OnFinishedChunk() {
// Finish the processing of the stream.
void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
TRACE_STREAMING("Finish stream...\n");
+ DCHECK_EQ(NativeModuleCache::PrefixHash(bytes.as_vector()), prefix_hash_);
ModuleResult result = decoder_.FinishDecoding(false);
if (result.failed()) {
FinishAsyncCompileJobWithError(result.error());
return;
}
+
+ job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
+ job_->bytes_copy_ = bytes.ReleaseData();
+
+ if (prefix_cache_hit_) {
+ // Restart as an asynchronous, non-streaming compilation. Most likely
+ // {PrepareAndStartCompile} will get the native module from the cache.
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(
+ result.value().get(), FLAG_liftoff);
+ job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(
+ std::move(result).value(), true, code_size_estimate);
+ return;
+ }
+
// We have to open a HandleScope and prepare the Context for
// CreateNativeModule, PrepareRuntimeObjects and FinishCompile as this is a
// callback from the embedder.
@@ -2270,23 +2343,33 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
// Record the size of the wire bytes. In synchronous and asynchronous
// (non-streaming) compilation, this happens in {DecodeWasmModule}.
auto* histogram = job_->isolate_->counters()->wasm_wasm_module_size_bytes();
- histogram->AddSample(static_cast<int>(bytes.size()));
+ histogram->AddSample(job_->wire_bytes_.module_bytes().length());
- bool needs_finish = job_->DecrementAndCheckFinisherCount();
- if (job_->native_module_ == nullptr) {
+ const bool has_code_section = job_->native_module_ != nullptr;
+ bool cache_hit = false;
+ if (!has_code_section) {
// We are processing a WebAssembly module without code section. Create the
- // runtime objects now (would otherwise happen in {PrepareAndStartCompile}).
+ // native module now (would otherwise happen in {PrepareAndStartCompile} or
+ // {ProcessCodeSectionHeader}).
constexpr size_t kCodeSizeEstimate = 0;
- job_->CreateNativeModule(std::move(result).value(), kCodeSizeEstimate);
- DCHECK(needs_finish);
+ cache_hit = job_->GetOrCreateNativeModule(std::move(result).value(),
+ kCodeSizeEstimate);
+ } else {
+ job_->native_module_->SetWireBytes(
+ {std::move(job_->bytes_copy_), job_->wire_bytes_.length()});
}
- job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
- job_->native_module_->SetWireBytes(std::move(bytes));
+ const bool needs_finish = job_->DecrementAndCheckFinisherCount();
+ DCHECK_IMPLIES(!has_code_section, needs_finish);
if (needs_finish) {
- if (job_->native_module_->compilation_state()->failed()) {
+ const bool failed = job_->native_module_->compilation_state()->failed();
+ if (!cache_hit) {
+ cache_hit = !job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
+ failed, &job_->native_module_, job_->isolate_);
+ }
+ if (failed) {
job_->AsyncCompileFailed();
} else {
- job_->FinishCompile();
+ job_->FinishCompile(cache_hit);
}
}
}
@@ -2318,7 +2401,7 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
job_->isolate_->global_handles()->Create(*result.ToHandleChecked());
job_->native_module_ = job_->module_object_->shared_native_module();
job_->wire_bytes_ = ModuleWireBytes(job_->native_module_->wire_bytes());
- job_->FinishCompile();
+ job_->FinishCompile(false);
return true;
}
@@ -2368,7 +2451,21 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
compilation_progress_.reserve(module->num_declared_functions);
int start = module->num_imported_functions;
int end = start + module->num_declared_functions;
+
+ const bool prefer_liftoff = native_module_->IsTieredDown();
for (int func_index = start; func_index < end; func_index++) {
+ if (prefer_liftoff) {
+ constexpr uint8_t kLiftoffOnlyFunctionProgress =
+ RequiredTopTierField::update(
+ RequiredBaselineTierField::update(
+ ReachedTierField::encode(ExecutionTier::kNone),
+ ExecutionTier::kLiftoff),
+ ExecutionTier::kLiftoff);
+ compilation_progress_.push_back(kLiftoffOnlyFunctionProgress);
+ outstanding_baseline_units_++;
+ outstanding_top_tier_functions_++;
+ continue;
+ }
ExecutionTierPair requested_tiers = GetRequestedExecutionTiers(
module, compile_mode(), enabled_features, func_index);
CompileStrategy strategy =
@@ -2419,40 +2516,72 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
}
}
-void CompilationStateImpl::InitializeRecompilationProgress(ExecutionTier tier) {
+void CompilationStateImpl::InitializeRecompilation(
+ ExecutionTier tier,
+ CompilationState::callback_t recompilation_finished_callback) {
DCHECK(!failed());
- auto* module = native_module_->module();
- base::MutexGuard guard(&callbacks_mutex_);
- // Ensure that we don't trigger recompilation if another recompilation is
- // already happening.
- DCHECK_EQ(0, outstanding_recompilation_functions_);
- int start = module->num_imported_functions;
- int end = start + module->num_declared_functions;
- for (int function_index = start; function_index < end; function_index++) {
- int slot_index = function_index - start;
- DCHECK_LT(slot_index, compilation_progress_.size());
- ExecutionTier reached_tier =
- ReachedTierField::decode(compilation_progress_[slot_index]);
- if (reached_tier != tier) {
- outstanding_recompilation_functions_++;
+ // Generate necessary compilation units on the fly.
+ CompilationUnitBuilder builder(native_module_);
+
+ {
+ base::MutexGuard guard(&callbacks_mutex_);
+
+ // Restart recompilation if another recompilation is already happening.
+ outstanding_recompilation_functions_ = 0;
+ // If compilation hasn't started yet, then the code will be kept tiered-down
+ // and does not need to be recompiled.
+ if (compilation_progress_.size() > 0) {
+ int start = native_module_->module()->num_imported_functions;
+ int end = start + native_module_->module()->num_declared_functions;
+ for (int function_index = start; function_index < end; function_index++) {
+ int slot_index = function_index - start;
+ DCHECK_LT(slot_index, compilation_progress_.size());
+ ExecutionTier reached_tier =
+ ReachedTierField::decode(compilation_progress_[slot_index]);
+ bool has_correct_tier =
+ reached_tier == tier &&
+ native_module_->HasCodeWithTier(function_index, tier);
+ compilation_progress_[slot_index] =
+ ReachedRecompilationTierField::update(
+ compilation_progress_[slot_index],
+ has_correct_tier ? tier : ExecutionTier::kNone);
+ if (!has_correct_tier) {
+ outstanding_recompilation_functions_++;
+ builder.AddRecompilationUnit(function_index, tier);
+ }
+ }
}
- }
- DCHECK_LE(0, outstanding_recompilation_functions_);
- DCHECK_LE(outstanding_recompilation_functions_,
- module->num_declared_functions);
- // Trigger callbacks if module needs no recompilation.
- if (outstanding_recompilation_functions_ == 0) {
- for (auto& callback : callbacks_) {
- callback(CompilationEvent::kFinishedRecompilation);
+ // Trigger the callback immediately if the module needs no recompilation.
+ // Otherwise, add it to the list of callbacks to be called later.
+ if (outstanding_recompilation_functions_ == 0) {
+ recompilation_finished_callback(CompilationEvent::kFinishedRecompilation);
+ } else {
+ callbacks_.emplace_back(std::move(recompilation_finished_callback));
+ recompilation_tier_ = tier;
}
}
+
+ builder.Commit();
}
void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
base::MutexGuard callbacks_guard(&callbacks_mutex_);
- callbacks_.emplace_back(std::move(callback));
+ // Immediately trigger events that already happened.
+ for (auto event : {CompilationEvent::kFinishedBaselineCompilation,
+ CompilationEvent::kFinishedTopTierCompilation,
+ CompilationEvent::kFailedCompilation}) {
+ if (finished_events_.contains(event)) {
+ callback(event);
+ }
+ }
+ constexpr base::EnumSet<CompilationEvent> kFinalEvents{
+ CompilationEvent::kFinishedTopTierCompilation,
+ CompilationEvent::kFailedCompilation};
+ if (!finished_events_.contains_any(kFinalEvents)) {
+ callbacks_.emplace_back(std::move(callback));
+ }
}
void CompilationStateImpl::AddCompilationUnits(
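With {finished_events_}, {AddCallback} above replays events that already fired to late registrants, and drops the callback once a final event has been seen. A simplified sketch of that behavior (std::set standing in for {base::EnumSet}):

#include <functional>
#include <set>
#include <vector>

enum class CompilationEvent {
  kFinishedBaselineCompilation,
  kFinishedTopTierCompilation,
  kFailedCompilation,
};
using Callback = std::function<void(CompilationEvent)>;

struct CallbackRegistry {
  std::set<CompilationEvent> finished_events;  // stand-in for base::EnumSet
  std::vector<Callback> callbacks;

  void AddCallback(Callback callback) {
    // Replay events that already happened, exactly once each.
    for (CompilationEvent event : finished_events) callback(event);
    // After a final event, no further events can fire, so don't retain it.
    bool saw_final_event =
        finished_events.count(CompilationEvent::kFinishedTopTierCompilation) ||
        finished_events.count(CompilationEvent::kFailedCompilation);
    if (!saw_final_event) callbacks.push_back(std::move(callback));
  }
};

int main() {
  CallbackRegistry registry;
  registry.finished_events.insert(
      CompilationEvent::kFinishedBaselineCompilation);
  int seen = 0;
  registry.AddCallback([&seen](CompilationEvent) { ++seen; });
  return seen == 1 ? 0 : 1;  // the baseline event was replayed once
}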
@@ -2511,7 +2640,8 @@ CompilationStateImpl::GetNextCompilationUnit(
return compilation_unit_queues_.GetNextUnit(task_id, baseline_only);
}
-void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
+void CompilationStateImpl::OnFinishedUnits(
+ Vector<WasmCode*> code_vector, Vector<WasmCompilationResult> results) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "OnFinishedUnits",
"num_units", code_vector.size());
@@ -2536,11 +2666,10 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
DCHECK_EQ(compilation_progress_.size(),
native_module_->module()->num_declared_functions);
- bool completes_baseline_compilation = false;
- bool completes_top_tier_compilation = false;
- bool completes_recompilation = false;
+ base::EnumSet<CompilationEvent> triggered_events;
- for (WasmCode* code : code_vector) {
+ for (size_t i = 0; i < code_vector.size(); i++) {
+ WasmCode* code = code_vector[i];
DCHECK_NOT_NULL(code);
DCHECK_LT(code->index(), native_module_->num_functions());
@@ -2548,9 +2677,6 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
// Import wrapper.
DCHECK_EQ(code->tier(), ExecutionTier::kTurbofan);
outstanding_baseline_units_--;
- if (outstanding_baseline_units_ == 0) {
- completes_baseline_compilation = true;
- }
} else {
// Function.
DCHECK_NE(code->tier(), ExecutionTier::kNone);
@@ -2573,17 +2699,11 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
required_baseline_tier <= code->tier()) {
DCHECK_GT(outstanding_baseline_units_, 0);
outstanding_baseline_units_--;
- if (outstanding_baseline_units_ == 0) {
- completes_baseline_compilation = true;
- }
}
if (reached_tier < required_top_tier &&
required_top_tier <= code->tier()) {
DCHECK_GT(outstanding_top_tier_functions_, 0);
outstanding_top_tier_functions_--;
- if (outstanding_top_tier_functions_ == 0) {
- completes_top_tier_compilation = true;
- }
}
// If there is recompilation in progress, we would only count the
@@ -2591,62 +2711,75 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
// counter once a function reaches Liftoff.
if (outstanding_recompilation_functions_ > 0) {
// TODO(duongn): extend this logic for tier up.
- if (code->tier() == ExecutionTier::kLiftoff &&
- reached_tier != ExecutionTier::kLiftoff) {
+ ExecutionTier recompilation_tier =
+ ReachedRecompilationTierField::decode(function_progress);
+ if (results[i].requested_tier == recompilation_tier_ &&
+ recompilation_tier == ExecutionTier::kNone) {
+ DCHECK(code->tier() >= recompilation_tier_);
outstanding_recompilation_functions_--;
- // Update function's compilation progress.
- compilation_progress_[slot_index] = ReachedTierField::update(
- compilation_progress_[slot_index], code->tier());
+ // Update function's recompilation progress.
+ compilation_progress_[slot_index] =
+ ReachedRecompilationTierField::update(
+ compilation_progress_[slot_index], code->tier());
if (outstanding_recompilation_functions_ == 0) {
- completes_recompilation = true;
+ triggered_events.Add(CompilationEvent::kFinishedRecompilation);
}
}
- } else {
- // Update function's compilation progress.
- if (code->tier() > reached_tier) {
- compilation_progress_[slot_index] = ReachedTierField::update(
- compilation_progress_[slot_index], code->tier());
- }
+ }
+
+ // Update function's compilation progress.
+ if (code->tier() > reached_tier) {
+ compilation_progress_[slot_index] = ReachedTierField::update(
+ compilation_progress_[slot_index], code->tier());
}
DCHECK_LE(0, outstanding_baseline_units_);
}
}
- TriggerCallbacks(completes_baseline_compilation,
- completes_top_tier_compilation, completes_recompilation);
+ TriggerCallbacks(triggered_events);
}
void CompilationStateImpl::OnFinishedJSToWasmWrapperUnits(int num) {
if (num == 0) return;
base::MutexGuard guard(&callbacks_mutex_);
+ DCHECK_GE(outstanding_baseline_units_, num);
outstanding_baseline_units_ -= num;
- bool completes_baseline_compilation = outstanding_baseline_units_ == 0;
- TriggerCallbacks(completes_baseline_compilation, false);
+ TriggerCallbacks();
}
-void CompilationStateImpl::TriggerCallbacks(bool completes_baseline_compilation,
- bool completes_top_tier_compilation,
- bool completes_recompilation) {
- if (completes_recompilation) {
- for (auto& callback : callbacks_) {
- callback(CompilationEvent::kFinishedRecompilation);
- }
- }
- if (completes_baseline_compilation) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "BaselineFinished");
- for (auto& callback : callbacks_) {
- callback(CompilationEvent::kFinishedBaselineCompilation);
- }
+void CompilationStateImpl::TriggerCallbacks(
+ base::EnumSet<CompilationEvent> triggered_events) {
+ DCHECK(!callbacks_mutex_.TryLock());
+
+ if (outstanding_baseline_units_ == 0) {
+ triggered_events.Add(CompilationEvent::kFinishedBaselineCompilation);
if (outstanding_top_tier_functions_ == 0) {
- completes_top_tier_compilation = true;
+ triggered_events.Add(CompilationEvent::kFinishedTopTierCompilation);
}
}
- if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
+
+ if (triggered_events.empty()) return;
+
+ // Don't trigger past events again.
+ triggered_events -= finished_events_;
+ // Recompilation can happen multiple times, so this event is not stored.
+ finished_events_ |=
+ triggered_events - CompilationEvent::kFinishedRecompilation;
+
+ for (auto event :
+ {std::make_pair(CompilationEvent::kFinishedBaselineCompilation,
+ "BaselineFinished"),
+ std::make_pair(CompilationEvent::kFinishedTopTierCompilation,
+ "TopTierFinished"),
+ std::make_pair(CompilationEvent::kFinishedRecompilation,
+ "RecompilationFinished")}) {
+ if (!triggered_events.contains(event.first)) continue;
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), event.second);
for (auto& callback : callbacks_) {
- callback(CompilationEvent::kFinishedTopTierCompilation);
+ callback(event.first);
}
}
+
if (outstanding_baseline_units_ == 0 &&
outstanding_top_tier_functions_ == 0 &&
outstanding_recompilation_functions_ == 0) {
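The rewritten {TriggerCallbacks} collects events in a {base::EnumSet}, subtracts already-finished events so nothing fires twice, and deliberately keeps {kFinishedRecompilation} out of the sticky set so it can fire again. A compact sketch of that set algebra with a bitmask-based stand-in:

// Bitmask-based stand-in for base::EnumSet<CompilationEvent>.
enum class Event : unsigned {
  kFinishedBaselineCompilation = 1u << 0,
  kFinishedTopTierCompilation = 1u << 1,
  kFinishedRecompilation = 1u << 2,
};

struct EnumSet {
  unsigned bits = 0;
  void Add(Event e) { bits |= static_cast<unsigned>(e); }
  bool contains(Event e) const {
    return (bits & static_cast<unsigned>(e)) != 0;
  }
  bool empty() const { return bits == 0; }
  EnumSet& operator-=(EnumSet other) { bits &= ~other.bits; return *this; }
  EnumSet operator-(Event e) const {
    return EnumSet{bits & ~static_cast<unsigned>(e)};
  }
  EnumSet& operator|=(EnumSet other) { bits |= other.bits; return *this; }
};

// Mirrors the dedup logic above: never re-fire a sticky event, and keep
// recompilation non-sticky so it may trigger again.
void Trigger(EnumSet* finished_events, EnumSet triggered_events) {
  triggered_events -= *finished_events;
  *finished_events |= triggered_events - Event::kFinishedRecompilation;
  if (triggered_events.empty()) return;
  // ... invoke registered callbacks for each event left in triggered_events.
}

int main() {
  EnumSet finished, triggered;
  triggered.Add(Event::kFinishedBaselineCompilation);
  triggered.Add(Event::kFinishedRecompilation);
  Trigger(&finished, triggered);
  return (finished.contains(Event::kFinishedBaselineCompilation) &&
          !finished.contains(Event::kFinishedRecompilation))
             ? 0
             : 1;
}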
@@ -2828,7 +2961,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
WasmCode* CompileImportWrapper(
WasmEngine* wasm_engine, NativeModule* native_module, Counters* counters,
- compiler::WasmImportCallKind kind, FunctionSig* sig,
+ compiler::WasmImportCallKind kind, const FunctionSig* sig,
WasmImportWrapperCache::ModificationScope* cache_scope) {
// Entry should exist, so that we don't insert a new one and invalidate
// other threads' iterators/references, but it should not have been compiled
@@ -2843,8 +2976,9 @@ WasmCode* CompileImportWrapper(
wasm_engine, &env, kind, sig, source_positions);
std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions), GetCodeKind(result),
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), GetCodeKind(result),
ExecutionTier::kNone);
WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
(*cache_scope)[key] = published_code;
@@ -2856,7 +2990,7 @@ WasmCode* CompileImportWrapper(
}
Handle<Script> CreateWasmScript(Isolate* isolate,
- const ModuleWireBytes& wire_bytes,
+ Vector<const uint8_t> wire_bytes,
Vector<const char> source_map_url,
WireBytesRef name,
Vector<const char> source_url) {
@@ -2867,8 +3001,8 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
script->set_type(Script::TYPE_WASM);
int hash = StringHasher::HashSequentialString(
- reinterpret_cast<const char*>(wire_bytes.start()),
- static_cast<int>(wire_bytes.length()), kZeroHashSeed);
+ reinterpret_cast<const char*>(wire_bytes.begin()), wire_bytes.length(),
+ kZeroHashSeed);
const int kBufferSize = 32;
char buffer[kBufferSize];
@@ -2887,7 +3021,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
.ToHandleChecked();
Handle<String> module_name =
WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate, wire_bytes.module_bytes(), name, kNoInternalize);
+ isolate, wire_bytes, name, kNoInternalize);
name_str = isolate->factory()
->NewConsString(module_name, name_hash)
.ToHandleChecked();
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index b60c12702f..4c6acd9aa9 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -57,11 +57,11 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
V8_EXPORT_PRIVATE
WasmCode* CompileImportWrapper(
WasmEngine* wasm_engine, NativeModule* native_module, Counters* counters,
- compiler::WasmImportCallKind kind, FunctionSig* sig,
+ compiler::WasmImportCallKind kind, const FunctionSig* sig,
WasmImportWrapperCache::ModificationScope* cache_scope);
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
- Isolate* isolate, const ModuleWireBytes& wire_bytes,
+ Isolate* isolate, Vector<const uint8_t> wire_bytes,
Vector<const char> source_map_url, WireBytesRef name,
Vector<const char> source_url = {});
@@ -149,9 +149,12 @@ class AsyncCompileJob {
void CreateNativeModule(std::shared_ptr<const WasmModule> module,
size_t code_size_estimate);
+ // Return true for cache hit, false for cache miss.
+ bool GetOrCreateNativeModule(std::shared_ptr<const WasmModule> module,
+ size_t code_size_estimate);
void PrepareRuntimeObjects();
- void FinishCompile();
+ void FinishCompile(bool is_after_cache_hit);
void DecodeFailed(const WasmError&);
void AsyncCompileFailed();
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index bc8260cd9b..58be26f845 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -149,6 +149,42 @@ WireBytesRef consume_string(Decoder* decoder, bool validate_utf8,
return {offset, decoder->failed() ? 0 : length};
}
+namespace {
+SectionCode IdentifyUnknownSectionInternal(Decoder* decoder) {
+ WireBytesRef string = consume_string(decoder, true, "section name");
+ if (decoder->failed()) {
+ return kUnknownSectionCode;
+ }
+ const byte* section_name_start =
+ decoder->start() + decoder->GetBufferRelativeOffset(string.offset());
+
+ TRACE(" +%d section name : \"%.*s\"\n",
+ static_cast<int>(section_name_start - decoder->start()),
+ string.length() < 20 ? string.length() : 20, section_name_start);
+
+ if (string.length() == num_chars(kNameString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start), kNameString,
+ num_chars(kNameString)) == 0) {
+ return kNameSectionCode;
+ } else if (string.length() == num_chars(kSourceMappingURLString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kSourceMappingURLString,
+ num_chars(kSourceMappingURLString)) == 0) {
+ return kSourceMappingURLSectionCode;
+ } else if (string.length() == num_chars(kCompilationHintsString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kCompilationHintsString,
+ num_chars(kCompilationHintsString)) == 0) {
+ return kCompilationHintsSectionCode;
+ } else if (string.length() == num_chars(kDebugInfoString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kDebugInfoString, num_chars(kDebugInfoString)) == 0) {
+ return kDebugInfoSectionCode;
+ }
+ return kUnknownSectionCode;
+}
+} // namespace
+
// An iterator over the sections in a wasm binary module.
// Automatically skips all unknown sections.
class WasmSectionIterator {
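{IdentifyUnknownSectionInternal} recognizes a custom section only when the name length matches exactly and the raw bytes compare equal, since section names in the wire format are length-prefixed rather than NUL-terminated. A reduced sketch of that matching (hypothetical section codes):

#include <cstring>
#include <string_view>

// Hypothetical section codes for the sketch.
enum SectionCode {
  kUnknownSectionCode,
  kNameSectionCode,
  kSourceMappingURLSectionCode,
};

// Names are length-prefixed in the wire format, not NUL-terminated, so a
// match requires equal length plus equal bytes.
SectionCode IdentifySection(const char* name_start, size_t name_length) {
  constexpr std::string_view kName = "name";
  constexpr std::string_view kSourceMappingURL = "sourceMappingURL";
  if (name_length == kName.size() &&
      std::memcmp(name_start, kName.data(), kName.size()) == 0) {
    return kNameSectionCode;
  }
  if (name_length == kSourceMappingURL.size() &&
      std::memcmp(name_start, kSourceMappingURL.data(),
                  kSourceMappingURL.size()) == 0) {
    return kSourceMappingURLSectionCode;
  }
  return kUnknownSectionCode;
}

int main() {
  const char bytes[] = {'n', 'a', 'm', 'e'};  // no terminating NUL
  return IdentifySection(bytes, sizeof(bytes)) == kNameSectionCode ? 0 : 1;
}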
@@ -232,8 +268,13 @@ class WasmSectionIterator {
if (section_code == kUnknownSectionCode) {
// Check for the known "name", "sourceMappingURL", or "compilationHints"
// section.
- section_code =
- ModuleDecoder::IdentifyUnknownSection(decoder_, section_end_);
+ // To identify the unknown section we set the end of the decoder bytes to
+ // the end of the custom section, so that we do not read the section name
+ // beyond the end of the section.
+ const byte* module_end = decoder_->end();
+ decoder_->set_end(section_end_);
+ section_code = IdentifyUnknownSectionInternal(decoder_);
+ if (decoder_->ok()) decoder_->set_end(module_end);
// As a side effect, the above function will forward the decoder to after
// the identifier string.
payload_start_ = decoder_->pc();
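The iterator above temporarily clamps the decoder's end to {section_end_} so that reading the section name can never overrun the custom section, restoring the module end only on success. A self-contained sketch of the trick, with the decoder reduced to raw pointers:

#include <cstdint>

struct Decoder {
  const uint8_t* pc;
  const uint8_t* end;
  bool ok = true;
};

// Reads one byte, failing instead of reading past {end}.
uint8_t consume_byte(Decoder* decoder) {
  if (decoder->pc >= decoder->end) {
    decoder->ok = false;
    return 0;
  }
  return *decoder->pc++;
}

int main() {
  const uint8_t module_bytes[] = {4, 'n', 'a', 'm', 'e', 0xFF};
  Decoder decoder{module_bytes, module_bytes + sizeof(module_bytes)};
  const uint8_t* section_end = module_bytes + 5;  // custom section ends here

  const uint8_t* module_end = decoder.end;
  decoder.end = section_end;  // clamp: reads beyond the section now fail
  uint8_t length = consume_byte(&decoder);
  for (uint8_t i = 0; i < length; i++) consume_byte(&decoder);
  if (decoder.ok) decoder.end = module_end;  // restore on success

  return decoder.ok ? 0 : 1;
}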
@@ -272,6 +313,8 @@ class ModuleDecoderImpl : public Decoder {
error(start_, "end is less than start");
end_ = start_;
}
+ module_start_ = module_start;
+ module_end_ = module_end;
}
void onFirstError() override {
@@ -367,6 +410,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset, bool verify_functions = true) {
+ VerifyFunctionDeclarations(section_code);
if (failed()) return;
Reset(bytes, offset);
TRACE("Section: %s\n", SectionName(section_code));
@@ -390,8 +434,8 @@ class ModuleDecoderImpl : public Decoder {
break;
case kExceptionSectionCode:
if (!CheckUnorderedSection(section_code)) return;
- if (!CheckSectionOrder(section_code, kGlobalSectionCode,
- kExportSectionCode))
+ if (!CheckSectionOrder(section_code, kMemorySectionCode,
+ kGlobalSectionCode))
return;
break;
case kNameSectionCode:
@@ -510,7 +554,7 @@ class ModuleDecoderImpl : public Decoder {
for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- FunctionSig* s = consume_sig(module_->signature_zone.get());
+ const FunctionSig* s = consume_sig(module_->signature_zone.get());
module_->signatures.push_back(s);
uint32_t id = s ? module_->signature_map.FindOrInsert(*s) : 0;
module_->signature_ids.push_back(id);
@@ -548,7 +592,8 @@ class ModuleDecoderImpl : public Decoder {
0, // sig_index
{0, 0}, // code
true, // imported
- false}); // exported
+ false, // exported
+ false}); // declared
WasmFunction* function = &module_->functions.back();
function->sig_index =
consume_sig_index(module_.get(), &function->sig);
@@ -582,9 +627,9 @@ class ModuleDecoderImpl : public Decoder {
if (!AddMemory(module_.get())) break;
uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
consume_resizable_limits(
- "memory", "pages", kSpecMaxWasmMemoryPages,
+ "memory", "pages", max_initial_mem_pages(),
&module_->initial_pages, &module_->has_maximum_pages,
- kSpecMaxWasmMemoryPages, &module_->maximum_pages, flags);
+ max_maximum_mem_pages(), &module_->maximum_pages, flags);
break;
}
case kExternalGlobal: {
@@ -607,7 +652,7 @@ class ModuleDecoderImpl : public Decoder {
break;
}
import->index = static_cast<uint32_t>(module_->exceptions.size());
- WasmExceptionSig* exception_sig = nullptr;
+ const WasmExceptionSig* exception_sig = nullptr;
consume_exception_attribute(); // Attribute ignored for now.
consume_exception_sig_index(module_.get(), &exception_sig);
module_->exceptions.emplace_back(exception_sig);
@@ -638,7 +683,8 @@ class ModuleDecoderImpl : public Decoder {
0, // sig_index
{0, 0}, // code
false, // imported
- false}); // exported
+ false, // exported
+ false}); // declared
WasmFunction* function = &module_->functions.back();
function->sig_index = consume_sig_index(module_.get(), &function->sig);
if (!ok()) return;
@@ -673,8 +719,8 @@ class ModuleDecoderImpl : public Decoder {
if (!AddMemory(module_.get())) break;
uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
consume_resizable_limits(
- "memory", "pages", kSpecMaxWasmMemoryPages, &module_->initial_pages,
- &module_->has_maximum_pages, kSpecMaxWasmMemoryPages,
+ "memory", "pages", max_initial_mem_pages(), &module_->initial_pages,
+ &module_->has_maximum_pages, max_maximum_mem_pages(),
&module_->maximum_pages, flags);
}
}
@@ -806,43 +852,43 @@ class ModuleDecoderImpl : public Decoder {
uint32_t element_count =
consume_count("element count", FLAG_wasm_max_table_size);
- if (element_count > 0 && module_->tables.size() == 0) {
- error(pc_, "The element section requires a table");
- }
for (uint32_t i = 0; ok() && i < element_count; ++i) {
const byte* pos = pc();
- bool is_active;
+ WasmElemSegment::Status status;
bool functions_as_elements;
uint32_t table_index;
WasmInitExpr offset;
- consume_element_segment_header(&is_active, &functions_as_elements,
+ ValueType type = kWasmBottom;
+ consume_element_segment_header(&status, &functions_as_elements, &type,
&table_index, &offset);
if (failed()) return;
+ DCHECK_NE(type, kWasmBottom);
- if (is_active) {
+ if (status == WasmElemSegment::kStatusActive) {
if (table_index >= module_->tables.size()) {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
- if (!ValueTypes::IsSubType(kWasmFuncRef,
- module_->tables[table_index].type)) {
+ if (!type.IsSubTypeOf(module_->tables[table_index].type)) {
errorf(pos,
- "Invalid element segment. Table %u is not of type FuncRef",
- table_index);
+ "Invalid element segment. Table %u is not a super-type of %s",
+ table_index, type.type_name());
break;
}
}
uint32_t num_elem =
consume_count("number of elements", max_table_init_entries());
- if (is_active) {
+ if (status == WasmElemSegment::kStatusActive) {
module_->elem_segments.emplace_back(table_index, offset);
} else {
- module_->elem_segments.emplace_back();
+ module_->elem_segments.emplace_back(
+ status == WasmElemSegment::kStatusDeclarative);
}
WasmElemSegment* init = &module_->elem_segments.back();
+ init->type = type;
for (uint32_t j = 0; j < num_elem; j++) {
uint32_t index = functions_as_elements ? consume_element_expr()
: consume_element_func_index();
@@ -869,6 +915,8 @@ class ModuleDecoderImpl : public Decoder {
if (failed()) break;
DecodeFunctionBody(i, size, offset, verify_functions);
}
+ DCHECK_GE(pc_offset(), pos);
+ set_code_section(pos, pc_offset() - pos);
}
bool CheckFunctionsCount(uint32_t functions_count, uint32_t offset) {
@@ -887,7 +935,7 @@ class ModuleDecoderImpl : public Decoder {
&module_->functions[index + module_->num_imported_functions];
function->code = {offset, length};
if (verify_functions) {
- ModuleWireBytes bytes(start_, end_);
+ ModuleWireBytes bytes(module_start_, module_end_);
VerifyFunctionBody(module_->signature_zone->allocator(),
index + module_->num_imported_functions, bytes,
module_.get(), function);
@@ -912,10 +960,6 @@ class ModuleDecoderImpl : public Decoder {
module_->data_segments.reserve(data_segments_count);
for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
const byte* pos = pc();
- if (!module_->has_memory) {
- error("cannot load data without memory");
- break;
- }
TRACE("DecodeDataSegment[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
@@ -925,9 +969,15 @@ class ModuleDecoderImpl : public Decoder {
consume_data_segment_header(&is_active, &memory_index, &dest_addr);
if (failed()) break;
- if (is_active && memory_index != 0) {
- errorf(pos, "illegal memory index %u != 0", memory_index);
- break;
+ if (is_active) {
+ if (!module_->has_memory) {
+ error("cannot load data without memory");
+ break;
+ }
+ if (memory_index != 0) {
+ errorf(pos, "illegal memory index %u != 0", memory_index);
+ break;
+ }
}
uint32_t source_length = consume_u32v("source size");
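The reordered checks above make "needs a memory" and "memory index must be 0" apply only to active data segments; passive segments (from the bulk memory proposal) reference no memory at decode time. A minimal sketch of the split:

#include <cstdint>
#include <cstdio>

struct ModuleState {
  bool has_memory;
};

// Only active segments reference a memory at decode time; passive segments
// are used later, e.g. via memory.init.
bool ValidateDataSegment(const ModuleState& module, bool is_active,
                         uint32_t memory_index) {
  if (!is_active) return true;
  if (!module.has_memory) {
    std::puts("cannot load data without memory");
    return false;
  }
  if (memory_index != 0) {
    std::printf("illegal memory index %u != 0\n", memory_index);
    return false;
  }
  return true;
}

int main() {
  ModuleState module{/*has_memory=*/false};
  bool passive_ok = ValidateDataSegment(module, /*is_active=*/false, 0);
  bool active_ok = ValidateDataSegment(module, /*is_active=*/true, 0);
  return (passive_ok && !active_ok) ? 0 : 1;
}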
@@ -1084,7 +1134,7 @@ class ModuleDecoderImpl : public Decoder {
for (uint32_t i = 0; ok() && i < exception_count; ++i) {
TRACE("DecodeException[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- WasmExceptionSig* exception_sig = nullptr;
+ const WasmExceptionSig* exception_sig = nullptr;
consume_exception_attribute(); // Attribute ignored for now.
consume_exception_sig_index(module_.get(), &exception_sig);
module_->exceptions.emplace_back(exception_sig);
@@ -1114,7 +1164,41 @@ class ModuleDecoderImpl : public Decoder {
return true;
}
+ void VerifyFunctionDeclarations(SectionCode section_code) {
+ // A function's declaredness is only known once all element sections have
+ // been parsed, but its use in global initialization happens earlier, so
+ // these checks are deferred until then.
+ if (deferred_funcref_error_offsets_.empty()) {
+ // No verifications to be done.
+ return;
+ }
+ if (!ok()) {
+ // Previous errors exist.
+ return;
+ }
+ // TODO(ecmziegler): Adjust logic if module order changes (e.g. event
+ // section).
+ if (section_code <= kElementSectionCode &&
+ section_code != kUnknownSectionCode) {
+ // Still at or before the element section, and not at end of decoding.
+ return;
+ }
+ for (auto& func_offset : deferred_funcref_error_offsets_) {
+ DCHECK_LT(func_offset.first, module_->functions.size());
+ if (!module_->functions[func_offset.first].declared) {
+ errorf(func_offset.second, "undeclared reference to function #%u",
+ func_offset.first);
+ break;
+ }
+ }
+ deferred_funcref_error_offsets_.clear();
+ }
+
ModuleResult FinishDecoding(bool verify_functions = true) {
+ // Ensure that function verifications were done even if no section followed
+ // the global section.
+ VerifyFunctionDeclarations(kUnknownSectionCode);
+
if (ok() && CheckMismatchedCounts()) {
CalculateGlobalOffsets(module_.get());
}
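The deferral works by recording, for each {ref.func} seen in a global initializer, the function index and the wire byte offset to blame; once decoding passes the element section, any recorded function still undeclared produces an error at its recorded offset. A reduced sketch under those assumptions:

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

// Module reduced to a per-function "declared" flag.
struct Module {
  std::vector<bool> declared;
};

// Report (at the recorded offset) any referenced function that was never
// declared, then drop the deferred entries.
void VerifyFunctionDeclarations(
    const Module& module,
    std::unordered_map<uint32_t, int>* deferred_error_offsets) {
  for (const auto& entry : *deferred_error_offsets) {
    if (!module.declared[entry.first]) {
      std::printf("offset %d: undeclared reference to function #%u\n",
                  entry.second, entry.first);
      break;
    }
  }
  deferred_error_offsets->clear();
}

int main() {
  Module module{{true, false}};  // function #1 was never declared
  std::unordered_map<uint32_t, int> deferred = {{1u, 42}};  // index -> offset
  VerifyFunctionDeclarations(module, &deferred);
  return deferred.empty() ? 0 : 1;
}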
@@ -1126,6 +1210,10 @@ class ModuleDecoderImpl : public Decoder {
return result;
}
+ void set_code_section(uint32_t offset, uint32_t size) {
+ module_->code = {offset, size};
+ }
+
// Decodes an entire module.
ModuleResult DecodeModule(Counters* counters, AccountingAllocator* allocator,
bool verify_functions = true) {
@@ -1184,9 +1272,9 @@ class ModuleDecoderImpl : public Decoder {
}
// Decodes a single function signature at {start}.
- FunctionSig* DecodeFunctionSignature(Zone* zone, const byte* start) {
+ const FunctionSig* DecodeFunctionSignature(Zone* zone, const byte* start) {
pc_ = start;
- FunctionSig* result = consume_sig(zone);
+ const FunctionSig* result = consume_sig(zone);
return ok() ? result : nullptr;
}
@@ -1210,6 +1298,8 @@ class ModuleDecoderImpl : public Decoder {
private:
const WasmFeatures enabled_features_;
std::shared_ptr<WasmModule> module_;
+ const byte* module_start_;
+ const byte* module_end_;
Counters* counters_ = nullptr;
// The type section is the first section in a module.
uint8_t next_ordered_section_ = kFirstSectionInModule;
@@ -1225,6 +1315,10 @@ class ModuleDecoderImpl : public Decoder {
kLastKnownModuleSection,
"not enough bits");
WasmError intermediate_error_;
+ // Map from function index to wire byte offset of first funcref initialization
+ // in the global section. Used for deferred checking and proper error
+ // reporting for functions that were never declared in the element section.
+ std::unordered_map<uint32_t, int> deferred_funcref_error_offsets_;
ModuleOrigin origin_;
bool has_seen_unordered_section(SectionCode section_code) {
@@ -1277,14 +1371,14 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos,
"type mismatch in global initialization "
"(from global #%u), expected %s, got %s",
- other_index, ValueTypes::TypeName(global->type),
- ValueTypes::TypeName(module->globals[other_index].type));
+ other_index, global->type.type_name(),
+ module->globals[other_index].type.type_name());
}
} else {
- if (!ValueTypes::IsSubType(TypeOf(module, global->init), global->type)) {
+ if (!TypeOf(module, global->init).IsSubTypeOf(global->type)) {
errorf(pos, "type error in global initialization, expected %s, got %s",
- ValueTypes::TypeName(global->type),
- ValueTypes::TypeName(TypeOf(module, global->init)));
+ global->type.type_name(),
+ TypeOf(module, global->init).type_name());
}
}
}
@@ -1297,13 +1391,12 @@ class ModuleDecoderImpl : public Decoder {
for (WasmGlobal& global : module->globals) {
if (global.mutability && global.imported) {
global.index = num_imported_mutable_globals++;
- } else if (ValueTypes::IsReferenceType(global.type)) {
+ } else if (global.type.IsReferenceType()) {
global.offset = tagged_offset;
// All entries in the tagged_globals_buffer have size 1.
tagged_offset++;
} else {
- byte size =
- ValueTypes::MemSize(ValueTypes::MachineTypeFor(global.type));
+ int size = global.type.element_size_bytes();
untagged_offset = (untagged_offset + size - 1) & ~(size - 1); // align
global.offset = untagged_offset;
untagged_offset += size;
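
The `(untagged_offset + size - 1) & ~(size - 1)` expression above is the usual power-of-two round-up idiom; a small self-contained illustration:

    #include <cassert>
    #include <cstdint>

    // Rounds offset up to the next multiple of size; size must be a power
    // of two, which holds for all Wasm value sizes (4, 8, 16, pointer size).
    uint32_t RoundUpToAlignment(uint32_t offset, uint32_t size) {
      return (offset + size - 1) & ~(size - 1);
    }

    int main() {
      assert(RoundUpToAlignment(5, 4) == 8);
      assert(RoundUpToAlignment(8, 4) == 8);  // already aligned
      assert(RoundUpToAlignment(9, 8) == 16);
      return 0;
    }
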
@@ -1327,16 +1420,9 @@ class ModuleDecoderImpl : public Decoder {
start_ + GetBufferRelativeOffset(function->code.offset()),
start_ + GetBufferRelativeOffset(function->code.end_offset())};
- DecodeResult result;
- {
- auto time_counter = SELECT_WASM_COUNTER(GetCounters(), origin_,
- wasm_decode, function_time);
-
- TimedHistogramScope wasm_decode_function_time_scope(time_counter);
- WasmFeatures unused_detected_features = WasmFeatures::None();
- result = VerifyWasmCode(allocator, enabled_features_, module,
- &unused_detected_features, body);
- }
+ WasmFeatures unused_detected_features = WasmFeatures::None();
+ DecodeResult result = VerifyWasmCode(allocator, enabled_features_, module,
+ &unused_detected_features, body);
// If the decode failed and this is the first error, set error code and
// location.
@@ -1349,7 +1435,7 @@ class ModuleDecoderImpl : public Decoder {
}
}
- uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
+ uint32_t consume_sig_index(WasmModule* module, const FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
if (sig_index >= module->signatures.size()) {
@@ -1362,7 +1448,8 @@ class ModuleDecoderImpl : public Decoder {
return sig_index;
}
- uint32_t consume_exception_sig_index(WasmModule* module, FunctionSig** sig) {
+ uint32_t consume_exception_sig_index(WasmModule* module,
+ const FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_sig_index(module, sig);
if (*sig && (*sig)->return_count() != 0) {
@@ -1560,6 +1647,8 @@ class ModuleDecoderImpl : public Decoder {
errorf(pc() - 1, "invalid function index: %u", imm.index);
break;
}
+ // Defer check for declaration of function reference.
+ deferred_funcref_error_offsets_.emplace(imm.index, pc_offset());
expr.kind = WasmInitExpr::kRefFuncConst;
expr.val.function_index = imm.index;
len = imm.length;
@@ -1579,8 +1668,7 @@ class ModuleDecoderImpl : public Decoder {
}
if (expected != kWasmStmt && TypeOf(module, expr) != kWasmI32) {
errorf(pos, "type error in init expression, expected %s, got %s",
- ValueTypes::TypeName(expected),
- ValueTypes::TypeName(TypeOf(module, expr)));
+ expected.type_name(), TypeOf(module, expr).type_name());
}
return expr;
}
@@ -1665,7 +1753,7 @@ class ModuleDecoderImpl : public Decoder {
return kWasmStmt;
}
- FunctionSig* consume_sig(Zone* zone) {
+ const FunctionSig* consume_sig(Zone* zone) {
if (!expect_u8("type form", kWasmFunctionTypeCode)) return nullptr;
// parse parameter types
uint32_t param_count =
@@ -1710,9 +1798,9 @@ class ModuleDecoderImpl : public Decoder {
return attribute;
}
- void consume_element_segment_header(bool* is_active,
+ void consume_element_segment_header(WasmElemSegment::Status* status,
bool* functions_as_elements,
- uint32_t* table_index,
+ ValueType* type, uint32_t* table_index,
WasmInitExpr* offset) {
const byte* pos = pc();
uint8_t flag;
@@ -1743,11 +1831,28 @@ class ModuleDecoderImpl : public Decoder {
kIsPassiveMask | kHasTableIndexMask | kFunctionsAsElementsMask;
bool is_passive = flag & kIsPassiveMask;
- *is_active = !is_passive;
+ if (!is_passive) {
+ *status = WasmElemSegment::kStatusActive;
+ if (module_->tables.size() == 0) {
+ error(pc_, "Active element sections require a table");
+ }
+ } else if ((flag & kHasTableIndexMask)) { // Special bit combination for
+ // declarative segments.
+ *status = WasmElemSegment::kStatusDeclarative;
+ } else {
+ *status = WasmElemSegment::kStatusPassive;
+ }
*functions_as_elements = flag & kFunctionsAsElementsMask;
- bool has_table_index = flag & kHasTableIndexMask;
+ bool has_table_index = (flag & kHasTableIndexMask) &&
+ *status == WasmElemSegment::kStatusActive;
- if (is_passive && !enabled_features_.has_bulk_memory()) {
+ if (*status == WasmElemSegment::kStatusDeclarative &&
+ !enabled_features_.has_anyref()) {
+ error("Declarative element segments require --experimental-wasm-anyref");
+ return;
+ }
+ if (*status == WasmElemSegment::kStatusPassive &&
+ !enabled_features_.has_bulk_memory()) {
error("Passive element segments require --experimental-wasm-bulk-memory");
return;
}
@@ -1764,8 +1869,8 @@ class ModuleDecoderImpl : public Decoder {
"--experimental-wasm-bulk-memory or --experimental-wasm-anyref?");
return;
}
- if ((flag & kFullMask) != flag || (!(*is_active) && has_table_index)) {
- errorf(pos, "illegal flag value %u. Must be 0, 1, 2, 4, 5 or 6", flag);
+ if ((flag & kFullMask) != flag) {
+ errorf(pos, "illegal flag value %u. Must be between 0 and 7", flag);
}
if (has_table_index) {
@@ -1774,25 +1879,20 @@ class ModuleDecoderImpl : public Decoder {
*table_index = 0;
}
- if (*is_active) {
+ if (*status == WasmElemSegment::kStatusActive) {
*offset = consume_init_expr(module_.get(), kWasmI32);
}
- if (*is_active && !has_table_index) {
+ if (*status == WasmElemSegment::kStatusActive && !has_table_index) {
// Active segments without table indices are a special case for backwards
// compatibility. These cases have an implicit element kind or element
// type, so we are done already with the segment header.
+ *type = kWasmFuncRef;
return;
}
if (*functions_as_elements) {
- // We have to check that there is an element type of type FuncRef. All
- // other element types are not valid yet.
- ValueType type = consume_reference_type();
- if (!ValueTypes::IsSubType(kWasmFuncRef, type)) {
- error(pc_ - 1, "invalid element segment type");
- return;
- }
+ *type = consume_reference_type();
} else {
// We have to check that there is an element kind of type Function. All
// other element kinds are not valid yet.
@@ -1802,6 +1902,7 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos, "illegal element kind %x. Must be 0x00", val);
return;
}
+ *type = kWasmFuncRef;
}
}
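
The status decoding above boils down to a three-bit flag; the bit values in this sketch are assumptions chosen for illustration (consistent with the "between 0 and 7" error message), not copied from the V8 headers:

    #include <cstdint>
    #include <cstdio>

    // Assumed bit assignments (illustrative only).
    constexpr uint8_t kIsPassiveMask = 0x01;
    constexpr uint8_t kHasTableIndexMask = 0x02;
    constexpr uint8_t kFunctionsAsElementsMask = 0x04;

    enum Status { kActive, kPassive, kDeclarative };

    Status DecodeStatus(uint8_t flag) {
      if ((flag & kIsPassiveMask) == 0) return kActive;
      // Passive with the table-index bit set is the special encoding that
      // marks a declarative segment.
      return (flag & kHasTableIndexMask) ? kDeclarative : kPassive;
    }

    int main() {
      std::printf("%d %d %d\n", DecodeStatus(0), DecodeStatus(1),
                  DecodeStatus(3));  // prints "0 1 2"
      return 0;
    }
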
@@ -1853,6 +1954,7 @@ class ModuleDecoderImpl : public Decoder {
uint32_t index =
consume_func_index(module_.get(), &func, "element function index");
if (failed()) return index;
+ func->declared = true;
DCHECK_NE(func, nullptr);
DCHECK_EQ(index, func->func_index);
DCHECK_NE(index, WasmElemSegment::kNullIndex);
@@ -1885,9 +1987,6 @@ ModuleResult DecodeWasmModule(const WasmFeatures& enabled,
bool verify_functions, ModuleOrigin origin,
Counters* counters,
AccountingAllocator* allocator) {
- auto counter =
- SELECT_WASM_COUNTER(counters, origin, wasm_decode, module_time);
- TimedHistogramScope wasm_decode_module_time_scope(counter);
size_t size = module_end - module_start;
CHECK_LE(module_start, module_end);
if (size >= kV8MaxWasmModuleSize) {
@@ -1901,19 +2000,7 @@ ModuleResult DecodeWasmModule(const WasmFeatures& enabled,
// Signatures are stored in zone memory, which has the same lifetime
// as the {module}.
ModuleDecoderImpl decoder(enabled, module_start, module_end, origin);
- ModuleResult result =
- decoder.DecodeModule(counters, allocator, verify_functions);
- // TODO(bradnelson): Improve histogram handling of size_t.
- // TODO(titzer): this isn't accurate, since it doesn't count the data
- // allocated on the C++ heap.
- // https://bugs.chromium.org/p/chromium/issues/detail?id=657320
- if (result.ok()) {
- auto peak_counter = SELECT_WASM_COUNTER(counters, origin, wasm_decode,
- module_peak_memory_bytes);
- peak_counter->AddSample(
- static_cast<int>(result.value()->signature_zone->allocation_size()));
- }
- return result;
+ return decoder.DecodeModule(counters, allocator, verify_functions);
}
ModuleDecoder::ModuleDecoder(const WasmFeatures& enabled)
@@ -1958,46 +2045,25 @@ ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
return impl_->FinishDecoding(verify_functions);
}
-SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder* decoder,
- const byte* end) {
- WireBytesRef string = consume_string(decoder, true, "section name");
- if (decoder->failed() || decoder->pc() > end) {
- return kUnknownSectionCode;
- }
- const byte* section_name_start =
- decoder->start() + decoder->GetBufferRelativeOffset(string.offset());
-
- TRACE(" +%d section name : \"%.*s\"\n",
- static_cast<int>(section_name_start - decoder->start()),
- string.length() < 20 ? string.length() : 20, section_name_start);
+void ModuleDecoder::set_code_section(uint32_t offset, uint32_t size) {
+ return impl_->set_code_section(offset, size);
+}
- if (string.length() == num_chars(kNameString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start), kNameString,
- num_chars(kNameString)) == 0) {
- return kNameSectionCode;
- } else if (string.length() == num_chars(kSourceMappingURLString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start),
- kSourceMappingURLString,
- num_chars(kSourceMappingURLString)) == 0) {
- return kSourceMappingURLSectionCode;
- } else if (string.length() == num_chars(kCompilationHintsString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start),
- kCompilationHintsString,
- num_chars(kCompilationHintsString)) == 0) {
- return kCompilationHintsSectionCode;
- } else if (string.length() == num_chars(kDebugInfoString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start),
- kDebugInfoString, num_chars(kDebugInfoString)) == 0) {
- return kDebugInfoSectionCode;
- }
- return kUnknownSectionCode;
+size_t ModuleDecoder::IdentifyUnknownSection(ModuleDecoder* decoder,
+ Vector<const uint8_t> bytes,
+ uint32_t offset,
+ SectionCode* result) {
+ if (!decoder->ok()) return 0;
+ decoder->impl_->Reset(bytes, offset);
+ *result = IdentifyUnknownSectionInternal(decoder->impl_.get());
+ return decoder->impl_->pc() - bytes.begin();
}
bool ModuleDecoder::ok() { return impl_->ok(); }
-FunctionSig* DecodeWasmSignatureForTesting(const WasmFeatures& enabled,
- Zone* zone, const byte* start,
- const byte* end) {
+const FunctionSig* DecodeWasmSignatureForTesting(const WasmFeatures& enabled,
+ Zone* zone, const byte* start,
+ const byte* end) {
ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin);
return decoder.DecodeFunctionSignature(zone, start);
}
@@ -2143,36 +2209,79 @@ bool FindNameSection(Decoder* decoder) {
} // namespace
void DecodeFunctionNames(const byte* module_start, const byte* module_end,
- std::unordered_map<uint32_t, WireBytesRef>* names) {
+ std::unordered_map<uint32_t, WireBytesRef>* names,
+ const Vector<const WasmExport> export_table) {
DCHECK_NOT_NULL(names);
DCHECK(names->empty());
Decoder decoder(module_start, module_end);
- if (!FindNameSection(&decoder)) return;
+ if (FindNameSection(&decoder)) {
+ while (decoder.ok() && decoder.more()) {
+ uint8_t name_type = decoder.consume_u8("name type");
+ if (name_type & 0x80) break; // no varuint7
- while (decoder.ok() && decoder.more()) {
- uint8_t name_type = decoder.consume_u8("name type");
- if (name_type & 0x80) break; // no varuint7
+ uint32_t name_payload_len = decoder.consume_u32v("name payload length");
+ if (!decoder.checkAvailable(name_payload_len)) break;
- uint32_t name_payload_len = decoder.consume_u32v("name payload length");
- if (!decoder.checkAvailable(name_payload_len)) break;
+ if (name_type != NameSectionKindCode::kFunction) {
+ decoder.consume_bytes(name_payload_len, "name subsection payload");
+ continue;
+ }
+ uint32_t functions_count = decoder.consume_u32v("functions count");
- if (name_type != NameSectionKindCode::kFunction) {
- decoder.consume_bytes(name_payload_len, "name subsection payload");
- continue;
+ for (; decoder.ok() && functions_count > 0; --functions_count) {
+ uint32_t function_index = decoder.consume_u32v("function index");
+ WireBytesRef name = consume_string(&decoder, false, "function name");
+
+ // Be lenient with errors in the name section: Ignore non-UTF8 names.
+ // You can even assign to the same function multiple times (last valid
+ // one wins).
+ if (decoder.ok() && validate_utf8(&decoder, name)) {
+ names->insert(std::make_pair(function_index, name));
+ }
+ }
+ }
+ }
+
+ // Extract from export table.
+ for (const WasmExport& exp : export_table) {
+ switch (exp.kind) {
+ case kExternalFunction:
+ if (names->count(exp.index) == 0) {
+ names->insert(std::make_pair(exp.index, exp.name));
+ }
+ break;
+ default:
+ break;
}
- uint32_t functions_count = decoder.consume_u32v("functions count");
+ }
+}
+
+void DecodeGlobalNames(
+ const Vector<const WasmImport> import_table,
+ const Vector<const WasmExport> export_table,
+ std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>*
+ names) {
+ DCHECK_NOT_NULL(names);
+ DCHECK(names->empty());
- for (; decoder.ok() && functions_count > 0; --functions_count) {
- uint32_t function_index = decoder.consume_u32v("function index");
- WireBytesRef name = consume_string(&decoder, false, "function name");
+ // Extract from import table.
+ for (const WasmImport& imp : import_table) {
+ if (imp.kind != kExternalGlobal) continue;
+ if (!imp.module_name.is_set() || !imp.field_name.is_set()) continue;
+ if (names->count(imp.index) == 0) {
+ names->insert(std::make_pair(
+ imp.index, std::make_pair(imp.module_name, imp.field_name)));
+ }
+ }
- // Be lenient with errors in the name section: Ignore non-UTF8 names. You
- // can even assign to the same function multiple times (last valid one
- // wins).
- if (decoder.ok() && validate_utf8(&decoder, name)) {
- names->insert(std::make_pair(function_index, name));
- }
+ // Extract from export table.
+ for (const WasmExport& exp : export_table) {
+ if (exp.kind != kExternalGlobal) continue;
+ if (!exp.name.is_set()) continue;
+ if (names->count(exp.index) == 0) {
+ names->insert(
+ std::make_pair(exp.index, std::make_pair(WireBytesRef(), exp.name)));
}
}
}
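
A note on the merge order in DecodeFunctionNames: std::unordered_map::insert never overwrites an existing key, so name-section entries take precedence and export-table names only fill the remaining gaps. A minimal sketch, with std::string standing in for WireBytesRef:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <unordered_map>

    int main() {
      std::unordered_map<uint32_t, std::string> names;
      names.insert({0, "main"});           // from the name section
      names.insert({0, "exported_main"});  // export table: ignored, key exists
      names.insert({1, "helper"});         // export table: fills the gap
      for (const auto& entry : names) {
        std::printf("#%u -> %s\n", entry.first, entry.second.c_str());
      }
      return 0;
    }
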
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index da35c0c4f5..6feeebb41d 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -133,7 +133,7 @@ V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(
// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone. Returns {nullptr} upon failure.
-V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(
+V8_EXPORT_PRIVATE const FunctionSig* DecodeWasmSignatureForTesting(
const WasmFeatures& enabled, Zone* zone, const byte* start,
const byte* end);
@@ -160,11 +160,20 @@ V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
// function.
AsmJsOffsetsResult DecodeAsmJsOffsets(Vector<const uint8_t> encoded_offsets);
-// Decode the function names from the name section.
-// Returns the result as an unordered map. Only names with valid utf8 encoding
-// are stored and conflicts are resolved by choosing the last name read.
+// Decode the function names from the name section and also from the export
+// table. Returns the result as an unordered map. Only names with valid utf8
+// encoding are stored and conflicts are resolved by choosing the last name
+// read.
void DecodeFunctionNames(const byte* module_start, const byte* module_end,
- std::unordered_map<uint32_t, WireBytesRef>* names);
+ std::unordered_map<uint32_t, WireBytesRef>* names,
+ const Vector<const WasmExport> export_table);
+
+// Decode the global names from the import table and export table. Returns the
+// result as an unordered map.
+void DecodeGlobalNames(
+ const Vector<const WasmImport> import_table,
+ const Vector<const WasmExport> export_table,
+ std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>* names);
// Decode the local names assignment from the name section.
// The result will be empty if no name section is present. On encountering an
@@ -194,6 +203,8 @@ class ModuleDecoder {
ModuleResult FinishDecoding(bool verify_functions = true);
+ void set_code_section(uint32_t offset, uint32_t size);
+
const std::shared_ptr<WasmModule>& shared_module() const;
WasmModule* module() const { return shared_module().get(); }
@@ -203,10 +214,10 @@ class ModuleDecoder {
// SectionCode if the unknown section is known to the decoder.
// The decoder is expected to point after the section length and just before
// the identifier string of the unknown section.
- // If a SectionCode other than kUnknownSectionCode is returned, the decoder
- // will point right after the identifier string. Otherwise, the position is
- // undefined.
- static SectionCode IdentifyUnknownSection(Decoder* decoder, const byte* end);
+ // The return value is the number of bytes that were consumed.
+ static size_t IdentifyUnknownSection(ModuleDecoder* decoder,
+ Vector<const uint8_t> bytes,
+ uint32_t offset, SectionCode* result);
private:
const WasmFeatures enabled_features_;
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 6d40e9ed5e..08bd8ff871 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -482,7 +482,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Check that indirect function table segments are within bounds.
//--------------------------------------------------------------------------
for (const WasmElemSegment& elem_segment : module_->elem_segments) {
- if (!elem_segment.active) continue;
+ if (elem_segment.status != WasmElemSegment::kStatusActive) continue;
DCHECK_LT(elem_segment.table_index, table_count);
uint32_t base = EvalUint32InitExpr(instance, elem_segment.offset);
// Because of imported tables, {table_size} has to come from the table
@@ -669,11 +669,8 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
}
// No need to copy empty segments.
if (size == 0) continue;
- Address dest_addr =
- reinterpret_cast<Address>(instance->memory_start()) + dest_offset;
- Address src_addr = reinterpret_cast<Address>(wire_bytes.begin()) +
- segment.source.offset();
- memory_copy_wrapper(dest_addr, src_addr, size);
+ std::memcpy(instance->memory_start() + dest_offset,
+ wire_bytes.begin() + segment.source.offset(), size);
} else {
DCHECK(segment.active);
// Segments of size == 0 are just nops.
@@ -691,22 +688,22 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
- ValueTypes::TypeName(global.type));
- switch (global.type) {
- case kWasmI32:
+ global.type.type_name());
+ switch (global.type.kind()) {
+ case ValueType::kI32:
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
DoubleToInt32(num));
break;
- case kWasmI64:
+ case ValueType::kI64:
// The Wasm-BigInt proposal currently says that i64 globals may
// only be initialized with BigInts. See:
// https://github.com/WebAssembly/JS-BigInt-integration/issues/12
UNREACHABLE();
- case kWasmF32:
+ case ValueType::kF32:
WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
DoubleToFloat32(num));
break;
- case kWasmF64:
+ case ValueType::kF64:
WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
break;
default:
@@ -717,7 +714,7 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, int64_t num) {
TRACE("init [globals_start=%p + %u] = %" PRId64 ", type = %s\n",
raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
- ValueTypes::TypeName(global.type));
+ global.type.type_name());
DCHECK_EQ(kWasmI64, global.type);
WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
}
@@ -726,44 +723,45 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
Handle<WasmGlobalObject> value) {
TRACE("init [globals_start=%p + %u] = ", raw_buffer_ptr(untagged_globals_, 0),
global.offset);
- switch (global.type) {
- case kWasmI32: {
+ switch (global.type.kind()) {
+ case ValueType::kI32: {
int32_t num = value->GetI32();
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
TRACE("%d", num);
break;
}
- case kWasmI64: {
+ case ValueType::kI64: {
int64_t num = value->GetI64();
WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
TRACE("%" PRId64, num);
break;
}
- case kWasmF32: {
+ case ValueType::kF32: {
float num = value->GetF32();
WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
TRACE("%f", num);
break;
}
- case kWasmF64: {
+ case ValueType::kF64: {
double num = value->GetF64();
WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
TRACE("%lf", num);
break;
}
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef: {
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef: {
DCHECK_IMPLIES(global.type == kWasmNullRef, value->GetRef()->IsNull());
tagged_globals_->set(global.offset, *value->GetRef());
break;
}
- default:
+ case ValueType::kStmt:
+ case ValueType::kS128:
+ case ValueType::kBottom:
UNREACHABLE();
}
- TRACE(", type = %s (from WebAssembly.Global)\n",
- ValueTypes::TypeName(global.type));
+ TRACE(", type = %s (from WebAssembly.Global)\n", global.type.type_name());
}
void InstanceBuilder::WriteGlobalAnyRef(const WasmGlobal& global,
@@ -835,7 +833,7 @@ bool InstanceBuilder::ProcessImportedFunction(
Handle<WasmExternalFunction>::cast(value));
}
auto js_receiver = Handle<JSReceiver>::cast(value);
- FunctionSig* expected_sig = module_->functions[func_index].sig;
+ const FunctionSig* expected_sig = module_->functions[func_index].sig;
auto resolved =
compiler::ResolveWasmImportCall(js_receiver, expected_sig, enabled_);
compiler::WasmImportCallKind kind = resolved.first;
@@ -846,7 +844,7 @@ bool InstanceBuilder::ProcessImportedFunction(
import_index, module_name, import_name);
return false;
case compiler::WasmImportCallKind::kWasmToWasm: {
- // The imported function is a WASM function from another instance.
+ // The imported function is a Wasm function from another instance.
auto imported_function = Handle<WasmExportedFunction>::cast(js_receiver);
Handle<WasmInstanceObject> imported_instance(
imported_function->instance(), isolate_);
@@ -929,10 +927,10 @@ bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
Handle<WasmInstanceObject> target_instance =
maybe_target_instance.ToHandleChecked();
- FunctionSig* sig = target_instance->module_object()
- .module()
- ->functions[function_index]
- .sig;
+ const FunctionSig* sig = target_instance->module_object()
+ .module()
+ ->functions[function_index]
+ .sig;
// Look up the signature's canonical id. If there is no canonical
// id, then the signature does not appear at all in this module,
@@ -1065,7 +1063,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
return false;
}
- bool is_sub_type = ValueTypes::IsSubType(global_object->type(), global.type);
+ bool is_sub_type = global_object->type().IsSubTypeOf(global.type);
bool is_same_type = global_object->type() == global.type;
bool valid_type = global.mutability ? is_same_type : is_sub_type;
@@ -1078,7 +1076,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
DCHECK_LT(global.index, module_->num_imported_mutable_globals);
Handle<Object> buffer;
Address address_or_offset;
- if (ValueTypes::IsReferenceType(global.type)) {
+ if (global.type.IsReferenceType()) {
static_assert(sizeof(global_object->offset()) <= sizeof(Address),
"The offset into the globals buffer does not fit into "
"the imported_mutable_globals array");
@@ -1156,8 +1154,8 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
return false;
}
- if (ValueTypes::IsReferenceType(global.type)) {
- if (global.type == ValueType::kWasmFuncRef) {
+ if (global.type.IsReferenceType()) {
+ if (global.type == kWasmFuncRef) {
if (!value->IsNull(isolate_) &&
!WasmExportedFunction::IsWasmExportedFunction(*value)) {
ReportLinkError(
@@ -1165,7 +1163,7 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
import_index, module_name, import_name);
return false;
}
- } else if (global.type == ValueType::kWasmNullRef) {
+ } else if (global.type == kWasmNullRef) {
if (!value->IsNull(isolate_)) {
ReportLinkError("imported nullref global must be null", import_index,
module_name, import_name);
@@ -1212,7 +1210,7 @@ void InstanceBuilder::CompileImportWrappers(
}
auto js_receiver = Handle<JSReceiver>::cast(value);
uint32_t func_index = module_->import_table[index].index;
- FunctionSig* sig = module_->functions[func_index].sig;
+ const FunctionSig* sig = module_->functions[func_index].sig;
auto resolved = compiler::ResolveWasmImportCall(js_receiver, sig, enabled_);
compiler::WasmImportCallKind kind = resolved.first;
if (kind == compiler::WasmImportCallKind::kWasmToWasm ||
@@ -1378,7 +1376,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
uint32_t old_offset =
module_->globals[global.init.val.global_index].offset;
TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
- if (ValueTypes::IsReferenceType(global.type)) {
+ if (global.type.IsReferenceType()) {
DCHECK(enabled_.has_anyref() || enabled_.has_eh());
tagged_globals_->set(new_offset, tagged_globals_->get(old_offset));
} else {
@@ -1402,10 +1400,11 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
// Allocate memory for a module instance as a new JSArrayBuffer.
bool InstanceBuilder::AllocateMemory() {
- auto initial_pages = module_->initial_pages;
- auto maximum_pages = module_->has_maximum_pages ? module_->maximum_pages
- : wasm::max_mem_pages();
- if (initial_pages > max_mem_pages()) {
+ uint32_t initial_pages = module_->initial_pages;
+ uint32_t maximum_pages = module_->has_maximum_pages
+ ? module_->maximum_pages
+ : wasm::max_maximum_mem_pages();
+ if (initial_pages > max_initial_mem_pages()) {
thrower_->RangeError("Out of memory: wasm memory too large");
return false;
}
@@ -1516,7 +1515,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
if (global.mutability && global.imported) {
Handle<FixedArray> buffers_array(
instance->imported_mutable_globals_buffers(), isolate_);
- if (ValueTypes::IsReferenceType(global.type)) {
+ if (global.type.IsReferenceType()) {
tagged_buffer = handle(
FixedArray::cast(buffers_array->get(global.index)), isolate_);
// For anyref globals we store the relative offset in the
@@ -1540,7 +1539,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
offset = static_cast<uint32_t>(global_addr - backing_store);
}
} else {
- if (ValueTypes::IsReferenceType(global.type)) {
+ if (global.type.IsReferenceType()) {
tagged_buffer = handle(instance->tagged_globals_buffer(), isolate_);
} else {
untagged_buffer =
@@ -1685,7 +1684,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
segment_index < module_->elem_segments.size(); ++segment_index) {
auto& elem_segment = instance->module()->elem_segments[segment_index];
// Passive segments are not copied during instantiation.
- if (!elem_segment.active) continue;
+ if (elem_segment.status != WasmElemSegment::kStatusActive) continue;
uint32_t table_index = elem_segment.table_index;
uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
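
The typing rule for imported globals above (exact type match for mutable globals, a subtype suffices for immutable ones) can be sketched in isolation; the simplified relation below mirrors ValueType::IsSubTypeOf from value-type.h:

    #include <cassert>

    enum Kind { kI32, kAnyRef, kFuncRef, kExnRef, kNullRef };

    // Simplified mirror of ValueType::IsSubTypeOf: nullref is a subtype of
    // every reference type, and funcref/exnref are subtypes of anyref.
    bool IsSubTypeOf(Kind a, Kind b) {
      if (a == b) return true;
      if (a == kNullRef) return b == kAnyRef || b == kFuncRef || b == kExnRef;
      return (a == kFuncRef || a == kExnRef) && b == kAnyRef;
    }

    // Mutable imports must match exactly; immutable ones may be a subtype.
    bool ValidImportType(Kind imported, Kind declared, bool mutability) {
      return mutability ? imported == declared : IsSubTypeOf(imported, declared);
    }

    int main() {
      assert(ValidImportType(kFuncRef, kAnyRef, /*mutability=*/false));
      assert(!ValidImportType(kFuncRef, kAnyRef, /*mutability=*/true));
      assert(ValidImportType(kI32, kI32, /*mutability=*/true));
      return 0;
    }
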
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 7a4c50303c..42230dfc06 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -29,31 +29,193 @@ class Simd128;
// I32 I64 F32 F64 NullRef
// \ \ \ \ /
// ------------ Bottom
-enum ValueType : uint8_t {
- kWasmStmt,
- kWasmI32,
- kWasmI64,
- kWasmF32,
- kWasmF64,
- kWasmS128,
- kWasmAnyRef,
- kWasmFuncRef,
- kWasmNullRef,
- kWasmExnRef,
- kWasmBottom,
+// Format: kind, log2Size, code, machineType, shortName, typeName
+#define FOREACH_VALUE_TYPE(V) \
+ V(Stmt, -1, Void, None, 'v', "<stmt>") \
+ V(I32, 2, I32, Int32, 'i', "i32") \
+ V(I64, 3, I64, Int64, 'l', "i64") \
+ V(F32, 2, F32, Float32, 'f', "f32") \
+ V(F64, 3, F64, Float64, 'd', "f64") \
+ V(S128, 4, S128, Simd128, 's', "s128") \
+ V(AnyRef, kSystemPointerSizeLog2, AnyRef, TaggedPointer, 'r', "anyref") \
+ V(FuncRef, kSystemPointerSizeLog2, FuncRef, TaggedPointer, 'a', "funcref") \
+ V(NullRef, kSystemPointerSizeLog2, NullRef, TaggedPointer, 'n', "nullref") \
+ V(ExnRef, kSystemPointerSizeLog2, ExnRef, TaggedPointer, 'e', "exn") \
+ V(Bottom, -1, Void, None, '*', "<bot>")
+
+class ValueType {
+ public:
+ enum Kind : uint8_t {
+#define DEF_ENUM(kind, ...) k##kind,
+ FOREACH_VALUE_TYPE(DEF_ENUM)
+#undef DEF_ENUM
+ };
+
+ constexpr ValueType() : kind_(kStmt) {}
+ explicit constexpr ValueType(Kind kind) : kind_(kind) {}
+
+ constexpr Kind kind() const { return kind_; }
+
+ constexpr int element_size_log2() const {
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK_NE(kStmt, kind_);
+ DCHECK_NE(kBottom, kind_);
+#endif
+
+ constexpr int kElementSizeLog2[] = {
+#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
+ FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
+#undef ELEM_SIZE_LOG2
+ };
+
+ return kElementSizeLog2[kind_];
+ }
+
+ constexpr int element_size_bytes() const { return 1 << element_size_log2(); }
+
+ constexpr bool operator==(ValueType other) const {
+ return kind_ == other.kind_;
+ }
+ constexpr bool operator!=(ValueType other) const {
+ return kind_ != other.kind_;
+ }
+
+ bool IsSubTypeOf(ValueType other) const {
+ return (*this == other) || (kind_ == kNullRef && other.kind_ == kAnyRef) ||
+ (kind_ == kFuncRef && other.kind_ == kAnyRef) ||
+ (kind_ == kExnRef && other.kind_ == kAnyRef) ||
+ (kind_ == kNullRef && other.kind_ == kFuncRef) ||
+ (kind_ == kNullRef && other.kind_ == kExnRef);
+ }
+
+ bool IsReferenceType() const {
+ return kind_ == kAnyRef || kind_ == kFuncRef || kind_ == kNullRef ||
+ kind_ == kExnRef;
+ }
+
+ static ValueType CommonSubType(ValueType a, ValueType b) {
+ if (a.kind() == b.kind()) return a;
+ // If either type is a non-reference type, the only common subtype is {bot}.
+ if (!a.IsReferenceType() || !b.IsReferenceType()) {
+ return ValueType(kBottom);
+ }
+ if (a.IsSubTypeOf(b)) return a;
+ if (b.IsSubTypeOf(a)) return b;
+ // {a} and {b} are not each other's subtype. The greatest subtype of all
+ // reference types is {kWasmNullRef}.
+ return ValueType(kNullRef);
+ }
+
+ ValueTypeCode value_type_code() const {
+ DCHECK_NE(kBottom, kind_);
+
+ constexpr ValueTypeCode kValueTypeCode[] = {
+#define TYPE_CODE(kind, log2Size, code, ...) kLocal##code,
+ FOREACH_VALUE_TYPE(TYPE_CODE)
+#undef TYPE_CODE
+ };
+
+ return kValueTypeCode[kind_];
+ }
+
+ MachineType machine_type() const {
+ DCHECK_NE(kBottom, kind_);
+
+ constexpr MachineType kMachineType[] = {
+#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
+ MachineType::machineType(),
+ FOREACH_VALUE_TYPE(MACH_TYPE)
+#undef MACH_TYPE
+ };
+
+ return kMachineType[kind_];
+ }
+
+ MachineRepresentation machine_representation() {
+ return machine_type().representation();
+ }
+
+ static ValueType For(MachineType type) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return ValueType(kI32);
+ case MachineRepresentation::kWord64:
+ return ValueType(kI64);
+ case MachineRepresentation::kFloat32:
+ return ValueType(kF32);
+ case MachineRepresentation::kFloat64:
+ return ValueType(kF64);
+ case MachineRepresentation::kTaggedPointer:
+ return ValueType(kAnyRef);
+ case MachineRepresentation::kSimd128:
+ return ValueType(kS128);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ constexpr char short_name() const {
+ constexpr char kShortName[] = {
+#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
+ FOREACH_VALUE_TYPE(SHORT_NAME)
+#undef SHORT_NAME
+ };
+
+ return kShortName[kind_];
+ }
+
+ constexpr const char* type_name() const {
+ constexpr const char* kTypeName[] = {
+#define TYPE_NAME(kind, log2Size, code, machineType, shortName, typeName, ...) \
+ typeName,
+ FOREACH_VALUE_TYPE(TYPE_NAME)
+#undef TYPE_NAME
+ };
+
+ return kTypeName[kind_];
+ }
+
+ private:
+ Kind kind_ : 8;
+ // TODO(jkummerow): Add and use the following for reference types:
+ // uint32_t ref_index_ : 24;
};
+static_assert(sizeof(ValueType) <= kUInt32Size,
+ "ValueType is small and can be passed by value");
+
+inline size_t hash_value(ValueType type) {
+ return static_cast<size_t>(type.kind());
+}
+
+// Output operator, useful for DCHECKs and other debug output.
+inline std::ostream& operator<<(std::ostream& oss, ValueType type) {
+ return oss << type.type_name();
+}
+
+constexpr ValueType kWasmI32 = ValueType(ValueType::kI32);
+constexpr ValueType kWasmI64 = ValueType(ValueType::kI64);
+constexpr ValueType kWasmF32 = ValueType(ValueType::kF32);
+constexpr ValueType kWasmF64 = ValueType(ValueType::kF64);
+constexpr ValueType kWasmAnyRef = ValueType(ValueType::kAnyRef);
+constexpr ValueType kWasmExnRef = ValueType(ValueType::kExnRef);
+constexpr ValueType kWasmFuncRef = ValueType(ValueType::kFuncRef);
+constexpr ValueType kWasmNullRef = ValueType(ValueType::kNullRef);
+constexpr ValueType kWasmS128 = ValueType(ValueType::kS128);
+constexpr ValueType kWasmStmt = ValueType(ValueType::kStmt);
+constexpr ValueType kWasmBottom = ValueType(ValueType::kBottom);
+
#define FOREACH_WASMVALUE_CTYPES(V) \
- V(kWasmI32, int32_t) \
- V(kWasmI64, int64_t) \
- V(kWasmF32, float) \
- V(kWasmF64, double) \
- V(kWasmS128, Simd128)
+ V(kI32, int32_t) \
+ V(kI64, int64_t) \
+ V(kF32, float) \
+ V(kF64, double) \
+ V(kS128, Simd128)
using FunctionSig = Signature<ValueType>;
-inline size_t hash_value(ValueType type) { return static_cast<size_t>(type); }
-
#define FOREACH_LOAD_TYPE(V) \
V(I32, , Int32) \
V(I32, 8S, Int8) \
@@ -79,7 +241,7 @@ class LoadType {
#undef DEF_ENUM
};
- // Allow implicit convertion of the enum value to this wrapper.
+ // Allow implicit conversion of the enum value to this wrapper.
constexpr LoadType(LoadTypeValue val) // NOLINT(runtime/explicit)
: val_(val) {}
@@ -90,16 +252,16 @@ class LoadType {
constexpr MachineType mem_type() const { return kMemType[val_]; }
static LoadType ForValueType(ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
return kI32Load;
- case kWasmI64:
+ case ValueType::kI64:
return kI64Load;
- case kWasmF32:
+ case ValueType::kF32:
return kF32Load;
- case kWasmF64:
+ case ValueType::kF64:
return kF64Load;
- case kWasmS128:
+ case ValueType::kS128:
return kS128Load;
default:
UNREACHABLE();
@@ -119,7 +281,7 @@ class LoadType {
};
static constexpr ValueType kValueType[] = {
-#define VALUE_TYPE(type, ...) kWasm##type,
+#define VALUE_TYPE(type, ...) ValueType(ValueType::k##type),
FOREACH_LOAD_TYPE(VALUE_TYPE)
#undef VALUE_TYPE
};
@@ -162,16 +324,16 @@ class StoreType {
constexpr MachineRepresentation mem_rep() const { return kMemRep[val_]; }
static StoreType ForValueType(ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
return kI32Store;
- case kWasmI64:
+ case ValueType::kI64:
return kI64Store;
- case kWasmF32:
+ case ValueType::kF32:
return kF32Store;
- case kWasmF64:
+ case ValueType::kF64:
return kF64Store;
- case kWasmS128:
+ case ValueType::kS128:
return kS128Store;
default:
UNREACHABLE();
@@ -190,7 +352,7 @@ class StoreType {
};
static constexpr ValueType kValueType[] = {
-#define VALUE_TYPE(type, ...) kWasm##type,
+#define VALUE_TYPE(type, ...) ValueType(ValueType::k##type),
FOREACH_STORE_TYPE(VALUE_TYPE)
#undef VALUE_TYPE
};
@@ -202,235 +364,6 @@ class StoreType {
};
};
-// A collection of ValueType-related static methods.
-class V8_EXPORT_PRIVATE ValueTypes {
- public:
- static inline bool IsSubType(ValueType actual, ValueType expected) {
- return (expected == actual) ||
- (expected == kWasmAnyRef && actual == kWasmNullRef) ||
- (expected == kWasmAnyRef && actual == kWasmFuncRef) ||
- (expected == kWasmAnyRef && actual == kWasmExnRef) ||
- (expected == kWasmFuncRef && actual == kWasmNullRef) ||
- (expected == kWasmExnRef && actual == kWasmNullRef);
- }
-
- static inline bool IsReferenceType(ValueType type) {
- return type == kWasmAnyRef || type == kWasmFuncRef ||
- type == kWasmNullRef || type == kWasmExnRef;
- }
-
- static inline ValueType CommonSubType(ValueType a, ValueType b) {
- if (a == b) return a;
- // The only sub type of any value type is {bot}.
- if (!IsReferenceType(a) || !IsReferenceType(b)) return kWasmBottom;
- if (IsSubType(a, b)) return a;
- if (IsSubType(b, a)) return b;
- // {a} and {b} are not each other's subtype. The biggest sub-type of all
- // reference types is {kWasmNullRef}.
- return kWasmNullRef;
- }
-
- static byte MemSize(MachineType type) {
- return 1 << i::ElementSizeLog2Of(type.representation());
- }
-
- static int ElementSizeInBytes(ValueType type) {
- switch (type) {
- case kWasmI32:
- case kWasmF32:
- return 4;
- case kWasmI64:
- case kWasmF64:
- return 8;
- case kWasmS128:
- return 16;
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef:
- return kSystemPointerSize;
- case kWasmStmt:
- case kWasmBottom:
- UNREACHABLE();
- }
- }
-
- static int ElementSizeLog2Of(ValueType type) {
- switch (type) {
- case kWasmI32:
- case kWasmF32:
- return 2;
- case kWasmI64:
- case kWasmF64:
- return 3;
- case kWasmS128:
- return 4;
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef:
- return kSystemPointerSizeLog2;
- case kWasmStmt:
- case kWasmBottom:
- UNREACHABLE();
- }
- }
-
- static byte MemSize(ValueType type) { return 1 << ElementSizeLog2Of(type); }
-
- static ValueTypeCode ValueTypeCodeFor(ValueType type) {
- switch (type) {
- case kWasmI32:
- return kLocalI32;
- case kWasmI64:
- return kLocalI64;
- case kWasmF32:
- return kLocalF32;
- case kWasmF64:
- return kLocalF64;
- case kWasmS128:
- return kLocalS128;
- case kWasmAnyRef:
- return kLocalAnyRef;
- case kWasmFuncRef:
- return kLocalFuncRef;
- case kWasmNullRef:
- return kLocalNullRef;
- case kWasmExnRef:
- return kLocalExnRef;
- case kWasmStmt:
- return kLocalVoid;
- case kWasmBottom:
- UNREACHABLE();
- }
- }
-
- static MachineType MachineTypeFor(ValueType type) {
- switch (type) {
- case kWasmI32:
- return MachineType::Int32();
- case kWasmI64:
- return MachineType::Int64();
- case kWasmF32:
- return MachineType::Float32();
- case kWasmF64:
- return MachineType::Float64();
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef:
- return MachineType::TaggedPointer();
- case kWasmS128:
- return MachineType::Simd128();
- case kWasmStmt:
- return MachineType::None();
- case kWasmBottom:
- UNREACHABLE();
- }
- }
-
- static MachineRepresentation MachineRepresentationFor(ValueType type) {
- switch (type) {
- case kWasmI32:
- return MachineRepresentation::kWord32;
- case kWasmI64:
- return MachineRepresentation::kWord64;
- case kWasmF32:
- return MachineRepresentation::kFloat32;
- case kWasmF64:
- return MachineRepresentation::kFloat64;
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef:
- return MachineRepresentation::kTaggedPointer;
- case kWasmS128:
- return MachineRepresentation::kSimd128;
- case kWasmStmt:
- return MachineRepresentation::kNone;
- case kWasmBottom:
- UNREACHABLE();
- }
- }
-
- static ValueType ValueTypeFor(MachineType type) {
- switch (type.representation()) {
- case MachineRepresentation::kWord8:
- case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
- return kWasmI32;
- case MachineRepresentation::kWord64:
- return kWasmI64;
- case MachineRepresentation::kFloat32:
- return kWasmF32;
- case MachineRepresentation::kFloat64:
- return kWasmF64;
- case MachineRepresentation::kTaggedPointer:
- return kWasmAnyRef;
- case MachineRepresentation::kSimd128:
- return kWasmS128;
- default:
- UNREACHABLE();
- }
- }
-
- static char ShortNameOf(ValueType type) {
- switch (type) {
- case kWasmI32:
- return 'i';
- case kWasmI64:
- return 'l';
- case kWasmF32:
- return 'f';
- case kWasmF64:
- return 'd';
- case kWasmAnyRef:
- return 'r';
- case kWasmFuncRef:
- return 'a';
- case kWasmS128:
- return 's';
- case kWasmStmt:
- return 'v';
- case kWasmNullRef:
- return 'n';
- case kWasmExnRef:
- case kWasmBottom:
- return '*';
- }
- }
-
- static const char* TypeName(ValueType type) {
- switch (type) {
- case kWasmI32:
- return "i32";
- case kWasmI64:
- return "i64";
- case kWasmF32:
- return "f32";
- case kWasmF64:
- return "f64";
- case kWasmAnyRef:
- return "anyref";
- case kWasmFuncRef:
- return "funcref";
- case kWasmNullRef:
- return "nullref";
- case kWasmExnRef:
- return "exn";
- case kWasmS128:
- return "s128";
- case kWasmStmt:
- return "<stmt>";
- case kWasmBottom:
- return "<bot>";
- }
- }
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ValueTypes);
-};
-
} // namespace wasm
} // namespace internal
} // namespace v8
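
The FOREACH_VALUE_TYPE table above is an X-macro: one list of rows expands into the enum and every parallel lookup table, which is what replaces the hand-written switches of the old ValueTypes class. A reduced, self-contained version of the pattern:

    #include <cstdio>

    // One row per type: (kind, typeName). The real V8 rows carry more columns.
    #define FOREACH_TYPE(V) \
      V(I32, "i32")         \
      V(I64, "i64")         \
      V(F32, "f32")         \
      V(F64, "f64")

    enum Kind {
    #define DEF_ENUM(kind, name) k##kind,
      FOREACH_TYPE(DEF_ENUM)
    #undef DEF_ENUM
    };

    // A parallel table generated from the same list, indexed by Kind.
    constexpr const char* kTypeName[] = {
    #define TYPE_NAME(kind, name) name,
        FOREACH_TYPE(TYPE_NAME)
    #undef TYPE_NAME
    };

    int main() {
      std::printf("%s\n", kTypeName[kF64]);  // prints "f64"
      return 0;
    }
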
diff --git a/deps/v8/src/wasm/wasm-arguments.h b/deps/v8/src/wasm/wasm-arguments.h
index 822f46addd..e6f212b2d9 100644
--- a/deps/v8/src/wasm/wasm-arguments.h
+++ b/deps/v8/src/wasm/wasm-arguments.h
@@ -45,14 +45,14 @@ class CWasmArgumentsPacker {
return base::ReadUnalignedValue<T>(address);
}
- static int TotalSize(FunctionSig* sig) {
+ static int TotalSize(const FunctionSig* sig) {
int return_size = 0;
for (ValueType t : sig->returns()) {
- return_size += ValueTypes::ElementSizeInBytes(t);
+ return_size += t.element_size_bytes();
}
int param_size = 0;
for (ValueType t : sig->parameters()) {
- param_size += ValueTypes::ElementSizeInBytes(t);
+ param_size += t.element_size_bytes();
}
return std::max(return_size, param_size);
}
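
CWasmArgumentsPacker reuses one buffer for both parameters and return values, so TotalSize is the maximum of the two byte sums; a sketch with plain byte counts standing in for ValueType:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int TotalSize(const std::vector<int>& param_sizes,
                  const std::vector<int>& return_sizes) {
      int param_bytes = 0, return_bytes = 0;
      for (int s : param_sizes) param_bytes += s;
      for (int s : return_sizes) return_bytes += s;
      // The packed buffer is reused for arguments and results, so it must
      // fit whichever of the two is larger.
      return std::max(return_bytes, param_bytes);
    }

    int main() {
      // (i64, i32) -> f64: max(8 + 4, 8) == 12 bytes.
      std::printf("%d\n", TotalSize({8, 4}, {8}));
      return 0;
    }
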
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 8eb0f5d97f..99cf484b17 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -133,31 +133,47 @@ Address WasmCode::handler_table() const {
return instruction_start() + handler_table_offset_;
}
-uint32_t WasmCode::handler_table_size() const {
+int WasmCode::handler_table_size() const {
DCHECK_GE(constant_pool_offset_, handler_table_offset_);
- return static_cast<uint32_t>(constant_pool_offset_ - handler_table_offset_);
+ return static_cast<int>(constant_pool_offset_ - handler_table_offset_);
}
Address WasmCode::code_comments() const {
return instruction_start() + code_comments_offset_;
}
-uint32_t WasmCode::code_comments_size() const {
+int WasmCode::code_comments_size() const {
DCHECK_GE(unpadded_binary_size_, code_comments_offset_);
- return static_cast<uint32_t>(unpadded_binary_size_ - code_comments_offset_);
+ return static_cast<int>(unpadded_binary_size_ - code_comments_offset_);
+}
+
+std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
+ std::initializer_list<Vector<const byte>> vectors) {
+ size_t total_size = 0;
+ for (auto& vec : vectors) total_size += vec.size();
+ // Use default-initialization (== no initialization).
+ std::unique_ptr<byte[]> result{new byte[total_size]};
+ byte* ptr = result.get();
+ for (auto& vec : vectors) {
+ if (vec.empty()) continue; // Avoid nullptr in {memcpy}.
+ memcpy(ptr, vec.begin(), vec.size());
+ ptr += vec.size();
+ }
+ return result;
}
void WasmCode::RegisterTrapHandlerData() {
DCHECK(!has_trap_handler_index());
if (kind() != WasmCode::kFunction) return;
- if (protected_instructions_.empty()) return;
+ if (protected_instructions_size_ == 0) return;
Address base = instruction_start();
size_t size = instructions().size();
+ auto protected_instruction_data = this->protected_instructions();
const int index =
- RegisterHandlerData(base, size, protected_instructions().size(),
- protected_instructions().begin());
+ RegisterHandlerData(base, size, protected_instruction_data.size(),
+ protected_instruction_data.begin());
// TODO(eholk): if index is negative, fail.
CHECK_LE(0, index);
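
ConcatenateBytes above performs one allocation followed by back-to-back copies; the same idea, with std::vector standing in for V8's Vector:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>
    #include <initializer_list>
    #include <memory>
    #include <vector>

    std::unique_ptr<unsigned char[]> Concat(
        std::initializer_list<std::vector<unsigned char>> parts) {
      size_t total = 0;
      for (const auto& part : parts) total += part.size();
      // Default-initialized array: no zero-fill before the copies below.
      std::unique_ptr<unsigned char[]> result{new unsigned char[total]};
      unsigned char* ptr = result.get();
      for (const auto& part : parts) {
        if (part.empty()) continue;  // memcpy with a null source is UB
        std::memcpy(ptr, part.data(), part.size());
        ptr += part.size();
      }
      return result;
    }

    int main() {
      auto bytes = Concat({{1, 2}, {}, {3}});
      std::printf("%d %d %d\n", bytes[0], bytes[1], bytes[2]);  // 1 2 3
      return 0;
    }
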
@@ -179,8 +195,8 @@ void WasmCode::LogCode(Isolate* isolate) const {
if (IsAnonymous()) return;
ModuleWireBytes wire_bytes(native_module()->wire_bytes());
- WireBytesRef name_ref =
- native_module()->module()->function_names.Lookup(wire_bytes, index());
+ WireBytesRef name_ref = native_module()->module()->function_names.Lookup(
+ wire_bytes, index(), VectorOf(native_module()->module()->export_table));
WasmName name = wire_bytes.GetNameOrNull(name_ref);
const std::string& source_map_url = native_module()->module()->source_map_url;
@@ -195,28 +211,28 @@ void WasmCode::LogCode(Isolate* isolate) const {
std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
}
- std::unique_ptr<char[]> name_buffer;
+ std::string name_buffer;
if (kind_ == kWasmToJsWrapper) {
- constexpr size_t kNameBufferLen = 128;
- constexpr size_t kNamePrefixLen = 11;
- name_buffer = std::make_unique<char[]>(kNameBufferLen);
- memcpy(name_buffer.get(), "wasm-to-js:", kNamePrefixLen);
- Vector<char> remaining_buf =
- VectorOf(name_buffer.get(), kNameBufferLen) + kNamePrefixLen;
- FunctionSig* sig = native_module()->module()->functions[index_].sig;
- remaining_buf += PrintSignature(remaining_buf, sig);
+ name_buffer = "wasm-to-js:";
+ size_t prefix_len = name_buffer.size();
+ constexpr size_t kMaxSigLength = 128;
+ name_buffer.resize(prefix_len + kMaxSigLength);
+ const FunctionSig* sig = native_module()->module()->functions[index_].sig;
+ size_t sig_length =
+ PrintSignature(VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
+ name_buffer.resize(prefix_len + sig_length);
// If the import has a name, also append that (separated by "-").
- if (!name.empty() && remaining_buf.length() > 1) {
- remaining_buf[0] = '-';
- remaining_buf += 1;
- size_t suffix_len = std::min(name.size(), remaining_buf.size());
- memcpy(remaining_buf.begin(), name.begin(), suffix_len);
- remaining_buf += suffix_len;
+ if (!name.empty()) {
+ name_buffer += '-';
+ name_buffer.append(name.begin(), name.size());
}
- size_t name_len = remaining_buf.begin() - name_buffer.get();
- name = VectorOf(name_buffer.get(), name_len);
+ name = VectorOf(name_buffer);
} else if (name.empty()) {
- name = CStrVector("<wasm-unnamed>");
+ name_buffer.resize(32);
+ name_buffer.resize(
+ SNPrintF(VectorOf(&name_buffer.front(), name_buffer.size()),
+ "wasm-function[%d]", index()));
+ name = VectorOf(name_buffer);
}
PROFILE(isolate,
CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name));
@@ -296,7 +312,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
<< unpadded_binary_size_ << " + " << padding << " padding)\n";
#ifdef ENABLE_DISASSEMBLER
- size_t instruction_size = unpadded_binary_size_;
+ int instruction_size = unpadded_binary_size_;
if (constant_pool_offset_ < instruction_size) {
instruction_size = constant_pool_offset_;
}
@@ -322,7 +338,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
}
- if (!protected_instructions_.empty()) {
+ if (protected_instructions_size_ > 0) {
os << "Protected instructions:\n pc offset land pad\n";
for (auto& data : protected_instructions()) {
os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
@@ -364,7 +380,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
}
- os << "RelocInfo (size = " << reloc_info_.size() << ")\n";
+ os << "RelocInfo (size = " << reloc_info().size() << ")\n";
for (RelocIterator it(instructions(), reloc_info(), constant_pool());
!it.done(); it.next()) {
it.rinfo()->Print(nullptr, os);
@@ -440,13 +456,11 @@ void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
VirtualMemory code_space,
- bool can_request_more,
std::shared_ptr<Counters> async_counters)
: code_manager_(code_manager),
free_code_space_(code_space.region()),
- can_request_more_memory_(can_request_more),
async_counters_(std::move(async_counters)) {
- owned_code_space_.reserve(can_request_more ? 4 : 1);
+ owned_code_space_.reserve(4);
owned_code_space_.emplace_back(std::move(code_space));
async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
@@ -502,8 +516,7 @@ base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
}
int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
- return NativeModule::kNeedsFarJumpsBetweenCodeSpaces &&
- FLAG_wasm_far_jump_table
+ return NativeModule::kNeedsFarJumpsBetweenCodeSpaces
? static_cast<int>(num_declared_functions)
: 0;
}
@@ -571,12 +584,8 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
base::AddressRegion code_space =
free_code_space_.AllocateInRegion(size, region);
if (code_space.is_empty()) {
- const bool in_specific_region =
- region.size() < std::numeric_limits<size_t>::max();
- if (!can_request_more_memory_ || in_specific_region) {
- auto error = in_specific_region ? "wasm code reservation in region"
- : "wasm code reservation";
- V8::FatalProcessOutOfMemory(nullptr, error);
+ if (region.size() < std::numeric_limits<size_t>::max()) {
+ V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation in region");
UNREACHABLE();
}
@@ -649,26 +658,20 @@ bool WasmCodeAllocator::SetExecutable(bool executable) {
executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
#if V8_OS_WIN
// On windows, we need to switch permissions per separate virtual memory
- // reservation. This is really just a problem when the NativeModule is
- // growable (meaning can_request_more_memory_). That's 32-bit in production,
- // or unittests.
+ // reservation.
// For now, in that case, we commit at reserved memory granularity.
// Technically, that may be a waste, because we may reserve more than we
// use. On 32-bit though, the scarce resource is the address space -
// committed or not.
- if (can_request_more_memory_) {
- for (auto& vmem : owned_code_space_) {
- if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
- permission)) {
- return false;
- }
- TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
- executable);
+ for (auto& vmem : owned_code_space_) {
+ if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
+ permission)) {
+ return false;
}
- is_executable_ = executable;
- return true;
+ TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
+ executable);
}
-#endif
+#else // V8_OS_WIN
size_t commit_page_size = page_allocator->CommitPageSize();
for (auto& region : allocated_code_space_.regions()) {
// allocated_code_space_ is fine-grained, so we need to
@@ -681,6 +684,7 @@ bool WasmCodeAllocator::SetExecutable(bool executable) {
TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to executable:%d\n",
region.begin(), region.end(), executable);
}
+#endif // V8_OS_WIN
}
is_executable_ = executable;
return true;
@@ -730,12 +734,12 @@ size_t WasmCodeAllocator::GetNumCodeSpaces() const {
}
NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
- bool can_request_more, VirtualMemory code_space,
+ VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this)
: code_allocator_(engine->code_manager(), std::move(code_space),
- can_request_more, async_counters),
+ async_counters),
enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
@@ -796,8 +800,10 @@ void NativeModule::LogWasmCodes(Isolate* isolate) {
}
CompilationEnv NativeModule::CreateCompilationEnv() const {
- return {module(), use_trap_handler_, kRuntimeExceptionSupport,
- enabled_features_};
+ // Protect concurrent accesses to {tier_down_}.
+ base::MutexGuard guard(&allocation_mutex_);
+ return {module(), use_trap_handler_, kRuntimeExceptionSupport,
+ enabled_features_, kNoLowerSimd, tier_down_};
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
@@ -822,20 +828,16 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
Vector<const byte> instructions(
reinterpret_cast<byte*>(code->InstructionStart()),
static_cast<size_t>(code->InstructionSize()));
- const uint32_t stack_slots = static_cast<uint32_t>(
- code->has_safepoint_info() ? code->stack_slots() : 0);
+ const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
// Code objects contain real offsets but WasmCode expects an offset of 0 to
// mean 'empty'.
- const size_t safepoint_table_offset = static_cast<size_t>(
- code->has_safepoint_table() ? code->safepoint_table_offset() : 0);
- const size_t handler_table_offset =
- static_cast<size_t>(code->handler_table_offset());
- const size_t constant_pool_offset =
- static_cast<size_t>(code->constant_pool_offset());
- const size_t code_comments_offset =
- static_cast<size_t>(code->code_comments_offset());
+ const int safepoint_table_offset =
+ code->has_safepoint_table() ? code->safepoint_table_offset() : 0;
+ const int handler_table_offset = code->handler_table_offset();
+ const int constant_pool_offset = code->constant_pool_offset();
+ const int code_comments_offset = code->code_comments_offset();
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
@@ -844,10 +846,10 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
code->InstructionStart();
- int mode_mask = RelocInfo::kApplyMask |
- RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
+ int mode_mask =
+ RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
auto jump_tables_ref =
- FindJumpTablesForCode(reinterpret_cast<Address>(dst_code_bytes.begin()));
+ FindJumpTablesForRegion(base::AddressRegionOf(dst_code_bytes));
Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
Address constant_pool_start = dst_code_addr + constant_pool_offset;
RelocIterator orig_it(*code, mode_mask);
@@ -869,22 +871,22 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
// Flush the i-cache after relocation.
FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
- std::unique_ptr<WasmCode> new_code{new WasmCode{
- this, // native_module
- kAnonymousFuncIndex, // index
- dst_code_bytes, // instructions
- stack_slots, // stack_slots
- 0, // tagged_parameter_slots
- safepoint_table_offset, // safepoint_table_offset
- handler_table_offset, // handler_table_offset
- constant_pool_offset, // constant_pool_offset
- code_comments_offset, // code_comments_offset
- instructions.size(), // unpadded_binary_size
- OwnedVector<ProtectedInstructionData>{}, // protected_instructions
- std::move(reloc_info), // reloc_info
- std::move(source_pos), // source positions
- WasmCode::kFunction, // kind
- ExecutionTier::kNone}}; // tier
+ std::unique_ptr<WasmCode> new_code{
+ new WasmCode{this, // native_module
+ kAnonymousFuncIndex, // index
+ dst_code_bytes, // instructions
+ stack_slots, // stack_slots
+ 0, // tagged_parameter_slots
+ safepoint_table_offset, // safepoint_table_offset
+ handler_table_offset, // handler_table_offset
+ constant_pool_offset, // constant_pool_offset
+ code_comments_offset, // code_comments_offset
+ instructions.length(), // unpadded_binary_size
+ {}, // protected_instructions
+ reloc_info.as_vector(), // reloc_info
+ source_pos.as_vector(), // source positions
+ WasmCode::kFunction, // kind
+ ExecutionTier::kNone}}; // tier
new_code->MaybePrint(nullptr);
new_code->Validate();
@@ -911,9 +913,9 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
JumpTableAssembler::GenerateLazyCompileTable(
lazy_compile_table_->instruction_start(), num_slots,
module_->num_imported_functions,
- GetNearRuntimeStubEntry(
- WasmCode::kWasmCompileLazy,
- FindJumpTablesForCode(lazy_compile_table_->instruction_start())));
+ GetNearRuntimeStubEntry(WasmCode::kWasmCompileLazy,
+ FindJumpTablesForRegion(base::AddressRegionOf(
+ lazy_compile_table_->instructions()))));
}
// Add jump table entry for jump to the lazy compile stub.
@@ -927,47 +929,38 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
}
std::unique_ptr<WasmCode> NativeModule::AddCode(
- uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
- uint32_t tagged_parameter_slots,
- OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
- OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ int index, const CodeDesc& desc, int stack_slots,
+ int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
+ Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier) {
Vector<byte> code_space =
code_allocator_.AllocateForCode(this, desc.instr_size);
auto jump_table_ref =
- FindJumpTablesForCode(reinterpret_cast<Address>(code_space.begin()));
+ FindJumpTablesForRegion(base::AddressRegionOf(code_space));
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
- std::move(protected_instructions),
- std::move(source_position_table), kind, tier,
- code_space, jump_table_ref);
+ protected_instructions_data,
+ source_position_table, kind, tier, code_space,
+ jump_table_ref);
}
std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
- uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
- uint32_t tagged_parameter_slots,
- OwnedVector<ProtectedInstructionData> protected_instructions,
- OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ int index, const CodeDesc& desc, int stack_slots,
+ int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
+ Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, Vector<uint8_t> dst_code_bytes,
- const JumpTablesRef& jump_tables_ref) {
- OwnedVector<byte> reloc_info;
- if (desc.reloc_size > 0) {
- reloc_info = OwnedVector<byte>::New(desc.reloc_size);
- memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
- desc.reloc_size);
- }
+ const JumpTablesRef& jump_tables) {
+ Vector<byte> reloc_info{desc.buffer + desc.buffer_size - desc.reloc_size,
+ static_cast<size_t>(desc.reloc_size)};
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
// CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
// 'empty'.
- const size_t safepoint_table_offset = static_cast<size_t>(
- desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset);
- const size_t handler_table_offset =
- static_cast<size_t>(desc.handler_table_offset);
- const size_t constant_pool_offset =
- static_cast<size_t>(desc.constant_pool_offset);
- const size_t code_comments_offset =
- static_cast<size_t>(desc.code_comments_offset);
- const size_t instr_size = static_cast<size_t>(desc.instr_size);
+ const int safepoint_table_offset =
+ desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
+ const int handler_table_offset = desc.handler_table_offset;
+ const int constant_pool_offset = desc.constant_pool_offset;
+ const int code_comments_offset = desc.code_comments_offset;
+ const int instr_size = desc.instr_size;
memcpy(dst_code_bytes.begin(), desc.buffer,
static_cast<size_t>(desc.instr_size));
@@ -979,19 +972,19 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
Address constant_pool_start = code_start + constant_pool_offset;
- for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
- constant_pool_start, mode_mask);
+ for (RelocIterator it(dst_code_bytes, reloc_info, constant_pool_start,
+ mode_mask);
!it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsWasmCall(mode)) {
uint32_t call_tag = it.rinfo()->wasm_call_tag();
- Address target = GetNearCallTargetForFunction(call_tag, jump_tables_ref);
+ Address target = GetNearCallTargetForFunction(call_tag, jump_tables);
it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsWasmStubCall(mode)) {
uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
Address entry = GetNearRuntimeStubEntry(
- static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
+ static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables);
it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
} else {
it.rinfo()->apply(delta);
@@ -1004,8 +997,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
std::unique_ptr<WasmCode> code{new WasmCode{
this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
- code_comments_offset, instr_size, std::move(protected_instructions),
- std::move(reloc_info), std::move(source_position_table), kind, tier}};
+ code_comments_offset, instr_size, protected_instructions_data, reloc_info,
+ source_position_table, kind, tier}};
code->MaybePrint();
code->Validate();
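
The rewritten AddCodeWithCodeSpace no longer copies relocation info into a separate OwnedVector; it borrows a view into the tail of the CodeDesc buffer, where the assembler places it. A minimal standalone sketch of that tail-slicing arithmetic, with a hypothetical Desc struct and std::pair standing in for V8's CodeDesc and Vector<const byte>:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

// Hypothetical stand-in for V8's CodeDesc: a single buffer with the
// instructions at the front and the relocation info at the very end.
struct Desc {
  const uint8_t* buffer;
  size_t buffer_size;
  size_t instr_size;
  size_t reloc_size;
};

// Borrow the reloc info as a (pointer, size) view into the buffer tail;
// no copy is made, mirroring the Vector<byte> construction above.
std::pair<const uint8_t*, size_t> RelocInfoView(const Desc& desc) {
  assert(desc.reloc_size <= desc.buffer_size);
  return {desc.buffer + desc.buffer_size - desc.reloc_size, desc.reloc_size};
}

int main() {
  uint8_t buffer[64] = {};
  Desc desc{buffer, sizeof buffer, 48, 16};
  auto [ptr, size] = RelocInfoView(desc);
  return (ptr == buffer + 48 && size == 16) ? 0 : 1;
}
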
@@ -1052,9 +1045,11 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// tier.
uint32_t slot_idx = declared_function_index(module(), code->index());
WasmCode* prior_code = code_table_[slot_idx];
- bool update_code_table =
- tier_down_ ? !prior_code || code->tier() == ExecutionTier::kLiftoff
- : !prior_code || prior_code->tier() < code->tier();
+ // TODO(clemensb): Revisit this logic once tier down is fully working.
+ const bool prefer_liftoff = tier_down_ || debug_info_;
+ const bool update_code_table =
+ prefer_liftoff ? !prior_code || code->tier() == ExecutionTier::kLiftoff
+ : !prior_code || prior_code->tier() < code->tier();
if (update_code_table) {
code_table_[slot_idx] = code.get();
if (prior_code) {
@@ -1088,14 +1083,13 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
}
WasmCode* NativeModule::AddDeserializedCode(
- uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
- uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
- size_t handler_table_offset, size_t constant_pool_offset,
- size_t code_comments_offset, size_t unpadded_binary_size,
- OwnedVector<ProtectedInstructionData> protected_instructions,
- OwnedVector<const byte> reloc_info,
- OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier) {
+ int index, Vector<const byte> instructions, int stack_slots,
+ int tagged_parameter_slots, int safepoint_table_offset,
+ int handler_table_offset, int constant_pool_offset,
+ int code_comments_offset, int unpadded_binary_size,
+ Vector<const byte> protected_instructions_data,
+ Vector<const byte> reloc_info, Vector<const byte> source_position_table,
+ WasmCode::Kind kind, ExecutionTier tier) {
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
@@ -1103,9 +1097,8 @@ WasmCode* NativeModule::AddDeserializedCode(
std::unique_ptr<WasmCode> code{new WasmCode{
this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
- code_comments_offset, unpadded_binary_size,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_position_table), kind, tier}};
+ code_comments_offset, unpadded_binary_size, protected_instructions_data,
+ reloc_info, source_position_table, kind, tier}};
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
@@ -1132,6 +1125,12 @@ bool NativeModule::HasCode(uint32_t index) const {
return code_table_[declared_function_index(module(), index)] != nullptr;
}
+bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
+ base::MutexGuard guard(&allocation_mutex_);
+ return code_table_[declared_function_index(module(), index)] != nullptr &&
+ code_table_[declared_function_index(module(), index)]->tier() == tier;
+}
+
void NativeModule::SetWasmSourceMap(
std::unique_ptr<WasmModuleSourceMap> source_map) {
source_map_ = std::move(source_map);
@@ -1142,7 +1141,7 @@ WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
}
WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
- uint32_t jump_table_size, base::AddressRegion region,
+ int jump_table_size, base::AddressRegion region,
const WasmCodeAllocator::OptionalLock& allocator_lock) {
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
@@ -1150,22 +1149,22 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
this, jump_table_size, region, allocator_lock);
DCHECK(!code_space.empty());
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
- std::unique_ptr<WasmCode> code{new WasmCode{
- this, // native_module
- kAnonymousFuncIndex, // index
- code_space, // instructions
- 0, // stack_slots
- 0, // tagged_parameter_slots
- 0, // safepoint_table_offset
- jump_table_size, // handler_table_offset
- jump_table_size, // constant_pool_offset
- jump_table_size, // code_comments_offset
- jump_table_size, // unpadded_binary_size
- OwnedVector<ProtectedInstructionData>{}, // protected_instructions
- OwnedVector<const uint8_t>{}, // reloc_info
- OwnedVector<const uint8_t>{}, // source_pos
- WasmCode::kJumpTable, // kind
- ExecutionTier::kNone}}; // tier
+ std::unique_ptr<WasmCode> code{
+ new WasmCode{this, // native_module
+ kAnonymousFuncIndex, // index
+ code_space, // instructions
+ 0, // stack_slots
+ 0, // tagged_parameter_slots
+ 0, // safepoint_table_offset
+ jump_table_size, // handler_table_offset
+ jump_table_size, // constant_pool_offset
+ jump_table_size, // code_comments_offset
+ jump_table_size, // unpadded_binary_size
+ {}, // protected_instructions
+ {}, // reloc_info
+ {}, // source_pos
+ WasmCode::kJumpTable, // kind
+ ExecutionTier::kNone}}; // tier
return PublishCode(std::move(code));
}
@@ -1238,12 +1237,10 @@ void NativeModule::AddCodeSpace(
WasmCode* jump_table = nullptr;
WasmCode* far_jump_table = nullptr;
const uint32_t num_wasm_functions = module_->num_declared_functions;
- const bool has_functions = num_wasm_functions > 0;
const bool is_first_code_space = code_space_data_.empty();
- // TODO(clemensb): Avoid additional jump table if the code space is close
- // enough to another existing code space.
- const bool needs_jump_table =
- has_functions && (kNeedsFarJumpsBetweenCodeSpaces || is_first_code_space);
+ // We always need a far jump table, because it contains the runtime stubs.
+ const bool needs_far_jump_table = !FindJumpTablesForRegion(region).is_valid();
+ const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;
if (needs_jump_table) {
jump_table = CreateEmptyJumpTableInRegion(
@@ -1252,32 +1249,39 @@ void NativeModule::AddCodeSpace(
CHECK(region.contains(jump_table->instruction_start()));
}
- // Always allocate a far jump table, because it contains the runtime stubs.
- int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
- far_jump_table = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfFarJumpSlots(
- WasmCode::kRuntimeStubCount,
- NumWasmFunctionsInFarJumpTable(num_function_slots)),
- region, allocator_lock);
- CHECK(region.contains(far_jump_table->instruction_start()));
- EmbeddedData embedded_data = EmbeddedData::FromBlob();
+ if (needs_far_jump_table) {
+ int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
+ far_jump_table = CreateEmptyJumpTableInRegion(
+ JumpTableAssembler::SizeForNumberOfFarJumpSlots(
+ WasmCode::kRuntimeStubCount,
+ NumWasmFunctionsInFarJumpTable(num_function_slots)),
+ region, allocator_lock);
+ CHECK(region.contains(far_jump_table->instruction_start()));
+ EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define RUNTIME_STUB(Name) Builtins::k##Name,
#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
- Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
- WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
+ Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
+ WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
- Address builtin_addresses[WasmCode::kRuntimeStubCount];
- for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
- Builtins::Name builtin = stub_names[i];
- CHECK(embedded_data.ContainsBuiltin(builtin));
- builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
+ Address builtin_addresses[WasmCode::kRuntimeStubCount];
+ for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
+ Builtins::Name builtin = stub_names[i];
+ CHECK(embedded_data.ContainsBuiltin(builtin));
+ builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
+ }
+ JumpTableAssembler::GenerateFarJumpTable(
+ far_jump_table->instruction_start(), builtin_addresses,
+ WasmCode::kRuntimeStubCount, num_function_slots);
}
- JumpTableAssembler::GenerateFarJumpTable(
- far_jump_table->instruction_start(), builtin_addresses,
- WasmCode::kRuntimeStubCount, num_function_slots);
- if (is_first_code_space) main_jump_table_ = jump_table;
+ if (is_first_code_space) {
+ // This can be updated and accessed without locks, since the addition of the
+ // first code space happens during initialization of the {NativeModule},
+ // where no concurrent accesses are possible.
+ main_jump_table_ = jump_table;
+ main_far_jump_table_ = far_jump_table;
+ }
base::MutexGuard guard(&allocation_mutex_);
code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
@@ -1354,32 +1358,60 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
return main_jump_table_->instruction_start() + slot_offset;
}
-NativeModule::JumpTablesRef NativeModule::FindJumpTablesForCode(
- Address code_addr) const {
+NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
+ base::AddressRegion code_region) const {
+ auto jump_table_usable = [code_region](const WasmCode* jump_table) {
+ Address table_start = jump_table->instruction_start();
+ Address table_end = table_start + jump_table->instructions().size();
+ // Compute the maximum distance from anywhere in the code region to anywhere
+ // in the jump table, avoiding any underflow.
+ size_t max_distance = std::max(
+ code_region.end() > table_start ? code_region.end() - table_start : 0,
+ table_end > code_region.begin() ? table_end - code_region.begin() : 0);
+ return max_distance < kMaxWasmCodeSpaceSize;
+ };
+
+ // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
+ // Access to these fields is possible without locking, since these fields are
+ // initialized on construction of the {NativeModule}.
+ if (main_far_jump_table_ && jump_table_usable(main_far_jump_table_) &&
+ (main_jump_table_ == nullptr || jump_table_usable(main_jump_table_))) {
+ return {
+ main_jump_table_ ? main_jump_table_->instruction_start() : kNullAddress,
+ main_far_jump_table_->instruction_start()};
+ }
+
+ // Otherwise, take the mutex and look for another suitable jump table.
base::MutexGuard guard(&allocation_mutex_);
for (auto& code_space_data : code_space_data_) {
- const bool jump_table_reachable =
- !kNeedsFarJumpsBetweenCodeSpaces ||
- code_space_data.region.contains(code_addr);
- if (jump_table_reachable && code_space_data.far_jump_table) {
- // We might not have a jump table if we have no functions.
- return {code_space_data.jump_table
- ? code_space_data.jump_table->instruction_start()
- : kNullAddress,
- code_space_data.far_jump_table->instruction_start()};
+ DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
+ if (!code_space_data.far_jump_table) continue;
+ // Only return these jump tables if they are reachable from the whole
+ // {code_region}.
+ if (kNeedsFarJumpsBetweenCodeSpaces &&
+ (!jump_table_usable(code_space_data.far_jump_table) ||
+ (code_space_data.jump_table &&
+ !jump_table_usable(code_space_data.jump_table)))) {
+ continue;
}
+ return {code_space_data.jump_table
+ ? code_space_data.jump_table->instruction_start()
+ : kNullAddress,
+ code_space_data.far_jump_table->instruction_start()};
}
- FATAL("code_addr is not part of a code space");
+ return {};
}
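
FindJumpTablesForRegion only accepts jump tables that every address in the code region can reach with a near jump, which is what the max-distance computation in the {jump_table_usable} lambda establishes. A standalone sketch of that check, with kMaxCodeSpaceSize as a placeholder for V8's kMaxWasmCodeSpaceSize:

#include <algorithm>
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;

struct Region {
  Address begin;
  size_t size;
  Address end() const { return begin + size; }
};

// Placeholder for V8's kMaxWasmCodeSpaceSize (the near-jump range); the
// real value is architecture-dependent.
constexpr size_t kMaxCodeSpaceSize = size_t{1} << 30;

// True if a near jump from anywhere in {code} reaches anywhere in {table}.
// The two operands of max() cover a table lying below or above the code
// region; the ternaries avoid unsigned underflow, as in the lambda above.
bool JumpTableUsable(Region code, Region table) {
  size_t max_distance = std::max<size_t>(
      code.end() > table.begin ? code.end() - table.begin : 0,
      table.end() > code.begin ? table.end() - code.begin : 0);
  return max_distance < kMaxCodeSpaceSize;
}

int main() {
  Region code{0x10000, 4096};
  Region near_table{0x12000, 256};
  Region far_table{code.begin + (size_t{1} << 31), 256};
  return JumpTableUsable(code, near_table) && !JumpTableUsable(code, far_table)
             ? 0
             : 1;
}
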
Address NativeModule::GetNearCallTargetForFunction(
uint32_t func_index, const JumpTablesRef& jump_tables) const {
+ DCHECK(jump_tables.is_valid());
uint32_t slot_offset = GetJumpTableOffset(func_index);
return jump_tables.jump_table_start + slot_offset;
}
Address NativeModule::GetNearRuntimeStubEntry(
WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const {
+ DCHECK(jump_tables.is_valid());
auto offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index);
return jump_tables.far_jump_table_start + offset;
}
@@ -1657,8 +1689,7 @@ size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
WasmEngine* engine, Isolate* isolate, const WasmFeatures& enabled,
- size_t code_size_estimate, bool can_request_more,
- std::shared_ptr<const WasmModule> module) {
+ size_t code_size_estimate, std::shared_ptr<const WasmModule> module) {
DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
if (total_committed_code_space_.load() >
critical_committed_code_space_.load()) {
@@ -1672,9 +1703,7 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
// If we cannot add code space later, reserve enough address space up front.
size_t code_vmem_size =
- can_request_more ? ReservationSize(code_size_estimate,
- module->num_declared_functions, 0)
- : kMaxWasmCodeSpaceSize;
+ ReservationSize(code_size_estimate, module->num_declared_functions, 0);
// The '--wasm-max-code-space-reservation' testing flag can be used to reduce
// the maximum size of the initial code space reservation (in MB).
@@ -1705,8 +1734,8 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
size_t size = code_space.size();
Address end = code_space.end();
std::shared_ptr<NativeModule> ret;
- new NativeModule(engine, enabled, can_request_more, std::move(code_space),
- std::move(module), isolate->async_counters(), &ret);
+ new NativeModule(engine, enabled, std::move(code_space), std::move(module),
+ isolate->async_counters(), &ret);
// The constructor initialized the shared_ptr.
DCHECK_NOT_NULL(ret);
TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
@@ -1740,8 +1769,6 @@ void NativeModule::SampleCodeSize(
if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
size_t freed_size = code_allocator_.freed_code_size();
DCHECK_LE(freed_size, generated_size);
- int total_freed_mb = static_cast<int>(freed_size / MB);
- counters->wasm_module_freed_code_size_mb()->AddSample(total_freed_mb);
int freed_percent = static_cast<int>(100 * freed_size / generated_size);
counters->wasm_module_freed_code_size_percent()->AddSample(
freed_percent);
@@ -1768,8 +1795,7 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
Vector<byte> code_space =
code_allocator_.AllocateForCode(this, total_code_space);
// Lookup the jump tables to use once, then use for all code objects.
- auto jump_tables_ref =
- FindJumpTablesForCode(reinterpret_cast<Address>(code_space.begin()));
+ auto jump_tables = FindJumpTablesForRegion(base::AddressRegionOf(code_space));
std::vector<std::unique_ptr<WasmCode>> generated_code;
generated_code.reserve(results.size());
@@ -1782,9 +1808,10 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
code_space += code_size;
generated_code.emplace_back(AddCodeWithCodeSpace(
result.func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions), GetCodeKind(result),
- result.result_tier, this_code_space, jump_tables_ref));
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), GetCodeKind(result),
+ result.result_tier, this_code_space, jump_tables));
}
DCHECK_EQ(0, code_space.size());
@@ -1806,18 +1833,37 @@ bool NativeModule::IsRedirectedToInterpreter(uint32_t func_index) {
return has_interpreter_redirection(func_index);
}
+bool NativeModule::SetTieredDown() {
+ // Do not tier down asm.js.
+ if (module()->origin != kWasmOrigin) return false;
+
+ base::MutexGuard lock(&allocation_mutex_);
+ if (tier_down_) return true;
+ tier_down_ = true;
+ return false;
+}
+
+bool NativeModule::IsTieredDown() {
+ base::MutexGuard lock(&allocation_mutex_);
+ return tier_down_;
+}
+
void NativeModule::TierDown(Isolate* isolate) {
- // Set the flag.
- {
- base::MutexGuard lock(&allocation_mutex_);
- tier_down_ = true;
- }
+ // Do not tier down asm.js.
+ if (module()->origin != kWasmOrigin) return;
+
+ // Set the flag. Return if it is already set.
+ if (SetTieredDown()) return;
+
// Tier down all functions.
isolate->wasm_engine()->RecompileAllFunctions(isolate, this,
ExecutionTier::kLiftoff);
}
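
TierDown now funnels through SetTieredDown, which sets the flag and returns its previous state under the lock, so the expensive recompilation runs at most once even if several debuggers attach. A minimal sketch of this test-and-set idiom, with std::mutex standing in for V8's base::Mutex:

#include <mutex>

class TierState {
 public:
  // Set the flag and report whether it was already set, all under the
  // lock, mirroring NativeModule::SetTieredDown() above.
  bool SetTieredDown() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (tier_down_) return true;
    tier_down_ = true;
    return false;
  }

  void TierDown() {
    if (SetTieredDown()) return;  // Already tiered down; recompile only once.
    recompile_count_++;           // Stands in for RecompileAllFunctions().
  }

  int recompile_count() const { return recompile_count_; }

 private:
  std::mutex mutex_;
  bool tier_down_ = false;
  int recompile_count_ = 0;
};

int main() {
  TierState state;
  state.TierDown();
  state.TierDown();  // Second call is a no-op.
  return state.recompile_count() == 1 ? 0 : 1;
}
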
void NativeModule::TierUp(Isolate* isolate) {
+ // Do not tier up asm.js.
+ if (module()->origin != kWasmOrigin) return;
+
// Set the flag.
{
base::MutexGuard lock(&allocation_mutex_);
@@ -1825,21 +1871,18 @@ void NativeModule::TierUp(Isolate* isolate) {
}
// Tier up all functions.
- // TODO(duongn): parallelize this eventually.
- for (uint32_t index = module_->num_imported_functions;
- index < num_functions(); index++) {
- isolate->wasm_engine()->CompileFunction(isolate, this, index,
- ExecutionTier::kTurbofan);
- DCHECK(!compilation_state()->failed());
- }
+ isolate->wasm_engine()->RecompileAllFunctions(isolate, this,
+ ExecutionTier::kTurbofan);
}
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
// Free the code space.
code_allocator_.FreeCode(codes);
- // Free the {WasmCode} objects. This will also unregister trap handler data.
base::MutexGuard guard(&allocation_mutex_);
+ // Remove debug side tables for all removed code objects.
+ if (debug_info_) debug_info_->RemoveDebugSideTables(codes);
+ // Free the {WasmCode} objects. This will also unregister trap handler data.
for (WasmCode* code : codes) {
DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
owned_code_.erase(code->instruction_start());
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 513426785f..4b176f3ba6 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -48,10 +48,15 @@ struct WasmModule;
#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
FOREACH_WASM_TRAPREASON(VTRAP) \
V(WasmCompileLazy) \
+ V(WasmDebugBreak) \
V(WasmAtomicNotify) \
- V(WasmI32AtomicWait) \
- V(WasmI64AtomicWait) \
+ V(WasmI32AtomicWait32) \
+ V(WasmI32AtomicWait64) \
+ V(WasmI64AtomicWait32) \
+ V(WasmI64AtomicWait64) \
V(WasmMemoryGrow) \
+ V(WasmTableInit) \
+ V(WasmTableCopy) \
V(WasmTableGet) \
V(WasmTableSet) \
V(WasmStackGuard) \
@@ -124,13 +129,17 @@ class V8_EXPORT_PRIVATE WasmCode final {
Address instruction_start() const {
return reinterpret_cast<Address>(instructions_.begin());
}
- Vector<const byte> reloc_info() const { return reloc_info_.as_vector(); }
+ Vector<const byte> reloc_info() const {
+ return {protected_instructions_data().end(),
+ static_cast<size_t>(reloc_info_size_)};
+ }
Vector<const byte> source_positions() const {
- return source_position_table_.as_vector();
+ return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
}
+ // TODO(clemensb): Make this return int.
uint32_t index() const {
- DCHECK(!IsAnonymous());
+ DCHECK_LE(0, index_);
return index_;
}
// Anonymous functions are functions that don't carry an index.
@@ -140,25 +149,31 @@ class V8_EXPORT_PRIVATE WasmCode final {
ExecutionTier tier() const { return tier_; }
Address constant_pool() const;
Address handler_table() const;
- uint32_t handler_table_size() const;
+ int handler_table_size() const;
Address code_comments() const;
- uint32_t code_comments_size() const;
- size_t constant_pool_offset() const { return constant_pool_offset_; }
- size_t safepoint_table_offset() const { return safepoint_table_offset_; }
- size_t handler_table_offset() const { return handler_table_offset_; }
- size_t code_comments_offset() const { return code_comments_offset_; }
- size_t unpadded_binary_size() const { return unpadded_binary_size_; }
- uint32_t stack_slots() const { return stack_slots_; }
- uint32_t tagged_parameter_slots() const { return tagged_parameter_slots_; }
+ int code_comments_size() const;
+ int constant_pool_offset() const { return constant_pool_offset_; }
+ int safepoint_table_offset() const { return safepoint_table_offset_; }
+ int handler_table_offset() const { return handler_table_offset_; }
+ int code_comments_offset() const { return code_comments_offset_; }
+ int unpadded_binary_size() const { return unpadded_binary_size_; }
+ int stack_slots() const { return stack_slots_; }
+ int tagged_parameter_slots() const { return tagged_parameter_slots_; }
bool is_liftoff() const { return tier_ == ExecutionTier::kLiftoff; }
bool contains(Address pc) const {
return reinterpret_cast<Address>(instructions_.begin()) <= pc &&
pc < reinterpret_cast<Address>(instructions_.end());
}
- Vector<trap_handler::ProtectedInstructionData> protected_instructions()
+ Vector<const uint8_t> protected_instructions_data() const {
+ return {meta_data_.get(),
+ static_cast<size_t>(protected_instructions_size_)};
+ }
+
+ Vector<const trap_handler::ProtectedInstructionData> protected_instructions()
const {
- return protected_instructions_.as_vector();
+ return Vector<const trap_handler::ProtectedInstructionData>::cast(
+ protected_instructions_data());
}
void Validate() const;
@@ -206,25 +221,25 @@ class V8_EXPORT_PRIVATE WasmCode final {
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
- STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
-
private:
friend class NativeModule;
- WasmCode(NativeModule* native_module, uint32_t index,
- Vector<byte> instructions, uint32_t stack_slots,
- uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
- size_t handler_table_offset, size_t constant_pool_offset,
- size_t code_comments_offset, size_t unpadded_binary_size,
- OwnedVector<trap_handler::ProtectedInstructionData>
- protected_instructions,
- OwnedVector<const byte> reloc_info,
- OwnedVector<const byte> source_position_table, Kind kind,
+ WasmCode(NativeModule* native_module, int index, Vector<byte> instructions,
+ int stack_slots, int tagged_parameter_slots,
+ int safepoint_table_offset, int handler_table_offset,
+ int constant_pool_offset, int code_comments_offset,
+ int unpadded_binary_size,
+ Vector<const byte> protected_instructions_data,
+ Vector<const byte> reloc_info,
+ Vector<const byte> source_position_table, Kind kind,
ExecutionTier tier)
: instructions_(instructions),
- reloc_info_(std::move(reloc_info)),
- source_position_table_(std::move(source_position_table)),
native_module_(native_module),
+ meta_data_(ConcatenateBytes(
+ {protected_instructions_data, reloc_info, source_position_table})),
+ reloc_info_size_(reloc_info.length()),
+ source_positions_size_(source_position_table.length()),
+ protected_instructions_size_(protected_instructions_data.length()),
index_(index),
kind_(kind),
constant_pool_offset_(constant_pool_offset),
@@ -234,7 +249,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
handler_table_offset_(handler_table_offset),
code_comments_offset_(code_comments_offset),
unpadded_binary_size_(unpadded_binary_size),
- protected_instructions_(std::move(protected_instructions)),
tier_(tier) {
DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
DCHECK_LE(handler_table_offset, unpadded_binary_size);
@@ -242,6 +256,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
DCHECK_LE(constant_pool_offset, unpadded_binary_size);
}
+ std::unique_ptr<const byte[]> ConcatenateBytes(
+ std::initializer_list<Vector<const byte>>);
+
// Code objects that have been registered with the global trap handler within
// this process, will have a {trap_handler_index} associated with them.
int trap_handler_index() const {
@@ -263,25 +280,30 @@ class V8_EXPORT_PRIVATE WasmCode final {
V8_NOINLINE bool DecRefOnPotentiallyDeadCode();
Vector<byte> instructions_;
- OwnedVector<const byte> reloc_info_;
- OwnedVector<const byte> source_position_table_;
NativeModule* native_module_ = nullptr;
- uint32_t index_;
+ // {meta_data_} contains several byte vectors concatenated into one:
+ // - protected instructions data of size {protected_instructions_size_}
+ // - relocation info of size {reloc_info_size_}
+ // - source positions of size {source_positions_size_}
+ // Note that the protected instructions come first to ensure alignment.
+ std::unique_ptr<const byte[]> meta_data_;
+ const int reloc_info_size_;
+ const int source_positions_size_;
+ const int protected_instructions_size_;
+ int index_;
Kind kind_;
- size_t constant_pool_offset_ = 0;
- uint32_t stack_slots_ = 0;
+ int constant_pool_offset_ = 0;
+ int stack_slots_ = 0;
// Number of tagged parameters passed to this function via the stack. This
// value is used by the stack walker (e.g. GC) to find references.
- uint32_t tagged_parameter_slots_ = 0;
- // we care about safepoint data for wasm-to-js functions,
- // since there may be stack/register tagged values for large number
- // conversions.
- size_t safepoint_table_offset_ = 0;
- size_t handler_table_offset_ = 0;
- size_t code_comments_offset_ = 0;
- size_t unpadded_binary_size_ = 0;
+ int tagged_parameter_slots_ = 0;
+ // We care about safepoint data for wasm-to-js functions, since there may be
+ // stack/register tagged values for large number conversions.
+ int safepoint_table_offset_ = 0;
+ int handler_table_offset_ = 0;
+ int code_comments_offset_ = 0;
+ int unpadded_binary_size_ = 0;
int trap_handler_index_ = -1;
- OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
ExecutionTier tier_;
// WasmCode is ref counted. Counters are held by:
@@ -299,6 +321,12 @@ class V8_EXPORT_PRIVATE WasmCode final {
DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
+// Check that {WasmCode} objects are sufficiently small. We create many of them,
+// often for rather small functions.
+// Increase the limit if needed, but first check if the size increase is
+// justified.
+STATIC_ASSERT(sizeof(WasmCode) <= 96);
+
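
Folding the three formerly separate OwnedVectors into the single {meta_data_} allocation is what makes the 96-byte assertion above attainable. A standalone sketch of the concatenate-then-slice scheme described in the {meta_data_} comment, with std::pair standing in for Vector<const byte>:

#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <memory>
#include <utility>

using byte = uint8_t;
// Stand-in for V8's Vector<const byte>: a (pointer, size) view.
using ByteView = std::pair<const byte*, size_t>;

// Copy several byte ranges into one contiguous allocation, as the WasmCode
// constructor does via ConcatenateBytes().
std::unique_ptr<const byte[]> Concatenate(
    std::initializer_list<ByteView> parts) {
  size_t total = 0;
  for (const auto& part : parts) total += part.second;
  std::unique_ptr<byte[]> result(new byte[total]);
  byte* out = result.get();
  for (const auto& part : parts) {
    std::memcpy(out, part.first, part.second);
    out += part.second;
  }
  return result;
}

// The accessors slice the single allocation by the stored sizes, in the
// same order as the comment on {meta_data_}: protected instructions,
// then reloc info, then source positions.
struct Meta {
  std::unique_ptr<const byte[]> data;
  size_t protected_size, reloc_size, source_pos_size;

  ByteView protected_data() const { return {data.get(), protected_size}; }
  ByteView reloc_info() const {
    return {data.get() + protected_size, reloc_size};
  }
  ByteView source_positions() const {
    return {data.get() + protected_size + reloc_size, source_pos_size};
  }
};

int main() {
  byte a[2] = {1, 2};
  byte b[3] = {3, 4, 5};
  Meta meta{Concatenate({{a, 2}, {b, 3}}), 2, 3, 0};
  return meta.reloc_info().first[0] == 3 ? 0 : 1;
}
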
WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
// Return a textual description of the kind.
@@ -327,7 +355,6 @@ class WasmCodeAllocator {
};
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
- bool can_request_more,
std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
@@ -391,10 +418,6 @@ class WasmCodeAllocator {
bool is_executable_ = false;
- // TODO(clemensb): Remove this field once multiple code spaces are supported
- // everywhere.
- const bool can_request_more_memory_;
-
std::shared_ptr<Counters> async_counters_;
};
@@ -409,13 +432,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
// code below, i.e. it can be called concurrently from background threads.
// The returned code still needs to be published via {PublishCode}.
- std::unique_ptr<WasmCode> AddCode(
- uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
- uint32_t tagged_parameter_slots,
- OwnedVector<trap_handler::ProtectedInstructionData>
- protected_instructions,
- OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier);
+ std::unique_ptr<WasmCode> AddCode(int index, const CodeDesc& desc,
+ int stack_slots, int tagged_parameter_slots,
+ Vector<const byte> protected_instructions,
+ Vector<const byte> source_position_table,
+ WasmCode::Kind kind, ExecutionTier tier);
// {PublishCode} makes the code available to the system by entering it into
// the code table and patching the jump table. It returns a raw pointer to the
@@ -425,15 +446,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
WasmCode* AddDeserializedCode(
- uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
- uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
- size_t handler_table_offset, size_t constant_pool_offset,
- size_t code_comments_offset, size_t unpadded_binary_size,
- OwnedVector<trap_handler::ProtectedInstructionData>
- protected_instructions,
- OwnedVector<const byte> reloc_info,
- OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier);
+ int index, Vector<const byte> instructions, int stack_slots,
+ int tagged_parameter_slots, int safepoint_table_offset,
+ int handler_table_offset, int constant_pool_offset,
+ int code_comments_offset, int unpadded_binary_size,
+ Vector<const byte> protected_instructions_data,
+ Vector<const byte> reloc_info, Vector<const byte> source_position_table,
+ WasmCode::Kind kind, ExecutionTier tier);
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
@@ -449,6 +468,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* GetCode(uint32_t index) const;
bool HasCode(uint32_t index) const;
+ bool HasCodeWithTier(uint32_t index, ExecutionTier tier) const;
void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
WasmModuleSourceMap* GetWasmSourceMap() const;
@@ -465,23 +485,25 @@ class V8_EXPORT_PRIVATE NativeModule final {
Address GetCallTargetForFunction(uint32_t func_index) const;
struct JumpTablesRef {
- const Address jump_table_start;
- const Address far_jump_table_start;
+ const Address jump_table_start = kNullAddress;
+ const Address far_jump_table_start = kNullAddress;
+
+ bool is_valid() const { return far_jump_table_start != kNullAddress; }
};
- // Finds the jump tables that should be used for the code at {code_addr}. This
+  // Finds the jump tables that should be used for the given code region. This
// information is then passed to {GetNearCallTargetForFunction} and
// {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
- // up there.
- JumpTablesRef FindJumpTablesForCode(Address code_addr) const;
+  // up there. Returns an empty struct if no suitable jump tables exist.
+ JumpTablesRef FindJumpTablesForRegion(base::AddressRegion) const;
// Similarly to {GetCallTargetForFunction}, but uses the jump table previously
- // looked up via {FindJumpTablesForCode}.
+ // looked up via {FindJumpTablesForRegion}.
Address GetNearCallTargetForFunction(uint32_t func_index,
const JumpTablesRef&) const;
// Get a runtime stub entry (which is a far jump table slot) in the jump table
- // previously looked up via {FindJumpTablesForCode}.
+ // previously looked up via {FindJumpTablesForRegion}.
Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
const JumpTablesRef&) const;
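
The intended pattern is: look the jump tables up once per code region, check is_valid(), then reuse the ref for every near call or stub entry in that region. A self-contained model of that pattern (the lookup and target functions here are stand-ins, not V8's API):

#include <cstdint>

using Address = uintptr_t;
constexpr Address kNullAddress = 0;

// Mirror of the JumpTablesRef struct above: a default-constructed ref
// means "no suitable jump tables found".
struct JumpTablesRef {
  Address jump_table_start = kNullAddress;
  Address far_jump_table_start = kNullAddress;
  bool is_valid() const { return far_jump_table_start != kNullAddress; }
};

// Stand-ins for the NativeModule lookups (assumed, not the real API).
JumpTablesRef FindJumpTables() { return {0x1000, 0x2000}; }
Address NearCallTarget(uint32_t slot_offset, const JumpTablesRef& ref) {
  return ref.jump_table_start + slot_offset;
}

int main() {
  // Look the tables up once, then reuse the ref for every relocation.
  JumpTablesRef tables = FindJumpTables();
  if (!tables.is_valid()) return 1;  // Caller must provide a new code space.
  Address t0 = NearCallTarget(0, tables);
  Address t1 = NearCallTarget(64, tables);
  return t0 < t1 ? 0 : 1;
}
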
@@ -552,6 +574,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// by publishing an entry stub with the {Kind::kInterpreterEntry} code kind.
bool IsRedirectedToInterpreter(uint32_t func_index);
+ // Set {tier_down_} flag. Return previous state.
+ bool SetTieredDown();
+ bool IsTieredDown();
+
// Sets the flag, triggers recompilation of all methods to tier down or up,
// waits for that to complete.
void TierDown(Isolate* isolate);
@@ -584,22 +610,21 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Private constructor, called via {WasmCodeManager::NewNativeModule()}.
NativeModule(WasmEngine* engine, const WasmFeatures& enabled_features,
- bool can_request_more, VirtualMemory code_space,
+ VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this);
std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
- uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
- uint32_t tagged_parameter_slots,
- OwnedVector<trap_handler::ProtectedInstructionData>
- protected_instructions,
- OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
+ int index, const CodeDesc& desc, int stack_slots,
+ int tagged_parameter_slots,
+ Vector<const byte> protected_instructions_data,
+ Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, Vector<uint8_t> code_space,
const JumpTablesRef& jump_tables_ref);
WasmCode* CreateEmptyJumpTableInRegion(
- uint32_t jump_table_size, base::AddressRegion,
+ int jump_table_size, base::AddressRegion,
const WasmCodeAllocator::OptionalLock&);
// Hold the {allocation_mutex_} when calling one of these methods.
@@ -656,10 +681,14 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {WireBytesStorage}, held by background compile tasks.
std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
- // Jump table used by external calls (from JS). Wasm calls use one of the jump
- // tables stored in {code_space_data_}.
+ // The first allocated jump table. Always used by external calls (from JS).
+ // Wasm calls might use one of the other jump tables stored in
+ // {code_space_data_}.
WasmCode* main_jump_table_ = nullptr;
+ // The first allocated far jump table.
+ WasmCode* main_far_jump_table_ = nullptr;
+
// Lazy compile stub table, containing entries to jump to the
// {WasmCompileLazy} builtin, passing the function index.
WasmCode* lazy_compile_table_ = nullptr;
@@ -757,7 +786,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
std::shared_ptr<NativeModule> NewNativeModule(
WasmEngine* engine, Isolate* isolate,
const WasmFeatures& enabled_features, size_t code_size_estimate,
- bool can_request_more, std::shared_ptr<const WasmModule> module);
+ std::shared_ptr<const WasmModule> module);
V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
void* hint = nullptr);
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 956322f54a..acce1eef67 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -108,7 +108,7 @@ constexpr WasmCodePosition kNoCodePosition = -1;
constexpr uint32_t kExceptionAttribute = 0;
-constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
+constexpr int kAnonymousFuncIndex = -1;
} // namespace wasm
} // namespace internal
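
Moving the sentinel from 0xffffffff to -1 lets function indices be plain ints and turns the anonymous check into a sign test, which the DCHECK_LE(0, index_) in WasmCode::index() relies on. A tiny sketch, with assert standing in for DCHECK:

#include <cassert>

constexpr int kAnonymousFuncIndex = -1;

struct Code {
  int index_;
  bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
  int index() const {
    assert(index_ >= 0);  // Mirrors DCHECK_LE(0, index_).
    return index_;
  }
};

int main() {
  Code jump_table{kAnonymousFuncIndex};
  Code function{42};
  return (jump_table.IsAnonymous() && function.index() == 42) ? 0 : 1;
}
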
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.cc b/deps/v8/src/wasm/wasm-debug-evaluate.cc
new file mode 100644
index 0000000000..80f0b04e0e
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.cc
@@ -0,0 +1,274 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-debug-evaluate.h"
+
+#include <algorithm>
+
+#include "src/api/api-inl.h"
+#include "src/codegen/machine-type.h"
+#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-arguments.h"
+#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace {
+
+static Handle<String> V8String(Isolate* isolate, const char* str) {
+ return isolate->factory()->NewStringFromAsciiChecked(str);
+}
+
+static bool CheckSignature(ValueType return_type,
+ std::initializer_list<ValueType> argument_types,
+ const FunctionSig* sig, ErrorThrower* thrower) {
+ if (sig->return_count() != 1 && return_type != kWasmBottom) {
+ thrower->CompileError("Invalid return type. Got none, expected %s",
+ return_type.type_name());
+ return false;
+ }
+
+ if (sig->return_count() == 1) {
+ if (sig->GetReturn(0) != return_type) {
+ thrower->CompileError("Invalid return type. Got %s, expected %s",
+ sig->GetReturn(0).type_name(),
+ return_type.type_name());
+ return false;
+ }
+ }
+
+ if (sig->parameter_count() != argument_types.size()) {
+ thrower->CompileError("Invalid number of arguments. Expected %zu, got %zu",
+ sig->parameter_count(), argument_types.size());
+ return false;
+ }
+ size_t p = 0;
+ for (ValueType argument_type : argument_types) {
+ if (sig->GetParam(p) != argument_type) {
+ thrower->CompileError(
+ "Invalid argument type for argument %zu. Got %s, expected %s", p,
+ sig->GetParam(p).type_name(), argument_type.type_name());
+ return false;
+ }
+ ++p;
+ }
+ return true;
+}
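
CheckSignature treats kWasmBottom as "must not return anything", so the evaluator interface can demand __getMemory: (i32, i32, i32) -> void and wasm_format: () -> i32. A simplified standalone model of the same check, using a plain enum and vectors instead of V8's FunctionSig:

#include <cstdio>
#include <vector>

enum class Type { kI32, kI64, kF32, kF64, kBottom };

// Simplified model of the check above: kBottom as the expected return
// means the function must not return anything.
bool SignatureMatches(Type expected_return, std::vector<Type> expected_params,
                      std::vector<Type> returns, std::vector<Type> params) {
  if (returns.empty() && expected_return != Type::kBottom) return false;
  if (!returns.empty() &&
      (returns.size() != 1 || returns[0] != expected_return)) {
    return false;
  }
  return params == expected_params;
}

int main() {
  // wasm_format: () -> i32
  bool ok1 = SignatureMatches(Type::kI32, {}, {Type::kI32}, {});
  // __getMemory: (i32, i32, i32) -> void
  bool ok2 =
      SignatureMatches(Type::kBottom, {Type::kI32, Type::kI32, Type::kI32}, {},
                       {Type::kI32, Type::kI32, Type::kI32});
  std::printf("%d %d\n", ok1, ok2);
  return ok1 && ok2 ? 0 : 1;
}
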
+
+static bool CheckRangeOutOfBounds(uint32_t offset, uint32_t size,
+ size_t allocation_size,
+ wasm::ErrorThrower* thrower) {
+ if (size > std::numeric_limits<uint32_t>::max() - offset) {
+ thrower->RuntimeError("Overflowing memory range\n");
+ return true;
+ }
+ if (offset + size > allocation_size) {
+ thrower->RuntimeError("Illegal access to out-of-bounds memory");
+ return true;
+ }
+ return false;
+}
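
The order of the two comparisons matters: the first rejects any (offset, size) pair whose sum would wrap around uint32_t, so the addition in the second comparison cannot overflow. A standalone sketch of the same two-step check:

#include <cstdint>
#include <limits>

// Returns true (out of bounds) without ever computing an overflowing sum.
bool RangeOutOfBounds(uint32_t offset, uint32_t size, size_t allocation_size) {
  // Step 1: would offset + size wrap around uint32_t?
  if (size > std::numeric_limits<uint32_t>::max() - offset) return true;
  // Step 2: safe to add now; compare against the allocation.
  return static_cast<size_t>(offset) + size > allocation_size;
}

int main() {
  // 0xFFFFFFF0 + 0x20 would wrap to 0x10 and pass a naive check.
  return RangeOutOfBounds(0xFFFFFFF0u, 0x20u, 0x100u) ? 0 : 1;
}
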
+
+class DebugEvaluatorProxy {
+ public:
+ explicit DebugEvaluatorProxy(Isolate* isolate) : isolate_(isolate) {}
+
+ static void GetMemoryTrampoline(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DebugEvaluatorProxy& proxy = GetProxy(args);
+
+ uint32_t offset = proxy.GetArgAsUInt32(args, 0);
+ uint32_t size = proxy.GetArgAsUInt32(args, 1);
+ uint32_t result = proxy.GetArgAsUInt32(args, 2);
+
+ proxy.GetMemory(offset, size, result);
+ }
+
+ void GetMemory(uint32_t offset, uint32_t size, uint32_t result) {
+ wasm::ScheduledErrorThrower thrower(isolate_, "debug evaluate proxy");
+ // Check all overflows.
+ if (CheckRangeOutOfBounds(result, size, debuggee_->memory_size(),
+ &thrower) ||
+ CheckRangeOutOfBounds(offset, size, evaluator_->memory_size(),
+ &thrower)) {
+ return;
+ }
+
+ std::memcpy(&evaluator_->memory_start()[result],
+ &debuggee_->memory_start()[offset], size);
+ }
+
+ template <typename CallableT>
+ Handle<JSReceiver> WrapAsV8Function(CallableT callback) {
+ v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ v8::Local<v8::Context> context = api_isolate->GetCurrentContext();
+ v8::Local<v8::Function> func =
+ v8::Function::New(context, callback,
+ v8::External::New(api_isolate, this))
+ .ToLocalChecked();
+
+ return Utils::OpenHandle(*func);
+ }
+
+ Handle<JSObject> CreateImports() {
+ Handle<JSObject> imports_obj =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+ Handle<JSObject> import_module_obj =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+ Object::SetProperty(isolate_, imports_obj,
+ isolate_->factory()->empty_string(), import_module_obj)
+ .Assert();
+
+ Object::SetProperty(
+ isolate_, import_module_obj, V8String(isolate_, "__getMemory"),
+ WrapAsV8Function(DebugEvaluatorProxy::GetMemoryTrampoline))
+ .Assert();
+ return imports_obj;
+ }
+
+ void SetInstances(Handle<WasmInstanceObject> evaluator,
+ Handle<WasmInstanceObject> debuggee) {
+ evaluator_ = evaluator;
+ debuggee_ = debuggee;
+ }
+
+ private:
+ uint32_t GetArgAsUInt32(const v8::FunctionCallbackInfo<v8::Value>& args,
+ int index) {
+    // No type/range checks needed on this because it is only called for
+    // {args} where we have performed a signature check via
+    // {VerifyEvaluatorInterface}.
+ double number = Utils::OpenHandle(*args[index])->Number();
+ return static_cast<uint32_t>(number);
+ }
+
+ static DebugEvaluatorProxy& GetProxy(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ return *reinterpret_cast<DebugEvaluatorProxy*>(
+ args.Data().As<v8::External>()->Value());
+ }
+
+ Isolate* isolate_;
+ Handle<WasmInstanceObject> evaluator_;
+ Handle<WasmInstanceObject> debuggee_;
+};
+
+static bool VerifyEvaluatorInterface(const WasmModule* raw_module,
+ const ModuleWireBytes& bytes,
+ ErrorThrower* thrower) {
+ for (const WasmFunction& F : raw_module->functions) {
+ WireBytesRef name_ref = raw_module->function_names.Lookup(
+ bytes, F.func_index, VectorOf(raw_module->export_table));
+ std::string name(bytes.start() + name_ref.offset(),
+ bytes.start() + name_ref.end_offset());
+ if (F.exported && name == "wasm_format") {
+ if (!CheckSignature(kWasmI32, {}, F.sig, thrower)) return false;
+ } else if (F.imported) {
+ if (name == "__getMemory") {
+ if (!CheckSignature(kWasmBottom, {kWasmI32, kWasmI32, kWasmI32}, F.sig,
+ thrower)) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+Maybe<std::string> DebugEvaluateImpl(
+ Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
+ WasmInterpreter::FramePtr frame) {
+ Isolate* isolate = debuggee_instance->GetIsolate();
+ HandleScope handle_scope(isolate);
+ WasmEngine* engine = isolate->wasm_engine();
+ wasm::ErrorThrower thrower(isolate, "wasm debug evaluate");
+
+ // Create module object.
+ wasm::ModuleWireBytes bytes(snippet);
+ wasm::WasmFeatures features = wasm::WasmFeatures::FromIsolate(isolate);
+ Handle<WasmModuleObject> evaluator_module;
+ if (!engine->SyncCompile(isolate, features, &thrower, bytes)
+ .ToHandle(&evaluator_module)) {
+ return Nothing<std::string>();
+ }
+
+ // Verify interface.
+ const WasmModule* raw_module = evaluator_module->module();
+ if (!VerifyEvaluatorInterface(raw_module, bytes, &thrower)) {
+ return Nothing<std::string>();
+ }
+
+ // Set up imports.
+ DebugEvaluatorProxy proxy(isolate);
+ Handle<JSObject> imports = proxy.CreateImports();
+
+ // Instantiate Module.
+ Handle<WasmInstanceObject> evaluator_instance;
+ if (!engine->SyncInstantiate(isolate, &thrower, evaluator_module, imports, {})
+ .ToHandle(&evaluator_instance)) {
+ return Nothing<std::string>();
+ }
+
+ proxy.SetInstances(evaluator_instance, debuggee_instance);
+
+ Handle<JSObject> exports_obj(evaluator_instance->exports_object(), isolate);
+ Handle<Object> entry_point_obj;
+ bool get_property_success =
+ Object::GetProperty(isolate, exports_obj,
+ V8String(isolate, "wasm_format"))
+ .ToHandle(&entry_point_obj);
+ if (!get_property_success ||
+ !WasmExportedFunction::IsWasmExportedFunction(*entry_point_obj)) {
+ thrower.LinkError("Missing export: \"wasm_format\"");
+ return Nothing<std::string>();
+ }
+ Handle<WasmExportedFunction> entry_point =
+ Handle<WasmExportedFunction>::cast(entry_point_obj);
+
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(evaluator_instance);
+ Handle<Code> wasm_entry =
+ WasmDebugInfo::GetCWasmEntry(debug_info, entry_point->sig());
+ CWasmArgumentsPacker packer(4 /* uint32_t return value, no parameters. */);
+ Execution::CallWasm(isolate, wasm_entry, entry_point->GetWasmCallTarget(),
+ evaluator_instance, packer.argv());
+ if (isolate->has_pending_exception()) return Nothing<std::string>();
+
+ uint32_t offset = packer.Pop<uint32_t>();
+ if (CheckRangeOutOfBounds(offset, 0, evaluator_instance->memory_size(),
+ &thrower)) {
+ return Nothing<std::string>();
+ }
+
+ // Copy the zero-terminated string result but don't overflow.
+ std::string result;
+ byte* heap = evaluator_instance->memory_start() + offset;
+ for (; offset < evaluator_instance->memory_size(); ++offset, ++heap) {
+ if (*heap == 0) return Just(result);
+ result.push_back(*heap);
+ }
+
+ thrower.RuntimeError("The evaluation returned an invalid result");
+ return Nothing<std::string>();
+}
+
+MaybeHandle<String> DebugEvaluate(Vector<const byte> snippet,
+ Handle<WasmInstanceObject> debuggee_instance,
+ WasmInterpreter::FramePtr frame) {
+ Maybe<std::string> result =
+ DebugEvaluateImpl(snippet, debuggee_instance, std::move(frame));
+ if (result.IsNothing()) return {};
+ std::string result_str = result.ToChecked();
+ return V8String(debuggee_instance->GetIsolate(), result_str.c_str());
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
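
DebugEvaluateImpl reads the result back from the evaluator instance's memory as a NUL-terminated string, bounded by the memory size so a missing terminator cannot cause an overrun. A standalone sketch of that copy loop, with a plain byte array in place of wasm memory:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>

// Copy a NUL-terminated string out of a bounded memory region, failing if
// no terminator occurs before the end, as the loop above does.
std::optional<std::string> ReadCString(const uint8_t* memory,
                                       size_t memory_size, size_t offset) {
  std::string result;
  for (; offset < memory_size; ++offset) {
    if (memory[offset] == 0) return result;
    result.push_back(static_cast<char>(memory[offset]));
  }
  return std::nullopt;  // Ran off the end of memory: invalid result.
}

int main() {
  const uint8_t memory[] = {'h', 'i', 0, 'x'};
  auto ok = ReadCString(memory, sizeof memory, 0);
  auto bad = ReadCString(memory, sizeof memory, 3);  // No NUL before end.
  return (ok && *ok == "hi" && !bad) ? 0 : 1;
}
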
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.h b/deps/v8/src/wasm/wasm-debug-evaluate.h
new file mode 100644
index 0000000000..21543eb97a
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.h
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_DEBUG_EVALUATE_H_
+#define V8_WASM_WASM_DEBUG_EVALUATE_H_
+
+#include "src/base/macros.h"
+#include "src/handles/maybe-handles.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+MaybeHandle<String> V8_EXPORT_PRIVATE DebugEvaluate(
+ Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
+ WasmInterpreter::FramePtr frame);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_DEBUG_EVALUATE_H_
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 230427bed0..10a2e194a7 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -17,6 +17,7 @@
#include "src/heap/factory.h"
#include "src/utils/identity-map.h"
#include "src/wasm/baseline/liftoff-compiler.h"
+#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-interpreter.h"
@@ -48,23 +49,23 @@ Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
}
Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
- switch (value.type()) {
- case kWasmI32:
+ switch (value.type().kind()) {
+ case ValueType::kI32:
if (Smi::IsValid(value.to<int32_t>()))
return handle(Smi::FromInt(value.to<int32_t>()), isolate);
return PrintFToOneByteString<false>(isolate, "%d", value.to<int32_t>());
- case kWasmI64: {
+ case ValueType::kI64: {
int64_t i64 = value.to<int64_t>();
int32_t i32 = static_cast<int32_t>(i64);
if (i32 == i64 && Smi::IsValid(i32))
return handle(Smi::FromIntptr(i32), isolate);
return PrintFToOneByteString<false>(isolate, "%" PRId64, i64);
}
- case kWasmF32:
+ case ValueType::kF32:
return isolate->factory()->NewNumber(value.to<float>());
- case kWasmF64:
+ case ValueType::kF64:
return isolate->factory()->NewNumber(value.to<double>());
- case kWasmAnyRef:
+ case ValueType::kAnyRef:
return value.to_anyref();
default:
UNIMPLEMENTED();
@@ -164,7 +165,7 @@ class InterpreterHandle {
Vector<WasmValue> argument_values,
Vector<WasmValue> return_values) {
DCHECK_GE(module()->functions.size(), func_index);
- FunctionSig* sig = module()->functions[func_index].sig;
+ const FunctionSig* sig = module()->functions[func_index].sig;
DCHECK_EQ(sig->parameter_count(), argument_values.size());
DCHECK_EQ(sig->return_count(), return_values.size());
@@ -189,8 +190,11 @@ class InterpreterHandle {
case WasmInterpreter::State::TRAPPED: {
MessageTemplate message_id =
WasmOpcodes::TrapReasonToMessageId(thread->GetTrapReason());
- Handle<Object> exception =
+ Handle<JSObject> exception =
isolate_->factory()->NewWasmRuntimeError(message_id);
+ JSObject::AddProperty(isolate_, exception,
+ isolate_->factory()->wasm_uncatchable_symbol(),
+ isolate_->factory()->true_value(), NONE);
auto result = thread->RaiseException(isolate_, exception);
if (result == WasmInterpreter::Thread::HANDLED) break;
// If no local handler was found, we fall-thru to {STOPPED}.
@@ -383,9 +387,8 @@ class InterpreterHandle {
Handle<JSObject> local_scope_object =
isolate_->factory()->NewJSObjectWithNullProto();
// Fill parameters and locals.
- int num_params = frame->GetParameterCount();
int num_locals = frame->GetLocalCount();
- DCHECK_LE(num_params, num_locals);
+ DCHECK_LE(frame->GetParameterCount(), num_locals);
if (num_locals > 0) {
Handle<JSObject> locals_obj =
isolate_->factory()->NewJSObjectWithNullProto();
@@ -400,10 +403,7 @@ class InterpreterHandle {
if (!GetLocalNameString(isolate, native_module,
frame->function()->func_index, i)
.ToHandle(&name)) {
- // Parameters should come before locals in alphabetical ordering, so
- // we name them "args" here.
- const char* label = i < num_params ? "arg#%d" : "local#%d";
- name = PrintFToOneByteString<true>(isolate_, label, i);
+ name = PrintFToOneByteString<true>(isolate_, "var%d", i);
}
WasmValue value = frame->GetLocalValue(i);
Handle<Object> value_obj = WasmValueToValueObject(isolate_, value);
@@ -439,10 +439,92 @@ class InterpreterHandle {
return local_scope_object;
}
+ Handle<JSObject> GetStackScopeObject(InterpretedFrame* frame,
+ Handle<WasmDebugInfo> debug_info) {
+ // Fill stack values.
+ int stack_count = frame->GetStackHeight();
+    // Use an object without prototype instead of an Array, for nicer display
+    // in DevTools. For Arrays, the length field and prototype are displayed,
+    // which does not make too much sense here.
+ Handle<JSObject> stack_scope_obj =
+ isolate_->factory()->NewJSObjectWithNullProto();
+ for (int i = 0; i < stack_count; ++i) {
+ WasmValue value = frame->GetStackValue(i);
+ Handle<Object> value_obj = WasmValueToValueObject(isolate_, value);
+ JSObject::AddDataElement(stack_scope_obj, static_cast<uint32_t>(i),
+ value_obj, NONE);
+ }
+ return stack_scope_obj;
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterHandle);
};
+// Find the wasm byte offset corresponding to the given {pc_offset} by
+// scanning the code's source position table.
+int FindByteOffset(int pc_offset, WasmCode* wasm_code) {
+  int position = 0;
+  for (SourcePositionTableIterator iterator(wasm_code->source_positions());
+       !iterator.done() && iterator.code_offset() < pc_offset;
+       iterator.Advance()) {
+    position = iterator.source_position().ScriptOffset();
+  }
+  return position;
+}
+
+// Generate a sorted and deduplicated list of byte offsets for this function's
+// current positions on the stack.
+std::vector<int> StackFramePositions(int func_index, Isolate* isolate) {
+ std::vector<int> byte_offsets;
+ WasmCodeRefScope code_ref_scope;
+ for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
+ if (!it.is_wasm()) continue;
+ WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+ if (static_cast<int>(frame->function_index()) != func_index) continue;
+ WasmCode* wasm_code = frame->wasm_code();
+ if (!wasm_code->is_liftoff()) continue;
+ int pc_offset =
+ static_cast<int>(frame->pc() - wasm_code->instruction_start());
+ int byte_offset = FindByteOffset(pc_offset, wasm_code);
+ byte_offsets.push_back(byte_offset);
+ }
+ std::sort(byte_offsets.begin(), byte_offsets.end());
+ auto last = std::unique(byte_offsets.begin(), byte_offsets.end());
+ byte_offsets.erase(last, byte_offsets.end());
+ return byte_offsets;
+}
+
+enum ReturnLocation { kAfterBreakpoint, kAfterWasmCall };
+
+Address FindNewPC(WasmCode* wasm_code, int byte_offset,
+ ReturnLocation return_location) {
+ Vector<const uint8_t> new_pos_table = wasm_code->source_positions();
+
+ DCHECK_LE(0, byte_offset);
+
+ // If {return_location == kAfterBreakpoint} we search for the first code
+  // offset which is marked as a statement (i.e. not the breakpoint).
+ // If {return_location == kAfterWasmCall} we return the last code offset
+ // associated with the byte offset.
+ SourcePositionTableIterator it(new_pos_table);
+ while (!it.done() && it.source_position().ScriptOffset() != byte_offset) {
+ it.Advance();
+ }
+ if (return_location == kAfterBreakpoint) {
+ while (!it.is_statement()) it.Advance();
+ DCHECK_EQ(byte_offset, it.source_position().ScriptOffset());
+ return wasm_code->instruction_start() + it.code_offset();
+ }
+
+ DCHECK_EQ(kAfterWasmCall, return_location);
+ int code_offset;
+ do {
+ code_offset = it.code_offset();
+ it.Advance();
+ } while (!it.done() && it.source_position().ScriptOffset() == byte_offset);
+ return wasm_code->instruction_start() + code_offset;
+}
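
FindNewPC scans the source position table for entries mapped to the given byte offset, then returns either the first statement entry (resuming after a breakpoint) or the last matching entry (returning after a wasm call). A simplified standalone model over a plain vector of entries:

#include <cstddef>
#include <vector>

struct PosEntry {
  int code_offset;
  int byte_offset;    // Source position (wasm byte offset).
  bool is_statement;
};

enum ReturnLocation { kAfterBreakpoint, kAfterWasmCall };

// Simplified model of FindNewPC above. {table} is sorted by code_offset
// and is assumed to contain at least one entry for {byte_offset}.
int FindReturnOffset(const std::vector<PosEntry>& table, int byte_offset,
                     ReturnLocation loc) {
  size_t i = 0;
  while (i < table.size() && table[i].byte_offset != byte_offset) ++i;
  if (loc == kAfterBreakpoint) {
    // Skip the breakpoint slot itself; resume at the first real statement.
    while (!table[i].is_statement) ++i;
    return table[i].code_offset;
  }
  // kAfterWasmCall: return the last code offset mapped to this byte offset.
  int code_offset = table[i].code_offset;
  while (i < table.size() && table[i].byte_offset == byte_offset) {
    code_offset = table[i].code_offset;
    ++i;
  }
  return code_offset;
}

int main() {
  std::vector<PosEntry> table = {
      {0, 7, false},  // Breakpoint slot for byte offset 7.
      {8, 7, true},   // The actual instruction at byte offset 7.
      {16, 9, true},
  };
  return FindReturnOffset(table, 7, kAfterBreakpoint) == 8 &&
                 FindReturnOffset(table, 7, kAfterWasmCall) == 8
             ? 0
             : 1;
}
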
+
} // namespace
Handle<JSObject> GetGlobalScopeObject(Handle<WasmInstanceObject> instance) {
@@ -470,9 +552,13 @@ Handle<JSObject> GetGlobalScopeObject(Handle<WasmInstanceObject> instance) {
JSObject::AddProperty(isolate, global_scope_object, globals_name,
globals_obj, NONE);
- for (size_t i = 0; i < globals.size(); ++i) {
- const char* label = "global#%d";
- Handle<String> name = PrintFToOneByteString<true>(isolate, label, i);
+ for (uint32_t i = 0; i < globals.size(); ++i) {
+ Handle<String> name;
+ if (!WasmInstanceObject::GetGlobalNameOrNull(isolate, instance, i)
+ .ToHandle(&name)) {
+ const char* label = "global%d";
+ name = PrintFToOneByteString<true>(isolate, label, i);
+ }
WasmValue value =
WasmInstanceObject::GetGlobalValue(instance, globals[i]);
Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
@@ -488,8 +574,8 @@ class DebugInfoImpl {
explicit DebugInfoImpl(NativeModule* native_module)
: native_module_(native_module) {}
- Handle<JSObject> GetLocalScopeObject(Isolate* isolate, Address pc,
- Address fp) {
+ Handle<JSObject> GetLocalScopeObject(Isolate* isolate, Address pc, Address fp,
+ Address debug_break_fp) {
Handle<JSObject> local_scope_object =
isolate->factory()->NewJSObjectWithNullProto();
@@ -499,18 +585,16 @@ class DebugInfoImpl {
// Only Liftoff code can be inspected.
if (!code->is_liftoff()) return local_scope_object;
- const WasmModule* module = native_module_->module();
- const WasmFunction* function = &module->functions[code->index()];
- DebugSideTable* debug_side_table =
- GetDebugSideTable(isolate->allocator(), function->func_index);
+ auto* module = native_module_->module();
+ auto* function = &module->functions[code->index()];
+ auto* debug_side_table = GetDebugSideTable(code, isolate->allocator());
int pc_offset = static_cast<int>(pc - code->instruction_start());
auto* debug_side_table_entry = debug_side_table->GetEntry(pc_offset);
DCHECK_NOT_NULL(debug_side_table_entry);
// Fill parameters and locals.
- int num_params = static_cast<int>(function->sig->parameter_count());
int num_locals = static_cast<int>(debug_side_table->num_locals());
- DCHECK_LE(num_params, num_locals);
+ DCHECK_LE(static_cast<int>(function->sig->parameter_count()), num_locals);
if (num_locals > 0) {
Handle<JSObject> locals_obj =
isolate->factory()->NewJSObjectWithNullProto();
@@ -523,14 +607,10 @@ class DebugInfoImpl {
if (!GetLocalNameString(isolate, native_module_, function->func_index,
i)
.ToHandle(&name)) {
- // Parameters should come before locals in alphabetical ordering, so
- // we name them "args" here.
- const char* label = i < num_params ? "arg#%d" : "local#%d";
- name = PrintFToOneByteString<true>(isolate, label, i);
+ name = PrintFToOneByteString<true>(isolate, "var%d", i);
}
WasmValue value =
- GetValue(debug_side_table_entry, debug_side_table->local_type(i), i,
- fp - debug_side_table->local_stack_offset(i));
+ GetValue(debug_side_table_entry, i, fp, debug_break_fp);
Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
// {name} can be a string representation of an element index.
LookupIterator::Key lookup_key{isolate, name};
@@ -545,7 +625,6 @@ class DebugInfoImpl {
}
// Fill stack values.
- int stack_count = debug_side_table_entry->stack_height();
    // Use an object without prototype instead of an Array, for a nicer display
    // in DevTools. For Arrays, the length field and the prototype are
    // displayed, which does not make much sense here.
@@ -554,17 +633,48 @@ class DebugInfoImpl {
isolate->factory()->InternalizeString(StaticCharVector("stack"));
JSObject::AddProperty(isolate, local_scope_object, stack_name, stack_obj,
NONE);
- for (int i = 0; i < stack_count; ++i) {
- ValueType type = debug_side_table_entry->stack_type(i);
- WasmValue value = GetValue(debug_side_table_entry, type, num_locals + i,
- fp - debug_side_table_entry->stack_offset(i));
+ int value_count = debug_side_table_entry->num_values();
+ for (int i = num_locals; i < value_count; ++i) {
+ WasmValue value = GetValue(debug_side_table_entry, i, fp, debug_break_fp);
Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
- JSObject::AddDataElement(stack_obj, static_cast<uint32_t>(i), value_obj,
- NONE);
+ JSObject::AddDataElement(stack_obj, static_cast<uint32_t>(i - num_locals),
+ value_obj, NONE);
}
return local_scope_object;
}
+ Handle<JSObject> GetStackScopeObject(Isolate* isolate, Address pc, Address fp,
+ Address debug_break_fp) {
+ Handle<JSObject> stack_scope_obj =
+ isolate->factory()->NewJSObjectWithNullProto();
+ wasm::WasmCodeRefScope wasm_code_ref_scope;
+
+ wasm::WasmCode* code =
+ isolate->wasm_engine()->code_manager()->LookupCode(pc);
+ // Only Liftoff code can be inspected.
+ if (!code->is_liftoff()) return stack_scope_obj;
+
+ auto* debug_side_table = GetDebugSideTable(code, isolate->allocator());
+ int pc_offset = static_cast<int>(pc - code->instruction_start());
+ auto* debug_side_table_entry = debug_side_table->GetEntry(pc_offset);
+ DCHECK_NOT_NULL(debug_side_table_entry);
+
+ // Fill stack values.
+  // Use an object without prototype instead of an Array, for a nicer display
+  // in DevTools. For Arrays, the length field and the prototype are
+  // displayed, which does not make much sense here.
+ int num_locals = static_cast<int>(debug_side_table->num_locals());
+ int value_count = debug_side_table_entry->num_values();
+ for (int i = num_locals; i < value_count; ++i) {
+ WasmValue value = GetValue(debug_side_table_entry, i, fp, debug_break_fp);
+ Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
+ JSObject::AddDataElement(stack_scope_obj,
+ static_cast<uint32_t>(i - num_locals), value_obj,
+ NONE);
+ }
+ return stack_scope_obj;
+ }
+
WireBytesRef GetLocalName(int func_index, int local_index) {
base::MutexGuard guard(&mutex_);
if (!local_names_) {
@@ -574,13 +684,56 @@ class DebugInfoImpl {
return local_names_->GetName(func_index, local_index);
}
- void SetBreakpoint(int func_index, int offset) {
+ void RecompileLiftoffWithBreakpoints(int func_index, Vector<int> offsets,
+ Isolate* current_isolate) {
+ if (func_index == flooded_function_index_) {
+ // We should not be flooding a function that is already flooded.
+ DCHECK(!(offsets.size() == 1 && offsets[0] == 0));
+ flooded_function_index_ = -1;
+ }
+ // Recompile the function with Liftoff, setting the new breakpoints.
+ // Not thread-safe. The caller is responsible for locking {mutex_}.
+ CompilationEnv env = native_module_->CreateCompilationEnv();
+ auto* function = &native_module_->module()->functions[func_index];
+ Vector<const uint8_t> wire_bytes = native_module_->wire_bytes();
+ FunctionBody body{function->sig, function->code.offset(),
+ wire_bytes.begin() + function->code.offset(),
+ wire_bytes.begin() + function->code.end_offset()};
+ std::unique_ptr<DebugSideTable> debug_sidetable;
+
+ // Generate additional source positions for current stack frame positions.
+ // These source positions are used to find return addresses in the new code.
+ std::vector<int> stack_frame_positions =
+ StackFramePositions(func_index, current_isolate);
+
+ WasmCompilationResult result = ExecuteLiftoffCompilation(
+ native_module_->engine()->allocator(), &env, body, func_index, nullptr,
+ nullptr, offsets, &debug_sidetable, VectorOf(stack_frame_positions));
+ // Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
+ // support for debugging.
+ if (!result.succeeded()) FATAL("Liftoff compilation failed");
+ DCHECK_NOT_NULL(debug_sidetable);
+
+ WasmCodeRefScope wasm_code_ref_scope;
+ WasmCode* new_code = native_module_->AddCompiledCode(std::move(result));
+ bool added =
+ debug_side_tables_.emplace(new_code, std::move(debug_sidetable)).second;
+ DCHECK(added);
+ USE(added);
+
+ UpdateReturnAddresses(current_isolate, new_code);
+ }
+
+ void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) {
// Hold the mutex while setting the breakpoint. This guards against multiple
// isolates setting breakpoints at the same time. We don't really support
// that scenario yet, but concurrently compiling and installing different
// Liftoff variants of a function would be problematic.
base::MutexGuard guard(&mutex_);
+ // offset == 0 indicates flooding and should not happen here.
+ DCHECK_NE(0, offset);
+
std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
auto insertion_point =
std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
@@ -590,87 +743,222 @@ class DebugInfoImpl {
}
breakpoints.insert(insertion_point, offset);
- // Recompile the function with Liftoff, setting the new breakpoints.
- CompilationEnv env = native_module_->CreateCompilationEnv();
- auto* function = &native_module_->module()->functions[func_index];
- Vector<const uint8_t> wire_bytes = native_module_->wire_bytes();
- FunctionBody body{function->sig, function->code.offset(),
- wire_bytes.begin() + function->code.offset(),
- wire_bytes.begin() + function->code.end_offset()};
- WasmCompilationResult result = ExecuteLiftoffCompilation(
- native_module_->engine()->allocator(), &env, body, func_index, nullptr,
- nullptr, VectorOf(breakpoints));
- DCHECK(result.succeeded());
+ // No need to recompile if the function is already flooded.
+ if (func_index == flooded_function_index_) return;
- WasmCodeRefScope wasm_code_ref_scope;
- WasmCode* new_code = native_module_->AddCompiledCode(std::move(result));
+ RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints),
+ current_isolate);
+ }
- // TODO(clemensb): OSR active frames on the stack (on all threads).
- USE(new_code);
+ void FloodWithBreakpoints(int func_index, Isolate* current_isolate) {
+ base::MutexGuard guard(&mutex_);
+ // 0 is an invalid offset used to indicate flooding.
+ int offset = 0;
+ RecompileLiftoffWithBreakpoints(func_index, Vector<int>(&offset, 1),
+ current_isolate);
}
- private:
- DebugSideTable* GetDebugSideTable(AccountingAllocator* allocator,
- int func_index) {
+ void PrepareStep(Isolate* isolate, StackFrameId break_frame_id) {
+ StackTraceFrameIterator it(isolate, break_frame_id);
+ DCHECK(!it.done());
+ DCHECK(it.frame()->is_wasm_compiled());
+ WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+ StepAction step_action = isolate->debug()->last_step_action();
+
+ // If we are at a return instruction, then any stepping action is equivalent
+ // to StepOut, and we need to flood the parent function.
+ if (IsAtReturn(frame) || step_action == StepOut) {
+ it.Advance();
+ if (it.done() || !it.frame()->is_wasm_compiled()) return;
+ frame = WasmCompiledFrame::cast(it.frame());
+ }
+
+ if (static_cast<int>(frame->function_index()) != flooded_function_index_) {
+ if (flooded_function_index_ != -1) {
+ std::vector<int>& breakpoints =
+ breakpoints_per_function_[flooded_function_index_];
+ RecompileLiftoffWithBreakpoints(flooded_function_index_,
+ VectorOf(breakpoints), isolate);
+ }
+ FloodWithBreakpoints(frame->function_index(), isolate);
+ flooded_function_index_ = frame->function_index();
+ }
+ stepping_frame_ = frame->id();
+ }
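
The stepping decision above is subtle: at a return instruction every step
action degenerates to StepOut, so the parent frame's function is the one that
gets flooded with breakpoints. A toy, self-contained model of just that
decision ({Frame} and {FunctionToFlood} are invented names, not V8 types):

#include <cassert>
#include <cstddef>
#include <vector>

enum StepAction { kStepIn, kStepOver, kStepOut };

struct Frame {
  int function_index;
  bool at_return;
};

// Returns the function to flood, or -1 if stepping leaves wasm entirely.
// stack[0] is the frame that hit the break; stack[1] is its caller.
int FunctionToFlood(const std::vector<Frame>& stack, StepAction action) {
  size_t top = 0;
  if (stack[top].at_return || action == kStepOut) {
    if (top + 1 == stack.size()) return -1;  // no wasm caller
    ++top;  // flood the parent instead
  }
  return stack[top].function_index;
}

int main() {
  std::vector<Frame> stack = {{7, /*at_return=*/true}, {3, false}};
  assert(FunctionToFlood(stack, kStepOver) == 3);  // behaves like StepOut
  assert(FunctionToFlood(stack, kStepIn) == 3);
  stack[0].at_return = false;
  assert(FunctionToFlood(stack, kStepOver) == 7);  // flood current function
}
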
+
+ void ClearStepping() { stepping_frame_ = NO_ID; }
+
+ bool IsStepping(WasmCompiledFrame* frame) {
+ Isolate* isolate = frame->wasm_instance().GetIsolate();
+ StepAction last_step_action = isolate->debug()->last_step_action();
+ return stepping_frame_ == frame->id() || last_step_action == StepIn;
+ }
+
+ void RemoveBreakpoint(int func_index, int position,
+ Isolate* current_isolate) {
base::MutexGuard guard(&mutex_);
- if (debug_side_tables_.empty()) {
- debug_side_tables_.resize(native_module_->module()->functions.size());
+ const auto& function = native_module_->module()->functions[func_index];
+ int offset = position - function.code.offset();
+
+ std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
+ DCHECK_LT(0, offset);
+ auto insertion_point =
+ std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
+ if (insertion_point != breakpoints.end() && *insertion_point == offset) {
+ breakpoints.erase(insertion_point);
}
- if (auto& existing_table = debug_side_tables_[func_index]) {
+ RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints),
+ current_isolate);
+ }
+
+ void RemoveDebugSideTables(Vector<WasmCode* const> codes) {
+ base::MutexGuard guard(&mutex_);
+ for (auto* code : codes) {
+ debug_side_tables_.erase(code);
+ }
+ }
+
+ private:
+ const DebugSideTable* GetDebugSideTable(WasmCode* code,
+ AccountingAllocator* allocator) {
+ base::MutexGuard guard(&mutex_);
+ if (auto& existing_table = debug_side_tables_[code]) {
return existing_table.get();
}
// Otherwise create the debug side table now.
- const WasmModule* module = native_module_->module();
- const WasmFunction* function = &module->functions[func_index];
+ auto* module = native_module_->module();
+ auto* function = &module->functions[code->index()];
ModuleWireBytes wire_bytes{native_module_->wire_bytes()};
Vector<const byte> function_bytes = wire_bytes.GetFunctionBytes(function);
CompilationEnv env = native_module_->CreateCompilationEnv();
FunctionBody func_body{function->sig, 0, function_bytes.begin(),
function_bytes.end()};
- DebugSideTable debug_side_table =
+ std::unique_ptr<DebugSideTable> debug_side_table =
GenerateLiftoffDebugSideTable(allocator, &env, func_body);
+ DebugSideTable* ret = debug_side_table.get();
// Install into cache and return.
- debug_side_tables_[func_index] =
- std::make_unique<DebugSideTable>(std::move(debug_side_table));
- return debug_side_tables_[func_index].get();
+ debug_side_tables_[code] = std::move(debug_side_table);
+ return ret;
}
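
The lazy, lock-protected caching above is a standard pattern: operator[]
default-constructs an empty slot, and the value is computed only while the
slot is still empty. A minimal standalone sketch with plain std types
({Table} and {GetOrCreate} are invented for illustration):

#include <cassert>
#include <map>
#include <memory>
#include <mutex>

struct Table {
  int data;
};

std::mutex mu;
std::map<int, std::unique_ptr<Table>> cache;

Table* GetOrCreate(int key) {
  std::lock_guard<std::mutex> guard(mu);
  // operator[] default-constructs an empty unique_ptr if the key is new.
  std::unique_ptr<Table>& slot = cache[key];
  if (!slot) slot = std::make_unique<Table>(Table{key * 10});
  return slot.get();
}

int main() {
  Table* first = GetOrCreate(3);
  assert(first->data == 30);
  assert(GetOrCreate(3) == first);  // cached, not recomputed
}
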
// Get the value of a local (including parameters) or stack value. Stack
// values follow the locals in the same index space.
WasmValue GetValue(const DebugSideTable::Entry* debug_side_table_entry,
- ValueType type, int index, Address stack_address) {
- if (debug_side_table_entry->IsConstant(index)) {
+ int index, Address stack_frame_base,
+ Address debug_break_fp) const {
+ ValueType type = debug_side_table_entry->value_type(index);
+ if (debug_side_table_entry->is_constant(index)) {
DCHECK(type == kWasmI32 || type == kWasmI64);
return type == kWasmI32
- ? WasmValue(debug_side_table_entry->GetConstant(index))
+ ? WasmValue(debug_side_table_entry->i32_constant(index))
: WasmValue(
- int64_t{debug_side_table_entry->GetConstant(index)});
+ int64_t{debug_side_table_entry->i32_constant(index)});
+ }
+
+ if (debug_side_table_entry->is_register(index)) {
+ LiftoffRegister reg = LiftoffRegister::from_liftoff_code(
+ debug_side_table_entry->register_code(index));
+ auto gp_addr = [debug_break_fp](Register reg) {
+ return debug_break_fp +
+ WasmDebugBreakFrameConstants::GetPushedGpRegisterOffset(
+ reg.code());
+ };
+ if (reg.is_gp_pair()) {
+ DCHECK_EQ(kWasmI64, type);
+ uint32_t low_word = ReadUnalignedValue<uint32_t>(gp_addr(reg.low_gp()));
+ uint32_t high_word =
+ ReadUnalignedValue<uint32_t>(gp_addr(reg.high_gp()));
+ return WasmValue((uint64_t{high_word} << 32) | low_word);
+ }
+ if (reg.is_gp()) {
+ return type == kWasmI32
+ ? WasmValue(ReadUnalignedValue<uint32_t>(gp_addr(reg.gp())))
+ : WasmValue(ReadUnalignedValue<uint64_t>(gp_addr(reg.gp())));
+ }
+ // TODO(clemensb/zhin): Fix this for SIMD.
+ DCHECK(reg.is_fp() || reg.is_fp_pair());
+ if (reg.is_fp_pair()) UNIMPLEMENTED();
+ Address spilled_addr =
+ debug_break_fp +
+ WasmDebugBreakFrameConstants::GetPushedFpRegisterOffset(
+ reg.fp().code());
+ return type == kWasmF32
+ ? WasmValue(ReadUnalignedValue<float>(spilled_addr))
+ : WasmValue(ReadUnalignedValue<double>(spilled_addr));
}
// Otherwise load the value from the stack.
- switch (type) {
- case kWasmI32:
+ Address stack_address =
+ stack_frame_base - debug_side_table_entry->stack_offset(index);
+ switch (type.kind()) {
+ case ValueType::kI32:
return WasmValue(ReadUnalignedValue<int32_t>(stack_address));
- case kWasmI64:
+ case ValueType::kI64:
return WasmValue(ReadUnalignedValue<int64_t>(stack_address));
- case kWasmF32:
+ case ValueType::kF32:
return WasmValue(ReadUnalignedValue<float>(stack_address));
- case kWasmF64:
+ case ValueType::kF64:
return WasmValue(ReadUnalignedValue<double>(stack_address));
default:
UNIMPLEMENTED();
}
}
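
{ReadUnalignedValue} is essentially a typed memcpy from a raw address. A
minimal sketch of how a spilled i32 would be read back from a toy frame,
assuming stack slots grow downwards from the frame base as in the code above
(the frame buffer and offsets are fabricated):

#include <cassert>
#include <cstdint>
#include <cstring>

// Stand-in for V8's ReadUnalignedValue: a typed load that is safe even for
// unaligned addresses, implemented with memcpy.
template <typename T>
T ReadUnaligned(const uint8_t* addr) {
  T value;
  std::memcpy(&value, addr, sizeof(T));
  return value;
}

int main() {
  // A toy "stack frame": the value lives at frame_base - stack_offset.
  uint8_t frame[32] = {0};
  uint8_t* frame_base = frame + sizeof(frame);
  int stack_offset = 8;
  int32_t v = -42;
  std::memcpy(frame_base - stack_offset, &v, sizeof(v));
  assert(ReadUnaligned<int32_t>(frame_base - stack_offset) == -42);
}
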
+ // After installing a Liftoff code object with a different set of breakpoints,
+ // update return addresses on the stack so that execution resumes in the new
+ // code. The frame layout itself should be independent of breakpoints.
+ // TODO(thibaudm): update other threads as well.
+ void UpdateReturnAddresses(Isolate* isolate, WasmCode* new_code) {
+ DCHECK(new_code->is_liftoff());
+ // The first return location is after the breakpoint, others are after wasm
+ // calls.
+ ReturnLocation return_location = kAfterBreakpoint;
+ for (StackTraceFrameIterator it(isolate); !it.done();
+ it.Advance(), return_location = kAfterWasmCall) {
+ // We still need the flooded function for stepping.
+ if (it.frame()->id() == stepping_frame_) continue;
+ if (!it.is_wasm()) continue;
+ WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+ if (frame->native_module() != new_code->native_module()) continue;
+ if (frame->function_index() != new_code->index()) continue;
+ WasmCode* old_code = frame->wasm_code();
+ if (!old_code->is_liftoff()) continue;
+ int pc_offset =
+ static_cast<int>(frame->pc() - old_code->instruction_start());
+ int position = frame->position();
+ int byte_offset = FindByteOffset(pc_offset, old_code);
+ Address new_pc = FindNewPC(new_code, byte_offset, return_location);
+ PointerAuthentication::ReplacePC(frame->pc_address(), new_pc,
+ kSystemPointerSize);
+ USE(position);
+ // The frame position should still be the same after OSR.
+ DCHECK_EQ(position, frame->position());
+ }
+ }
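
A toy simulation of the patching loop above: map each saved return address to
a wasm byte offset via the old code, look the offset up in the new code, and
overwrite the address in place. Plain std::map stands in for the source
position tables, and the addresses are fabricated:

#include <cassert>
#include <cstdint>
#include <map>
#include <vector>

int main() {
  // "Old code" source positions: pc -> wasm byte offset.
  std::map<uintptr_t, int> old_pc_to_byte_offset = {{0x1010, 5}, {0x1020, 7}};
  // "New code" source positions: wasm byte offset -> pc.
  std::map<int, uintptr_t> byte_offset_to_new_pc = {{5, 0x2014}, {7, 0x2028}};

  std::vector<uintptr_t> saved_return_addresses = {0x1010, 0x1020};
  for (uintptr_t& pc : saved_return_addresses) {
    int byte_offset = old_pc_to_byte_offset.at(pc);
    pc = byte_offset_to_new_pc.at(byte_offset);  // frame resumes in new code
  }
  assert(saved_return_addresses[0] == 0x2014);
  assert(saved_return_addresses[1] == 0x2028);
}
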
+
+ bool IsAtReturn(WasmCompiledFrame* frame) {
+ DisallowHeapAllocation no_gc;
+ int position = frame->position();
+ NativeModule* native_module =
+ frame->wasm_instance().module_object().native_module();
+ uint8_t opcode = native_module->wire_bytes()[position];
+ if (opcode == kExprReturn) return true;
+ // Another implicit return is at the last kExprEnd in the function body.
+ int func_index = frame->function_index();
+ WireBytesRef code = native_module->module()->functions[func_index].code;
+ return static_cast<size_t>(position) == code.end_offset() - 1;
+ }
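
A compact standalone illustration of the two return cases above, using the
real wasm opcode values (0x0f for return, 0x0b for end) on a fabricated
function body:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr uint8_t kExprReturn = 0x0f;
constexpr uint8_t kExprEnd = 0x0b;

// Toy version of IsAtReturn: an explicit return opcode, or the final kExprEnd
// that closes the function body (the implicit return).
bool IsAtReturn(const std::vector<uint8_t>& body, size_t position) {
  if (body[position] == kExprReturn) return true;
  return position == body.size() - 1 && body[position] == kExprEnd;
}

int main() {
  // (local.get 0) (return) end
  std::vector<uint8_t> body = {0x20, 0x00, kExprReturn, kExprEnd};
  assert(IsAtReturn(body, 2));   // explicit return
  assert(IsAtReturn(body, 3));   // implicit return at the last end
  assert(!IsAtReturn(body, 0));
}
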
+
NativeModule* const native_module_;
// {mutex_} protects all fields below.
- base::Mutex mutex_;
+ mutable base::Mutex mutex_;
- // DebugSideTable per function, lazily initialized.
- std::vector<std::unique_ptr<DebugSideTable>> debug_side_tables_;
+ // DebugSideTable per code object, lazily initialized.
+ std::unordered_map<WasmCode*, std::unique_ptr<DebugSideTable>>
+ debug_side_tables_;
// Names of locals, lazily decoded from the wire bytes.
std::unique_ptr<LocalNames> local_names_;
@@ -679,6 +967,11 @@ class DebugInfoImpl {
// function).
std::unordered_map<int, std::vector<int>> breakpoints_per_function_;
+ // Store the frame ID when stepping, to avoid breaking in recursive calls of
+ // the same function.
+ StackFrameId stepping_frame_ = NO_ID;
+ int flooded_function_index_ = -1;
+
DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl);
};
@@ -688,16 +981,43 @@ DebugInfo::DebugInfo(NativeModule* native_module)
DebugInfo::~DebugInfo() = default;
Handle<JSObject> DebugInfo::GetLocalScopeObject(Isolate* isolate, Address pc,
- Address fp) {
- return impl_->GetLocalScopeObject(isolate, pc, fp);
+ Address fp,
+ Address debug_break_fp) {
+ return impl_->GetLocalScopeObject(isolate, pc, fp, debug_break_fp);
+}
+
+Handle<JSObject> DebugInfo::GetStackScopeObject(Isolate* isolate, Address pc,
+ Address fp,
+ Address debug_break_fp) {
+ return impl_->GetStackScopeObject(isolate, pc, fp, debug_break_fp);
}
WireBytesRef DebugInfo::GetLocalName(int func_index, int local_index) {
return impl_->GetLocalName(func_index, local_index);
}
-void DebugInfo::SetBreakpoint(int func_index, int offset) {
- impl_->SetBreakpoint(func_index, offset);
+void DebugInfo::SetBreakpoint(int func_index, int offset,
+ Isolate* current_isolate) {
+ impl_->SetBreakpoint(func_index, offset, current_isolate);
+}
+
+void DebugInfo::PrepareStep(Isolate* isolate, StackFrameId break_frame_id) {
+ impl_->PrepareStep(isolate, break_frame_id);
+}
+
+void DebugInfo::ClearStepping() { impl_->ClearStepping(); }
+
+bool DebugInfo::IsStepping(WasmCompiledFrame* frame) {
+ return impl_->IsStepping(frame);
+}
+
+void DebugInfo::RemoveBreakpoint(int func_index, int offset,
+ Isolate* current_isolate) {
+ impl_->RemoveBreakpoint(func_index, offset, current_isolate);
+}
+
+void DebugInfo::RemoveDebugSideTables(Vector<WasmCode* const> code) {
+ impl_->RemoveDebugSideTables(code);
}
} // namespace wasm
@@ -823,8 +1143,9 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
module->functions[func_index].sig);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kInterpreterEntry,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), wasm::WasmCode::kInterpreterEntry,
wasm::ExecutionTier::kInterpreter);
native_module->PublishCode(std::move(wasm_code));
DCHECK(native_module->IsRedirectedToInterpreter(func_index));
@@ -877,8 +1198,16 @@ Handle<JSObject> WasmDebugInfo::GetLocalScopeObject(
}
// static
+Handle<JSObject> WasmDebugInfo::GetStackScopeObject(
+ Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
+ auto* interp_handle = GetInterpreterHandle(*debug_info);
+ auto frame = interp_handle->GetInterpretedFrame(frame_pointer, frame_index);
+ return interp_handle->GetStackScopeObject(frame.get(), debug_info);
+}
+
+// static
Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
- wasm::FunctionSig* sig) {
+ const wasm::FunctionSig* sig) {
Isolate* isolate = debug_info->GetIsolate();
DCHECK_EQ(debug_info->has_c_wasm_entries(),
debug_info->has_c_wasm_entry_map());
@@ -909,7 +1238,7 @@ Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
namespace {
-// Return the next breakable position after {offset_in_func} in function
+// Return the next breakable position at or after {offset_in_func} in function
// {func_index}, or 0 if there is none.
// Note that 0 is never a breakable position in wasm, since the first byte
// contains the locals count for the function.
@@ -926,8 +1255,10 @@ int FindNextBreakablePosition(wasm::NativeModule* native_module, int func_index,
&locals);
DCHECK_LT(0, locals.encoded_size);
if (offset_in_func < 0) return 0;
- for (uint32_t offset : iterator.offsets()) {
- if (offset >= static_cast<uint32_t>(offset_in_func)) return offset;
+ for (; iterator.has_next(); iterator.next()) {
+ if (iterator.pc_offset() < static_cast<uint32_t>(offset_in_func)) continue;
+ if (!wasm::WasmOpcodes::IsBreakable(iterator.current())) continue;
+ return static_cast<int>(iterator.pc_offset());
}
return 0;
}
@@ -985,7 +1316,7 @@ bool WasmScript::SetBreakPointForFunction(Handle<Script> script, int func_index,
break_point);
if (FLAG_debug_in_liftoff) {
- native_module->GetDebugInfo()->SetBreakpoint(func_index, offset);
+ native_module->GetDebugInfo()->SetBreakpoint(func_index, offset, isolate);
} else {
// Iterate over all instances and tell them to set this new breakpoint.
// We do this using the weak list of all instances from the script.
@@ -1276,13 +1607,14 @@ bool WasmScript::GetPossibleBreakpoints(
module_start + func.code.end_offset(),
&locals);
DCHECK_LT(0u, locals.encoded_size);
- for (uint32_t offset : iterator.offsets()) {
- uint32_t total_offset = func.code.offset() + offset;
+ for (; iterator.has_next(); iterator.next()) {
+ uint32_t total_offset = func.code.offset() + iterator.pc_offset();
if (total_offset >= end_offset) {
DCHECK_EQ(end_func_index, func_idx);
break;
}
if (total_offset < start_offset) continue;
+ if (!wasm::WasmOpcodes::IsBreakable(iterator.current())) continue;
locations->emplace_back(0, total_offset, debug::kCommonBreakLocation);
}
}
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
index ca0ab76bf7..2611b7facc 100644
--- a/deps/v8/src/wasm/wasm-debug.h
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -21,6 +21,9 @@ namespace internal {
template <typename T>
class Handle;
class JSObject;
+template <typename T>
+class Vector;
+class WasmCompiledFrame;
class WasmInstanceObject;
namespace wasm {
@@ -28,6 +31,7 @@ namespace wasm {
class DebugInfoImpl;
class LocalNames;
class NativeModule;
+class WasmCode;
class WireBytesRef;
// Side table storing information used to inspect Liftoff frames at runtime.
@@ -37,71 +41,66 @@ class DebugSideTable {
public:
class Entry {
public:
- struct Constant {
- int index;
- int32_t i32_const;
+ enum ValueKind : int8_t { kConstant, kRegister, kStack };
+ struct Value {
+ ValueType type;
+ ValueKind kind;
+ union {
+ int32_t i32_const; // if kind == kConstant
+ int reg_code; // if kind == kRegister
+ int stack_offset; // if kind == kStack
+ };
};
- Entry(int pc_offset, std::vector<ValueType> stack_types,
- std::vector<int> stack_offsets, std::vector<Constant> constants)
- : pc_offset_(pc_offset),
- stack_types_(std::move(stack_types)),
- stack_offsets_(std::move(stack_offsets)),
- constants_(std::move(constants)) {
- DCHECK(std::is_sorted(constants_.begin(), constants_.end(),
- ConstantIndexLess{}));
- DCHECK_EQ(stack_types_.size(), stack_offsets_.size());
- }
+ Entry(int pc_offset, std::vector<Value> values)
+ : pc_offset_(pc_offset), values_(std::move(values)) {}
// Constructor for map lookups (only initializes the {pc_offset_}).
explicit Entry(int pc_offset) : pc_offset_(pc_offset) {}
int pc_offset() const { return pc_offset_; }
- int stack_height() const { return static_cast<int>(stack_types_.size()); }
- ValueType stack_type(int stack_index) const {
- return stack_types_[stack_index];
+
+ int num_values() const { return static_cast<int>(values_.size()); }
+ ValueType value_type(int index) const { return values_[index].type; }
+
+ auto values() const {
+ return base::make_iterator_range(values_.begin(), values_.end());
}
- int stack_offset(int stack_index) const {
- return stack_offsets_[stack_index];
+
+ int stack_offset(int index) const {
+ DCHECK_EQ(kStack, values_[index].kind);
+ return values_[index].stack_offset;
}
- // {index} can point to a local or operand stack value.
- bool IsConstant(int index) const {
- return std::binary_search(constants_.begin(), constants_.end(),
- Constant{index, 0}, ConstantIndexLess{});
+
+ bool is_constant(int index) const {
+ return values_[index].kind == kConstant;
}
- int32_t GetConstant(int index) const {
- DCHECK(IsConstant(index));
- auto it = std::lower_bound(constants_.begin(), constants_.end(),
- Constant{index, 0}, ConstantIndexLess{});
- DCHECK_NE(it, constants_.end());
- DCHECK_EQ(it->index, index);
- return it->i32_const;
+
+ bool is_register(int index) const {
+ return values_[index].kind == kRegister;
}
- private:
- struct ConstantIndexLess {
- bool operator()(const Constant& a, const Constant& b) const {
- return a.index < b.index;
- }
- };
+ int32_t i32_constant(int index) const {
+ DCHECK_EQ(kConstant, values_[index].kind);
+ return values_[index].i32_const;
+ }
+ int32_t register_code(int index) const {
+ DCHECK_EQ(kRegister, values_[index].kind);
+ return values_[index].reg_code;
+ }
+
+ private:
int pc_offset_;
- // TODO(clemensb): Merge these vectors into one.
- std::vector<ValueType> stack_types_;
- std::vector<int> stack_offsets_;
- std::vector<Constant> constants_;
+ std::vector<Value> values_;
};
  // Technically it would be fine to copy this class, but there should not be
  // a reason to do so, hence it is marked move-only.
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(DebugSideTable);
- explicit DebugSideTable(std::vector<ValueType> local_types,
- std::vector<int> local_stack_offsets,
- std::vector<Entry> entries)
- : local_types_(std::move(local_types)),
- local_stack_offsets_(std::move(local_stack_offsets)),
- entries_(std::move(entries)) {
+ explicit DebugSideTable(int num_locals, std::vector<Entry> entries)
+ : num_locals_(num_locals), entries_(std::move(entries)) {
DCHECK(
std::is_sorted(entries_.begin(), entries_.end(), EntryPositionLess{}));
}
@@ -110,6 +109,7 @@ class DebugSideTable {
auto it = std::lower_bound(entries_.begin(), entries_.end(),
Entry{pc_offset}, EntryPositionLess{});
if (it == entries_.end() || it->pc_offset() != pc_offset) return nullptr;
+ DCHECK_LE(num_locals_, it->num_values());
return &*it;
}
@@ -117,12 +117,7 @@ class DebugSideTable {
return base::make_iterator_range(entries_.begin(), entries_.end());
}
- size_t num_entries() const { return entries_.size(); }
- int num_locals() const { return static_cast<int>(local_types_.size()); }
- ValueType local_type(int index) const { return local_types_[index]; }
- int local_stack_offset(int index) const {
- return local_stack_offsets_[index];
- }
+ int num_locals() const { return num_locals_; }
private:
struct EntryPositionLess {
@@ -131,8 +126,7 @@ class DebugSideTable {
}
};
- std::vector<ValueType> local_types_;
- std::vector<int32_t> local_stack_offsets_;
+ int num_locals_;
std::vector<Entry> entries_;
};
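
The new {Value} encoding replaces three parallel vectors with one tagged
union per value. A standalone sketch of the same layout (simplified: no
{ValueType} field, no DCHECKed accessors):

#include <cassert>
#include <cstdint>
#include <vector>

enum ValueKind : int8_t { kConstant, kRegister, kStack };

struct Value {
  ValueKind kind;
  union {
    int32_t i32_const;  // if kind == kConstant
    int reg_code;       // if kind == kRegister
    int stack_offset;   // if kind == kStack
  };
};

int main() {
  std::vector<Value> values;
  Value constant;
  constant.kind = kConstant;
  constant.i32_const = 11;
  values.push_back(constant);
  Value spilled;
  spilled.kind = kStack;
  spilled.stack_offset = 16;
  values.push_back(spilled);
  assert(values[0].kind == kConstant && values[0].i32_const == 11);
  assert(values[1].kind == kStack && values[1].stack_offset == 16);
}

Keeping the discriminant next to the union keeps each entry small, at the
cost of a kind check on every accessor, which the real class performs with
DCHECKs in debug builds.
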
@@ -147,11 +141,27 @@ class DebugInfo {
explicit DebugInfo(NativeModule*);
~DebugInfo();
- Handle<JSObject> GetLocalScopeObject(Isolate*, Address pc, Address fp);
+ // {fp} is the frame pointer of the Liftoff frame, {debug_break_fp} that of
+ // the {WasmDebugBreak} frame (if any).
+ Handle<JSObject> GetLocalScopeObject(Isolate*, Address pc, Address fp,
+ Address debug_break_fp);
+
+ Handle<JSObject> GetStackScopeObject(Isolate*, Address pc, Address fp,
+ Address debug_break_fp);
WireBytesRef GetLocalName(int func_index, int local_index);
- void SetBreakpoint(int func_index, int offset);
+ void SetBreakpoint(int func_index, int offset, Isolate* current_isolate);
+
+ void PrepareStep(Isolate*, StackFrameId);
+
+ void ClearStepping();
+
+ bool IsStepping(WasmCompiledFrame*);
+
+ void RemoveBreakpoint(int func_index, int offset, Isolate* current_isolate);
+
+ void RemoveDebugSideTables(Vector<WasmCode* const>);
private:
std::unique_ptr<DebugInfoImpl> impl_;
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index c1fceb8311..573812e8fa 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -4,7 +4,9 @@
#include "src/wasm/wasm-engine.h"
+#include "src/base/functional.h"
#include "src/base/platform/time.h"
+#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
@@ -20,8 +22,13 @@
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+#include "src/debug/wasm/gdb-server/gdb-server.h"
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -129,22 +136,29 @@ class WasmGCForegroundTask : public CancelableTask {
std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
ModuleOrigin origin, Vector<const uint8_t> wire_bytes) {
if (origin != kWasmOrigin) return nullptr;
- // Temporarily disabled to fix stability issue on M-81
- // (https://crbug.com/1070199).
- if (!FLAG_future) return nullptr;
base::MutexGuard lock(&mutex_);
+ size_t prefix_hash = PrefixHash(wire_bytes);
+ NativeModuleCache::Key key{prefix_hash, wire_bytes};
while (true) {
- auto it = map_.find(wire_bytes);
+ auto it = map_.find(key);
if (it == map_.end()) {
+ // Even though this exact key is not in the cache, there might be a
+ // matching prefix hash indicating that a streaming compilation is
+ // currently compiling a module with the same prefix. {OnFinishedStream}
+ // happens on the main thread too, so waiting for streaming compilation to
+ // finish would create a deadlock. Instead, compile the module twice and
+ // handle the conflict in {UpdateNativeModuleCache}.
+
// Insert a {nullopt} entry to let other threads know that this
// {NativeModule} is already being created on another thread.
- map_.emplace(wire_bytes, base::nullopt);
+ auto p = map_.emplace(key, base::nullopt);
+ USE(p);
+ DCHECK(p.second);
return nullptr;
}
- auto maybe_native_module = it->second;
- if (maybe_native_module.has_value()) {
- auto weak_ptr = maybe_native_module.value();
- if (auto shared_native_module = weak_ptr.lock()) {
+ if (it->second.has_value()) {
+ if (auto shared_native_module = it->second.value().lock()) {
+ DCHECK_EQ(shared_native_module->wire_bytes(), wire_bytes);
return shared_native_module;
}
}
@@ -152,50 +166,108 @@ std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
}
}
-void NativeModuleCache::Update(std::shared_ptr<NativeModule> native_module,
- bool error) {
+bool NativeModuleCache::GetStreamingCompilationOwnership(size_t prefix_hash) {
+ base::MutexGuard lock(&mutex_);
+ auto it = map_.lower_bound(Key{prefix_hash, {}});
+ if (it != map_.end() && it->first.prefix_hash == prefix_hash) {
+ DCHECK_IMPLIES(!it->first.bytes.empty(),
+ PrefixHash(it->first.bytes) == prefix_hash);
+ return false;
+ }
+ Key key{prefix_hash, {}};
+ DCHECK_EQ(0, map_.count(key));
+ map_.emplace(key, base::nullopt);
+ return true;
+}
+
+void NativeModuleCache::StreamingCompilationFailed(size_t prefix_hash) {
+ base::MutexGuard lock(&mutex_);
+ Key key{prefix_hash, {}};
+ DCHECK_EQ(1, map_.count(key));
+ map_.erase(key);
+ cache_cv_.NotifyAll();
+}
+
+std::shared_ptr<NativeModule> NativeModuleCache::Update(
+ std::shared_ptr<NativeModule> native_module, bool error) {
DCHECK_NOT_NULL(native_module);
- if (native_module->module()->origin != kWasmOrigin) return;
- // Temporarily disabled to fix stability issue on M-81
- // (https://crbug.com/1070199).
- if (!FLAG_future) return;
+ if (native_module->module()->origin != kWasmOrigin) return native_module;
Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ DCHECK(!wire_bytes.empty());
+ size_t prefix_hash = PrefixHash(native_module->wire_bytes());
base::MutexGuard lock(&mutex_);
- auto it = map_.find(wire_bytes);
- DCHECK_NE(it, map_.end());
- DCHECK(!it->second.has_value());
- // The lifetime of the temporary entry's bytes is unknown. Use the new native
- // module's owned copy of the bytes for the key instead.
- map_.erase(it);
+ map_.erase(Key{prefix_hash, {}});
+ const Key key{prefix_hash, wire_bytes};
+ auto it = map_.find(key);
+ if (it != map_.end()) {
+ if (it->second.has_value()) {
+ auto conflicting_module = it->second.value().lock();
+ if (conflicting_module != nullptr) {
+ DCHECK_EQ(conflicting_module->wire_bytes(), wire_bytes);
+ return conflicting_module;
+ }
+ }
+ map_.erase(it);
+ }
if (!error) {
- map_.emplace(wire_bytes, base::Optional<std::weak_ptr<NativeModule>>(
- std::move(native_module)));
+ // The key now points to the new native module's owned copy of the bytes,
+ // so that it stays valid until the native module is freed and erased from
+ // the map.
+ auto p = map_.emplace(
+ key, base::Optional<std::weak_ptr<NativeModule>>(native_module));
+ USE(p);
+ DCHECK(p.second);
}
cache_cv_.NotifyAll();
+ return native_module;
}
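
A toy version of the conflict resolution above, with std::optional standing
in for base::Optional and an int for the native module. The placeholder
inserted by {MaybeGetNativeModule} is replaced on success, and a duplicate
compilation result is discarded in favor of the already-published module:

#include <cassert>
#include <map>
#include <memory>
#include <optional>

using Module = int;
std::map<int, std::optional<std::weak_ptr<Module>>> cache;

std::shared_ptr<Module> Update(int key, std::shared_ptr<Module> module) {
  auto it = cache.find(key);
  if (it != cache.end() && it->second.has_value()) {
    // Another job already published a live module for the same key.
    if (auto existing = it->second.value().lock()) return existing;
  }
  cache[key] = std::weak_ptr<Module>(module);
  return module;
}

int main() {
  auto first = std::make_shared<Module>(1);
  auto second = std::make_shared<Module>(2);
  cache[7] = std::nullopt;             // placeholder from MaybeGet
  assert(Update(7, first) == first);   // placeholder replaced
  assert(Update(7, second) == first);  // duplicate discarded
}
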
void NativeModuleCache::Erase(NativeModule* native_module) {
+ if (native_module->module()->origin != kWasmOrigin) return;
+ // Happens in some tests where bytes are set directly.
+ if (native_module->wire_bytes().empty()) return;
base::MutexGuard lock(&mutex_);
- auto cache_it = map_.find(native_module->wire_bytes());
- // Not all native modules are stored in the cache currently. In particular
- // streaming compilation and asmjs compilation results are not. So make
- // sure that we only delete existing and expired entries.
- // Do not erase {nullopt} values either, as they indicate that the
- // {NativeModule} is currently being created in another thread.
- if (cache_it != map_.end() && cache_it->second.has_value() &&
- cache_it->second.value().expired()) {
- map_.erase(cache_it);
- cache_cv_.NotifyAll();
- }
+ size_t prefix_hash = PrefixHash(native_module->wire_bytes());
+ map_.erase(Key{prefix_hash, native_module->wire_bytes()});
+ cache_cv_.NotifyAll();
}
-size_t NativeModuleCache::WireBytesHasher::operator()(
- const Vector<const uint8_t>& bytes) const {
+// static
+size_t NativeModuleCache::WireBytesHash(Vector<const uint8_t> bytes) {
return StringHasher::HashSequentialString(
reinterpret_cast<const char*>(bytes.begin()), bytes.length(),
kZeroHashSeed);
}
+// static
+size_t NativeModuleCache::PrefixHash(Vector<const uint8_t> wire_bytes) {
+ // Compute the hash as a combined hash of the sections up to the code section
+ // header, to mirror the way streaming compilation does it.
+ Decoder decoder(wire_bytes.begin(), wire_bytes.end());
+ decoder.consume_bytes(8, "module header");
+ size_t hash = NativeModuleCache::WireBytesHash(wire_bytes.SubVector(0, 8));
+ SectionCode section_id = SectionCode::kUnknownSectionCode;
+ while (decoder.ok() && decoder.more()) {
+ section_id = static_cast<SectionCode>(decoder.consume_u8());
+ uint32_t section_size = decoder.consume_u32v("section size");
+ if (section_id == SectionCode::kCodeSectionCode) {
+ uint32_t num_functions = decoder.consume_u32v("num functions");
+ // If {num_functions} is 0, the streaming decoder skips the section. Do
+ // the same here to ensure hashes are consistent.
+ if (num_functions != 0) {
+ hash = base::hash_combine(hash, section_size);
+ }
+ break;
+ }
+ const uint8_t* payload_start = decoder.pc();
+ decoder.consume_bytes(section_size, "section payload");
+ size_t section_hash = NativeModuleCache::WireBytesHash(
+ Vector<const uint8_t>(payload_start, section_size));
+ hash = base::hash_combine(hash, section_hash);
+ }
+ return hash;
+}
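
A simplified, self-contained re-implementation of the prefix walk above, for
illustration only: section sizes are single bytes instead of LEB128, and
{HashBytes}/{Combine} are toy stand-ins for {WireBytesHash} and
base::hash_combine. The point is that two modules differing only in their
function bodies share a prefix hash:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

size_t HashBytes(const uint8_t* begin, size_t len) {
  size_t h = 0;
  for (size_t i = 0; i < len; ++i) h = h * 31 + begin[i];
  return h;
}

size_t Combine(size_t a, size_t b) { return a ^ (b + 0x9e3779b9 + (a << 6)); }

size_t PrefixHash(const std::vector<uint8_t>& wire) {
  constexpr uint8_t kCodeSection = 10;
  size_t pos = 8;  // skip the 8-byte module header
  size_t hash = HashBytes(wire.data(), 8);
  while (pos < wire.size()) {
    uint8_t id = wire[pos++];
    uint8_t size = wire[pos++];
    if (id == kCodeSection) {
      uint8_t num_functions = wire[pos];
      // Mirror the streaming decoder: an empty code section is skipped.
      if (num_functions != 0) hash = Combine(hash, size);
      break;  // function bodies are not part of the prefix
    }
    hash = Combine(hash, HashBytes(wire.data() + pos, size));
    pos += size;
  }
  return hash;
}

int main() {
  // header | type section (id 1) | code section header (id 10)
  std::vector<uint8_t> a = {0, 'a', 's', 'm', 1, 0, 0, 0, 1, 2, 0x60, 0x00,
                            10, 4, 1 /* one function */, 0, 0, 0};
  std::vector<uint8_t> b = a;
  b.back() = 0xff;  // function *bodies* differ, the prefix does not
  assert(PrefixHash(a) == PrefixHash(b));
}
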
+
struct WasmEngine::CurrentGCInfo {
explicit CurrentGCInfo(int8_t gc_sequence_index)
: gc_sequence_index(gc_sequence_index) {
@@ -261,6 +333,9 @@ struct WasmEngine::IsolateInfo {
std::shared_ptr<v8::TaskRunner> foreground_task_runner;
const std::shared_ptr<Counters> async_counters;
+
+  // Keep new modules in a tiered-down state.
+ bool keep_tiered_down = false;
};
struct WasmEngine::NativeModuleInfo {
@@ -285,6 +360,11 @@ struct WasmEngine::NativeModuleInfo {
WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}
WasmEngine::~WasmEngine() {
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+ // Synchronize on the GDB-remote thread, if running.
+ gdb_server_ = nullptr;
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
// Synchronize on all background compile tasks.
background_compile_task_manager_.CancelAndWait();
// All AsyncCompileJobs have been canceled.
@@ -293,6 +373,8 @@ WasmEngine::~WasmEngine() {
DCHECK(isolates_.empty());
// All NativeModules did die.
DCHECK(native_modules_.empty());
+ // Native module cache does not leak.
+ DCHECK(native_module_cache_.empty());
}
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
@@ -368,9 +450,21 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
std::move(result).value(), bytes, &export_wrappers);
if (!native_module) return {};
- Handle<Script> script = CreateWasmScript(
- isolate, bytes, VectorOf(native_module->module()->source_map_url),
- native_module->module()->name);
+#ifdef DEBUG
+ // Ensure that code GC will check this isolate for live code.
+ {
+ base::MutexGuard lock(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ DCHECK_EQ(1, isolates_[isolate]->native_modules.count(native_module.get()));
+ DCHECK_EQ(1, native_modules_.count(native_module.get()));
+ DCHECK_EQ(1, native_modules_[native_module.get()]->isolates.count(isolate));
+ }
+#endif
+
+ Handle<Script> script =
+ CreateWasmScript(isolate, bytes.module_bytes(),
+ VectorOf(native_module->module()->source_map_url),
+ native_module->module()->name);
// Create the compiled module object and populate with compiled functions
// and information needed at instantiation time. This object needs to be
@@ -500,6 +594,35 @@ void WasmEngine::RecompileAllFunctions(Isolate* isolate,
RecompileNativeModule(isolate, native_module, tier);
}
+void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
+ std::vector<NativeModule*> native_modules;
+ {
+ base::MutexGuard lock(&mutex_);
+ if (isolates_[isolate]->keep_tiered_down) return;
+ isolates_[isolate]->keep_tiered_down = true;
+ for (auto* native_module : isolates_[isolate]->native_modules) {
+ native_modules.push_back(native_module);
+ }
+ }
+ for (auto* native_module : native_modules) {
+ native_module->TierDown(isolate);
+ }
+}
+
+void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
+ std::vector<NativeModule*> native_modules;
+ {
+ base::MutexGuard lock(&mutex_);
+ isolates_[isolate]->keep_tiered_down = false;
+ for (auto* native_module : isolates_[isolate]->native_modules) {
+ native_modules.push_back(native_module);
+ }
+ }
+ for (auto* native_module : native_modules) {
+ native_module->TierUp(isolate);
+ }
+}
+
std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
Handle<WasmModuleObject> module_object) {
return module_object->shared_native_module();
@@ -509,9 +632,10 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module) {
NativeModule* native_module = shared_native_module.get();
ModuleWireBytes wire_bytes(native_module->wire_bytes());
- Handle<Script> script = CreateWasmScript(
- isolate, wire_bytes, VectorOf(native_module->module()->source_map_url),
- native_module->module()->name);
+ Handle<Script> script =
+ CreateWasmScript(isolate, wire_bytes.module_bytes(),
+ VectorOf(native_module->module()->source_map_url),
+ native_module->module()->name);
Handle<FixedArray> export_wrappers;
CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
@@ -623,15 +747,6 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
}
}
-namespace {
-int GetGCTimeMicros(base::TimeTicks start) {
- DCHECK(!start.IsNull());
- int64_t duration_us = (base::TimeTicks::Now() - start).InMicroseconds();
- return static_cast<int>(
- std::min(std::max(int64_t{0}, duration_us), int64_t{kMaxInt}));
-}
-} // namespace
-
void WasmEngine::AddIsolate(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
DCHECK_EQ(0, isolates_.count(isolate));
@@ -651,13 +766,6 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
for (auto* native_module : engine->isolates_[isolate]->native_modules) {
native_module->SampleCodeSize(counters, NativeModule::kSampling);
}
- // If there is an ongoing code GC, sample its time here. This will record
- // samples for very long-running or never ending GCs.
- if (engine->current_gc_info_ &&
- !engine->current_gc_info_->start_time.IsNull()) {
- isolate->counters()->wasm_code_gc_time()->AddSample(
- GetGCTimeMicros(engine->current_gc_info_->start_time));
- }
};
isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
nullptr);
@@ -746,30 +854,69 @@ void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled,
std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
- // TODO(clemensb): Remove --wasm-far-jump-table and {can_request_more}.
- bool can_request_more =
- !wasm::NativeModule::kNeedsFarJumpsBetweenCodeSpaces ||
- FLAG_wasm_far_jump_table;
- std::shared_ptr<NativeModule> native_module =
- code_manager_.NewNativeModule(this, isolate, enabled, code_size_estimate,
- can_request_more, std::move(module));
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+ if (FLAG_wasm_gdb_remote && !gdb_server_) {
+ gdb_server_ = std::make_unique<gdb_server::GdbServer>();
+ }
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
+ std::shared_ptr<NativeModule> native_module = code_manager_.NewNativeModule(
+ this, isolate, enabled, code_size_estimate, std::move(module));
base::MutexGuard lock(&mutex_);
auto pair = native_modules_.insert(std::make_pair(
native_module.get(), std::make_unique<NativeModuleInfo>()));
DCHECK(pair.second); // inserted new entry.
pair.first->second.get()->isolates.insert(isolate);
isolates_[isolate]->native_modules.insert(native_module.get());
+ if (isolates_[isolate]->keep_tiered_down) {
+ native_module->SetTieredDown();
+ }
return native_module;
}
std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
- ModuleOrigin origin, Vector<const uint8_t> wire_bytes) {
- return native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
+ ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate) {
+ std::shared_ptr<NativeModule> native_module =
+ native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
+ if (native_module) {
+ base::MutexGuard guard(&mutex_);
+ auto& native_module_info = native_modules_[native_module.get()];
+ if (!native_module_info) {
+ native_module_info = std::make_unique<NativeModuleInfo>();
+ }
+ native_module_info->isolates.insert(isolate);
+ isolates_[isolate]->native_modules.insert(native_module.get());
+ }
+ return native_module;
}
-void WasmEngine::UpdateNativeModuleCache(
- std::shared_ptr<NativeModule> native_module, bool error) {
- native_module_cache_.Update(native_module, error);
+bool WasmEngine::UpdateNativeModuleCache(
+ bool error, std::shared_ptr<NativeModule>* native_module,
+ Isolate* isolate) {
+ // Pass {native_module} by value here to keep it alive until at least after
+ // we returned from {Update}. Otherwise, we might {Erase} it inside {Update}
+ // which would lock the mutex twice.
+ auto prev = native_module->get();
+ *native_module = native_module_cache_.Update(*native_module, error);
+
+ if (prev == native_module->get()) return true;
+
+ base::MutexGuard guard(&mutex_);
+ auto& native_module_info = native_modules_[native_module->get()];
+ if (!native_module_info) {
+ native_module_info = std::make_unique<NativeModuleInfo>();
+ }
+ native_module_info->isolates.insert(isolate);
+ isolates_[isolate]->native_modules.insert((*native_module).get());
+ return false;
+}
+
+bool WasmEngine::GetStreamingCompilationOwnership(size_t prefix_hash) {
+ return native_module_cache_.GetStreamingCompilationOwnership(prefix_hash);
+}
+
+void WasmEngine::StreamingCompilationFailed(size_t prefix_hash) {
+ native_module_cache_.StreamingCompilationFailed(prefix_hash);
}
void WasmEngine::FreeNativeModule(NativeModule* native_module) {
@@ -947,9 +1094,6 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
DCHECK(FLAG_wasm_code_gc);
new_potentially_dead_code_size_ = 0;
current_gc_info_.reset(new CurrentGCInfo(gc_sequence_index));
- if (base::TimeTicks::IsHighResolution()) {
- current_gc_info_->start_time = base::TimeTicks::Now();
- }
// Add all potentially dead code to this GC, and trigger a GC task in each
// isolate.
for (auto& entry : native_modules_) {
@@ -1011,16 +1155,8 @@ void WasmEngine::PotentiallyFinishCurrentGC() {
FreeDeadCodeLocked(dead_code);
- int duration_us = 0;
- if (!current_gc_info_->start_time.IsNull()) {
- duration_us = GetGCTimeMicros(current_gc_info_->start_time);
- for (auto& entry : isolates_) {
- entry.second->async_counters->wasm_code_gc_time()->AddSample(duration_us);
- }
- }
-
- TRACE_CODE_GC("Took %d us; found %zu dead code objects, freed %zu.\n",
- duration_us, current_gc_info_->dead_code.size(), num_freed);
+ TRACE_CODE_GC("Found %zu dead code objects, freed %zu.\n",
+ current_gc_info_->dead_code.size(), num_freed);
USE(num_freed);
int8_t next_gc_sequence_index = current_gc_info_->next_gc_sequence_index;
@@ -1050,12 +1186,18 @@ std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
return *GetSharedWasmEngine();
}
-// {max_mem_pages} is declared in wasm-limits.h.
-uint32_t max_mem_pages() {
+// {max_initial_mem_pages} is declared in wasm-limits.h.
+uint32_t max_initial_mem_pages() {
STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}
+uint32_t max_maximum_mem_pages() {
+ STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
+ return std::min(uint32_t{kV8MaxWasmMemoryPages},
+ FLAG_wasm_max_mem_pages_growth);
+}
+
// {max_table_init_entries} is declared in wasm-limits.h.
uint32_t max_table_init_entries() {
return std::min(uint32_t{kV8MaxWasmTableInitEntries},
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 073d617b31..7d14fef506 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_WASM_ENGINE_H_
#define V8_WASM_WASM_ENGINE_H_
+#include <algorithm>
+#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
@@ -34,6 +36,10 @@ class ErrorThrower;
struct ModuleWireBytes;
class WasmFeatures;
+namespace gdb_server {
+class GdbServer;
+}
+
class V8_EXPORT_PRIVATE CompilationResultResolver {
public:
virtual void OnCompilationSucceeded(Handle<WasmModuleObject> result) = 0;
@@ -51,15 +57,57 @@ class V8_EXPORT_PRIVATE InstantiationResultResolver {
// Native modules cached by their wire bytes.
class NativeModuleCache {
public:
- struct WireBytesHasher {
- size_t operator()(const Vector<const uint8_t>& bytes) const;
+ struct Key {
+ // Store the prefix hash as part of the key for faster lookup, and to
+ // quickly check existing prefixes for streaming compilation.
+ size_t prefix_hash;
+ Vector<const uint8_t> bytes;
+
+ bool operator==(const Key& other) const {
+ bool eq = bytes == other.bytes;
+ DCHECK_IMPLIES(eq, prefix_hash == other.prefix_hash);
+ return eq;
+ }
+
+ bool operator<(const Key& other) const {
+ if (prefix_hash != other.prefix_hash) {
+ DCHECK_IMPLIES(!bytes.empty() && !other.bytes.empty(),
+ bytes != other.bytes);
+ return prefix_hash < other.prefix_hash;
+ }
+ if (bytes.size() != other.bytes.size()) {
+ return bytes.size() < other.bytes.size();
+ }
+ // Fast path when the base pointers are the same.
+ // Also handles the {nullptr} case which would be UB for memcmp.
+ if (bytes.begin() == other.bytes.begin()) {
+ DCHECK_EQ(prefix_hash, other.prefix_hash);
+ return false;
+ }
+ DCHECK_NOT_NULL(bytes.begin());
+ DCHECK_NOT_NULL(other.bytes.begin());
+ return memcmp(bytes.begin(), other.bytes.begin(), bytes.size()) < 0;
+ }
};
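
Because entries sharing a prefix hash are adjacent under this ordering, and
an empty {bytes} field sorts before any real entry with the same hash,
{GetStreamingCompilationOwnership} can probe with lower_bound on an
empty-bytes key. A simplified sketch (std::string replaces the byte vector,
and a lexicographic tie-break replaces size-then-memcmp):

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

struct Key {
  size_t prefix_hash;
  std::string bytes;
  bool operator<(const Key& other) const {
    if (prefix_hash != other.prefix_hash) {
      return prefix_hash < other.prefix_hash;
    }
    return bytes < other.bytes;  // empty bytes sorts first
  }
};

int main() {
  std::map<Key, int> cache;
  cache[Key{42, "full-module-bytes"}] = 1;
  // Probe: is *any* module with prefix hash 42 present (or in flight)?
  auto it = cache.lower_bound(Key{42, {}});
  assert(it != cache.end() && it->first.prefix_hash == 42);
  // No entry with prefix hash 43:
  it = cache.lower_bound(Key{43, {}});
  assert(it == cache.end() || it->first.prefix_hash != 43);
}
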
std::shared_ptr<NativeModule> MaybeGetNativeModule(
ModuleOrigin origin, Vector<const uint8_t> wire_bytes);
- void Update(std::shared_ptr<NativeModule> native_module, bool error);
+ bool GetStreamingCompilationOwnership(size_t prefix_hash);
+ void StreamingCompilationFailed(size_t prefix_hash);
+ std::shared_ptr<NativeModule> Update(
+ std::shared_ptr<NativeModule> native_module, bool error);
void Erase(NativeModule* native_module);
+ bool empty() { return map_.empty(); }
+
+ static size_t WireBytesHash(Vector<const uint8_t> bytes);
+
+ // Hash the wire bytes up to the code section header. Used as a heuristic to
+ // avoid streaming compilation of modules that are likely already in the
+ // cache. See {GetStreamingCompilationOwnership}. Assumes that the bytes have
+ // already been validated.
+ static size_t PrefixHash(Vector<const uint8_t> wire_bytes);
+
private:
// Each key points to the corresponding native module's wire bytes, so they
// should always be valid as long as the native module is alive. When
@@ -72,10 +120,7 @@ class NativeModuleCache {
// before trying to get it from the cache.
// By contrast, an expired {weak_ptr} indicates that the native module died
// and will soon be cleaned up from the cache.
- std::unordered_map<Vector<const uint8_t>,
- base::Optional<std::weak_ptr<NativeModule>>,
- WireBytesHasher>
- map_;
+ std::map<Key, base::Optional<std::weak_ptr<NativeModule>>> map_;
base::Mutex mutex_;
@@ -86,13 +131,13 @@ class NativeModuleCache {
};
// The central data structure that represents an engine instance capable of
-// loading, instantiating, and executing WASM code.
+// loading, instantiating, and executing Wasm code.
class V8_EXPORT_PRIVATE WasmEngine {
public:
WasmEngine();
~WasmEngine();
- // Synchronously validates the given bytes that represent an encoded WASM
+ // Synchronously validates the given bytes that represent an encoded Wasm
// module.
bool SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
const ModuleWireBytes& bytes);
@@ -107,14 +152,14 @@ class V8_EXPORT_PRIVATE WasmEngine {
Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
Handle<Script> script);
- // Synchronously compiles the given bytes that represent an encoded WASM
+ // Synchronously compiles the given bytes that represent an encoded Wasm
// module.
MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
const WasmFeatures& enabled,
ErrorThrower* thrower,
const ModuleWireBytes& bytes);
- // Synchronously instantiate the given WASM module with the given imports.
+ // Synchronously instantiate the given Wasm module with the given imports.
// If the module represents an asm.js module, then the supplied {memory}
// should be used as the memory of the instance.
MaybeHandle<WasmInstanceObject> SyncInstantiate(
@@ -123,7 +168,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
MaybeHandle<JSArrayBuffer> memory);
// Begin an asynchronous compilation of the given bytes that represent an
- // encoded WASM module.
+ // encoded Wasm module.
// The {is_shared} flag indicates if the bytes backing the module could
// be shared across threads, i.e. could be concurrently modified.
void AsyncCompile(Isolate* isolate, const WasmFeatures& enabled,
@@ -131,7 +176,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
const ModuleWireBytes& bytes, bool is_shared,
const char* api_method_name_for_errors);
- // Begin an asynchronous instantiation of the given WASM module.
+ // Begin an asynchronous instantiation of the given Wasm module.
void AsyncInstantiate(Isolate* isolate,
std::unique_ptr<InstantiationResultResolver> resolver,
Handle<WasmModuleObject> module_object,
@@ -152,6 +197,9 @@ class V8_EXPORT_PRIVATE WasmEngine {
void RecompileAllFunctions(Isolate* isolate, NativeModule* native_module,
ExecutionTier tier);
+ void TierDownAllModulesPerIsolate(Isolate* isolate);
+ void TierUpAllModulesPerIsolate(Isolate* isolate);
+
// Exports the sharable parts of the given module object so that they can be
// transferred to a different Context/Isolate using the same engine.
std::shared_ptr<NativeModule> ExportNativeModule(
@@ -226,21 +274,41 @@ class V8_EXPORT_PRIVATE WasmEngine {
Isolate* isolate, const WasmFeatures& enabled_features,
std::shared_ptr<const WasmModule> module, size_t code_size_estimate);
- // Try getting a cached {NativeModule}. The {wire_bytes}' underlying array
- // should be valid at least until the next call to {UpdateNativeModuleCache}.
- // Return nullptr if no {NativeModule} exists for these bytes. In this case,
- // an empty entry is added to let other threads know that a {NativeModule} for
- // these bytes is currently being created. The caller should eventually call
- // {UpdateNativeModuleCache} to update the entry and wake up other threads.
+ // Try getting a cached {NativeModule}, or get ownership for its creation.
+ // Return {nullptr} if no {NativeModule} exists for these bytes. In this case,
+ // a {nullopt} entry is added to let other threads know that a {NativeModule}
+ // for these bytes is currently being created. The caller should eventually
+ // call {UpdateNativeModuleCache} to update the entry and wake up other
+ // threads. The {wire_bytes}' underlying array should be valid at least until
+ // the call to {UpdateNativeModuleCache}.
std::shared_ptr<NativeModule> MaybeGetNativeModule(
- ModuleOrigin origin, Vector<const uint8_t> wire_bytes);
+ ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate);
- // Update the temporary entry inserted by {MaybeGetNativeModule}.
- // If {error} is true, the entry is erased. Otherwise the entry is updated to
- // match the {native_module} argument. Wake up threads waiting for this native
+ // Replace the temporary {nullopt} with the new native module, or
+ // erase it if any error occurred. Wake up blocked threads waiting for this
// module.
- void UpdateNativeModuleCache(std::shared_ptr<NativeModule> native_module,
- bool error);
+ // To avoid a deadlock on the main thread between synchronous and streaming
+ // compilation, two compilation jobs might compile the same native module at
+ // the same time. In this case the first call to {UpdateNativeModuleCache}
+ // will insert the native module in the cache, and the last call will discard
+ // its {native_module} argument and replace it with the existing entry.
+ // Return true in the former case, and false in the latter.
+ bool UpdateNativeModuleCache(bool error,
+ std::shared_ptr<NativeModule>* native_module,
+ Isolate* isolate);
+
+ // Register this prefix hash for a streaming compilation job.
+ // If the hash is not in the cache yet, the function returns true and the
+ // caller owns the compilation of this module.
+ // Otherwise another compilation job is currently preparing or has already
+ // prepared a module with the same prefix hash. The caller should wait until
+ // the stream is finished and call {MaybeGetNativeModule} to either get the
+ // module from the cache or get ownership for the compilation of these bytes.
+ bool GetStreamingCompilationOwnership(size_t prefix_hash);
+
+ // Remove the prefix hash from the cache when compilation failed. If
+ // compilation succeeded, {UpdateNativeModuleCache} should be called instead.
+ void StreamingCompilationFailed(size_t prefix_hash);
void FreeNativeModule(NativeModule*);
@@ -305,6 +373,11 @@ class V8_EXPORT_PRIVATE WasmEngine {
// engine, they must all be finished because they access the allocator.
CancelableTaskManager background_compile_task_manager_;
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+ // Implements a GDB-remote stub for WebAssembly debugging.
+ std::unique_ptr<gdb_server::GdbServer> gdb_server_;
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
// This mutex protects all information which is mutated concurrently or
// fields that are initialized lazily on the first access.
base::Mutex mutex_;
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 5c1058d788..6dbb939384 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -11,7 +11,9 @@
#include "src/base/bits.h"
#include "src/base/ieee754.h"
+#include "src/common/assert-scope.h"
#include "src/utils/memcopy.h"
+#include "src/wasm/wasm-objects-inl.h"
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) || \
@@ -323,60 +325,140 @@ void float64_pow_wrapper(Address data) {
WriteUnalignedValue<double>(data, base::ieee754::pow(x, y));
}
-// Asan on Windows triggers exceptions in this function to allocate
-// shadow memory lazily. When this function is called from WebAssembly,
-// these exceptions would be handled by the trap handler before they get
-// handled by Asan, and thereby confuse the thread-in-wasm flag.
-// Therefore we disable ASAN for this function. Alternatively we could
-// reset the thread-in-wasm flag before calling this function. However,
-// as this is only a problem with Asan on Windows, we did not consider
-// it worth the overhead.
-DISABLE_ASAN void memory_copy_wrapper(Address dst, Address src, uint32_t size) {
- // Use explicit forward and backward copy to match the required semantics for
- // the memory.copy instruction. It is assumed that the caller of this
- // function has already performed bounds checks, so {src + size} and
- // {dst + size} should not overflow.
- DCHECK(src + size >= src && dst + size >= dst);
- uint8_t* dst8 = reinterpret_cast<uint8_t*>(dst);
- uint8_t* src8 = reinterpret_cast<uint8_t*>(src);
- if (src < dst && src + size > dst && dst + size > src) {
- dst8 += size - 1;
- src8 += size - 1;
- for (; size > 0; size--) {
- *dst8-- = *src8--;
+namespace {
+class ThreadNotInWasmScope {
+// Asan on Windows triggers exceptions to allocate shadow memory lazily. When
+// this function is called from WebAssembly, these exceptions would be handled
+// by the trap handler before they get handled by Asan, and thereby confuse the
+// thread-in-wasm flag. Therefore we disable ASAN for this function.
+// Alternatively we could reset the thread-in-wasm flag before calling this
+// function. However, as this is only a problem with Asan on Windows, we did not
+// consider it worth the overhead.
+#if defined(RESET_THREAD_IN_WASM_FLAG_FOR_ASAN_ON_WINDOWS)
+
+ public:
+ ThreadNotInWasmScope() : thread_was_in_wasm_(trap_handler::IsThreadInWasm()) {
+ if (thread_was_in_wasm_) {
+ trap_handler::ClearThreadInWasm();
}
- } else {
- for (; size > 0; size--) {
- *dst8++ = *src8++;
+ }
+
+ ~ThreadNotInWasmScope() {
+ if (thread_was_in_wasm_) {
+ trap_handler::SetThreadInWasm();
}
}
-}
-// Asan on Windows triggers exceptions in this function that confuse the
-// WebAssembly trap handler, so Asan is disabled. See the comment on
-// memory_copy_wrapper above for more info.
-void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size) {
-#if defined(RESET_THREAD_IN_WASM_FLAG_FOR_ASAN_ON_WINDOWS)
- bool thread_was_in_wasm = trap_handler::IsThreadInWasm();
- if (thread_was_in_wasm) {
- trap_handler::ClearThreadInWasm();
+ private:
+ bool thread_was_in_wasm_;
+#else
+
+ public:
+ ThreadNotInWasmScope() {
+ // This is needed to avoid compilation errors (unused variable).
+ USE(this);
}
#endif
+};
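Because the flag handling is wrapped in an RAII scope, every early return in the wrappers defined below restores the thread-in-wasm state automatically. A sketch of the pattern (illustrative only):

    // Any return path restores the thread-in-wasm flag via the destructor.
    int32_t some_wrapper_sketch(bool out_of_bounds) {
      ThreadNotInWasmScope thread_not_in_wasm_scope;  // Clears flag if set.
      if (out_of_bounds) return 0;  // Flag restored here...
      return 1;                     // ...and here.
    }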
- // Use an explicit forward copy to match the required semantics for the
- // memory.fill instruction. It is assumed that the caller of this function
- // has already performed bounds checks, so {dst + size} should not overflow.
- DCHECK(dst + size >= dst);
- uint8_t* dst8 = reinterpret_cast<uint8_t*>(dst);
- uint8_t value8 = static_cast<uint8_t>(value);
- for (; size > 0; size--) {
- *dst8++ = value8;
- }
-#if defined(RESET_THREAD_IN_WASM_FLAG_FOR_ASAN_ON_WINDOWS)
- if (thread_was_in_wasm) {
- trap_handler::SetThreadInWasm();
- }
+#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
+inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
+ return instance.memory_start() + index;
+}
+
+inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
+ return base + index;
+}
+
+#else
+inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
+ // Compute the effective address of the access, making sure to condition
+ // the index even in the in-bounds case.
+ return instance.memory_start() + (index & instance.memory_mask());
+}
+
+inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
+ size_t mem_mask = base::bits::RoundUpToPowerOfTwo(size) - 1;
+ return base + (index & mem_mask);
+}
#endif
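With mitigations enabled, the index is conditioned with a mask derived from the next power of two of the size, so even a mispredicted out-of-bounds access stays within the memory reservation; the bounds checks in the wrappers below still decide success or failure. A small illustration with concrete numbers:

    // Illustration only: mask for a 65536-byte segment.
    size_t mem_mask = base::bits::RoundUpToPowerOfTwo(65536) - 1;  // 0xFFFF
    // An in-bounds index passes through unchanged:
    //   base + (0x1234 & mem_mask)  == base + 0x1234
    // A (speculatively) out-of-bounds index is conditioned into range:
    //   base + (0x12345 & mem_mask) == base + 0x2345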
+
+template <typename V>
+V ReadAndIncrementOffset(Address data, size_t* offset) {
+ V result = ReadUnalignedValue<V>(data + *offset);
+ *offset += sizeof(V);
+ return result;
+}
+} // namespace
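Each wrapper below takes a single {Address} pointing at a packed argument buffer: the instance object followed by the scalar operands, written back to back. {ReadAndIncrementOffset} walks that buffer; a sketch of the matching write side (the real packing happens in generated code, so this helper is purely illustrative):

    // Hypothetical mirror of ReadAndIncrementOffset for the producer side.
    template <typename V>
    void WriteAndIncrementOffset(Address data, size_t* offset, V value) {
      WriteUnalignedValue<V>(data + *offset, value);
      *offset += sizeof(V);
    }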
+
+int32_t memory_init_wrapper(Address data) {
+ constexpr int32_t kSuccess = 1;
+ constexpr int32_t kOutOfBounds = 0;
+ ThreadNotInWasmScope thread_not_in_wasm_scope;
+ DisallowHeapAllocation disallow_heap_allocation;
+ size_t offset = 0;
+ Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
+ WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
+ uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t seg_index = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+
+ size_t mem_size = instance.memory_size();
+ if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+
+ size_t seg_size = instance.data_segment_sizes()[seg_index];
+ if (!base::IsInBounds(src, size, seg_size)) return kOutOfBounds;
+
+ byte* seg_start =
+ reinterpret_cast<byte*>(instance.data_segment_starts()[seg_index]);
+ std::memcpy(EffectiveAddress(instance, dst),
+ EffectiveAddress(seg_start, seg_size, src), size);
+ return kSuccess;
+}
+
+int32_t memory_copy_wrapper(Address data) {
+ constexpr int32_t kSuccess = 1;
+ constexpr int32_t kOutOfBounds = 0;
+ ThreadNotInWasmScope thread_not_in_wasm_scope;
+ DisallowHeapAllocation disallow_heap_allocation;
+ size_t offset = 0;
+ Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
+ WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
+ uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+
+ size_t mem_size = instance.memory_size();
+ if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+ if (!base::IsInBounds(src, size, mem_size)) return kOutOfBounds;
+
+ // Use std::memmove, because the ranges can overlap.
+ std::memmove(EffectiveAddress(instance, dst), EffectiveAddress(instance, src),
+ size);
+ return kSuccess;
+}
+
+int32_t memory_fill_wrapper(Address data) {
+ constexpr int32_t kSuccess = 1;
+ constexpr int32_t kOutOfBounds = 0;
+
+ ThreadNotInWasmScope thread_not_in_wasm_scope;
+ DisallowHeapAllocation disallow_heap_allocation;
+
+ size_t offset = 0;
+ Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
+ WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
+ uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint8_t value =
+ static_cast<uint8_t>(ReadAndIncrementOffset<uint32_t>(data, &offset));
+ size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+
+ size_t mem_size = instance.memory_size();
+ if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+
+ std::memset(EffectiveAddress(instance, dst), value, size);
+ return kSuccess;
}
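All three wrappers gate the operation on {base::IsInBounds} rather than a naive {dst + size <= mem_size} comparison, which could wrap around. A sketch restating the overflow-safe form (the real helper lives in src/base; this is an assumption about its shape, not a copy):

    // True iff [index, index + size) fits inside [0, max), without computing
    // index + size (which could overflow).
    inline bool IsInBoundsSketch(size_t index, size_t size, size_t max) {
      return index <= max && size <= max - index;
    }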
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index d102651702..0a2d5f3060 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -71,9 +71,17 @@ V8_EXPORT_PRIVATE void word64_ror_wrapper(Address data);
V8_EXPORT_PRIVATE void float64_pow_wrapper(Address data);
-void memory_copy_wrapper(Address dst, Address src, uint32_t size);
+// The return type is {int32_t} instead of {bool} to force the compiler to
+// zero-extend the result in the return register.
+int32_t memory_init_wrapper(Address data);
-void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size);
+// The return type is {int32_t} instead of {bool} to force the compiler to
+// zero-extend the result in the return register.
+int32_t memory_copy_wrapper(Address data);
+
+// The return type is {int32_t} instead of {bool} to force the compiler to
+// zero-extend the result in the return register.
+int32_t memory_fill_wrapper(Address data);
using WasmTrapCallbackForTesting = void (*)();
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
index 9469855ee5..a71152518a 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -24,7 +24,7 @@ WasmCode*& WasmImportWrapperCache::operator[](
}
WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
- FunctionSig* sig) const {
+ const FunctionSig* sig) const {
base::MutexGuard lock(&mutex_);
auto it = entry_map_.find({kind, sig});
DCHECK(it != entry_map_.end());
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index 6172881dc7..39f57669cd 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -23,7 +23,7 @@ using FunctionSig = Signature<ValueType>;
// Implements a cache for import wrappers.
class WasmImportWrapperCache {
public:
- using CacheKey = std::pair<compiler::WasmImportCallKind, FunctionSig*>;
+ using CacheKey = std::pair<compiler::WasmImportCallKind, const FunctionSig*>;
class CacheKeyHash {
public:
@@ -51,7 +51,7 @@ class WasmImportWrapperCache {
// Thread-safe. Assumes the key exists in the map.
V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
- FunctionSig* sig) const;
+ const FunctionSig* sig) const;
~WasmImportWrapperCache();
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index e8b073c2ce..b1dc8f22ef 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -26,6 +26,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
@@ -1301,12 +1302,14 @@ class ThreadImpl {
WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
Isolate* isolate) {
DCHECK(isolate->has_pending_exception());
+ bool catchable =
+ isolate->is_catchable_by_wasm(isolate->pending_exception());
DCHECK_LT(0, activations_.size());
Activation& act = activations_.back();
while (frames_.size() > act.fp) {
Frame& frame = frames_.back();
InterpreterCode* code = frame.code;
- if (code->side_table->HasEntryAt(frame.pc)) {
+ if (catchable && code->side_table->HasEntryAt(frame.pc)) {
TRACE("----- HANDLE -----\n");
Push(WasmValue(handle(isolate->pending_exception(), isolate)));
isolate->clear_pending_exception();
@@ -1453,23 +1456,24 @@ class ThreadImpl {
}
pc_t InitLocals(InterpreterCode* code) {
- for (auto p : code->locals.type_list) {
+ for (ValueType p : code->locals.type_list) {
WasmValue val;
- switch (p) {
+ switch (p.kind()) {
#define CASE_TYPE(valuetype, ctype) \
- case valuetype: \
+ case ValueType::valuetype: \
val = WasmValue(ctype{}); \
break;
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef: {
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef: {
val = WasmValue(isolate_->factory()->null_value());
break;
}
- default:
+ case ValueType::kStmt:
+ case ValueType::kBottom:
UNREACHABLE();
break;
}
@@ -1752,15 +1756,11 @@ class ThreadImpl {
bool ExtractAtomicWaitNotifyParams(Decoder* decoder, InterpreterCode* code,
pc_t pc, int* const len,
uint32_t* buffer_offset, type* val,
- double* timeout = nullptr) {
+ int64_t* timeout = nullptr) {
MemoryAccessImmediate<Decoder::kValidate> imm(decoder, code->at(pc + 1),
sizeof(type));
if (timeout) {
- double timeout_ns = Pop().to<int64_t>();
- *timeout = (timeout_ns < 0)
- ? V8_INFINITY
- : timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
- base::Time::kMicrosecondsPerMillisecond);
+ *timeout = Pop().to<int64_t>();
}
*val = Pop().to<type>();
auto index = Pop().to<uint32_t>();
@@ -1828,7 +1828,8 @@ class ThreadImpl {
Address src_addr =
instance_object_->data_segment_starts()[imm.data_segment_index] +
src;
- memory_copy_wrapper(dst_addr, src_addr, size);
+ std::memmove(reinterpret_cast<void*>(dst_addr),
+ reinterpret_cast<void*>(src_addr), size);
return true;
}
case kExprDataDrop: {
@@ -1854,7 +1855,8 @@ class ThreadImpl {
return false;
}
- memory_copy_wrapper(dst_addr, src_addr, size);
+ std::memmove(reinterpret_cast<void*>(dst_addr),
+ reinterpret_cast<void*>(src_addr), size);
return true;
}
case kExprMemoryFill: {
@@ -1870,7 +1872,7 @@ class ThreadImpl {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
- memory_fill_wrapper(dst_addr, value, size);
+ std::memset(reinterpret_cast<void*>(dst_addr), value, size);
return true;
}
case kExprTableInit: {
@@ -1951,12 +1953,12 @@ class ThreadImpl {
// Even when table.fill goes out-of-bounds, as many entries as possible
// are put into the table. Only afterwards we trap.
uint32_t fill_count = std::min(count, table_size - start);
- WasmTableObject::Fill(isolate_, table, start, value, fill_count);
-
if (fill_count < count) {
DoTrap(kTrapTableOutOfBounds, pc);
return false;
}
+ WasmTableObject::Fill(isolate_, table, start, value, fill_count);
+
*len += imm.length;
return true;
}
@@ -2169,7 +2171,7 @@ class ThreadImpl {
break;
case kExprI32AtomicWait: {
int32_t val;
- double timeout;
+ int64_t timeout;
uint32_t buffer_offset;
if (!ExtractAtomicWaitNotifyParams<int32_t>(
decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
@@ -2178,14 +2180,14 @@ class ThreadImpl {
HandleScope handle_scope(isolate_);
Handle<JSArrayBuffer> array_buffer(
instance_object_->memory_object().array_buffer(), isolate_);
- auto result = FutexEmulation::Wait32(isolate_, array_buffer,
- buffer_offset, val, timeout);
+ auto result = FutexEmulation::WaitWasm32(isolate_, array_buffer,
+ buffer_offset, val, timeout);
Push(WasmValue(result.ToSmi().value()));
break;
}
case kExprI64AtomicWait: {
int64_t val;
- double timeout;
+ int64_t timeout;
uint32_t buffer_offset;
if (!ExtractAtomicWaitNotifyParams<int64_t>(
decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
@@ -2194,8 +2196,8 @@ class ThreadImpl {
HandleScope handle_scope(isolate_);
Handle<JSArrayBuffer> array_buffer(
instance_object_->memory_object().array_buffer(), isolate_);
- auto result = FutexEmulation::Wait64(isolate_, array_buffer,
- buffer_offset, val, timeout);
+ auto result = FutexEmulation::WaitWasm64(isolate_, array_buffer,
+ buffer_offset, val, timeout);
Push(WasmValue(result.ToSmi().value()));
break;
}
@@ -2373,10 +2375,33 @@ class ThreadImpl {
UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
+ UNOP_CASE(I32x4Abs, i32x4, int4, 4, std::abs(a))
UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
+ UNOP_CASE(I16x8Abs, i16x8, int8, 8, std::abs(a))
UNOP_CASE(I8x16Neg, i8x16, int16, 16, base::NegateWithWraparound(a))
+ UNOP_CASE(I8x16Abs, i8x16, int16, 16, std::abs(a))
#undef UNOP_CASE
+
+// Cast to double in the call to signbit is due to an MSVC issue, see
+// https://github.com/microsoft/STL/issues/519.
+#define BITMASK_CASE(op, name, stype, count) \
+ case kExpr##op: { \
+ WasmValue v = Pop(); \
+ stype s = v.to_s128().to_##name(); \
+ int32_t res = 0; \
+ for (size_t i = 0; i < count; ++i) { \
+ bool sign = std::signbit(static_cast<double>(s.val[i])); \
+ res |= (sign << i); \
+ } \
+ Push(WasmValue(res)); \
+ return true; \
+ }
+ BITMASK_CASE(I8x16BitMask, i8x16, int16, 16)
+ BITMASK_CASE(I16x8BitMask, i16x8, int8, 8)
+ BITMASK_CASE(I32x4BitMask, i32x4, int4, 4)
+#undef BITMASK_CASE
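For concreteness, a standalone sketch of what {I8x16BitMask} computes: bit i of the result is the sign bit of lane i (the double cast sidesteps the MSVC {std::signbit} overload issue referenced above):

    #include <cmath>
    #include <cstdint>

    int32_t I8x16BitMaskSketch(const int8_t lanes[16]) {
      int32_t res = 0;
      for (int i = 0; i < 16; ++i) {
        bool sign = std::signbit(static_cast<double>(lanes[i]));
        res |= static_cast<int32_t>(sign) << i;
      }
      // E.g. lanes {-1, 1, -1, 1, ...} yield 0x5555 (bits 0, 2, 4, ... set).
      return res;
    }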
+
#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
case kExpr##op: { \
WasmValue v2 = Pop(); \
@@ -2827,28 +2852,28 @@ class ThreadImpl {
sp_t base_index = StackHeight() - sig->parameter_count();
for (size_t i = 0; i < sig->parameter_count(); ++i) {
WasmValue value = GetStackValue(base_index + i);
- switch (sig->GetParam(i)) {
- case kWasmI32: {
+ switch (sig->GetParam(i).kind()) {
+ case ValueType::kI32: {
uint32_t u32 = value.to_u32();
EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
break;
}
- case kWasmF32: {
+ case ValueType::kF32: {
uint32_t f32 = value.to_f32_boxed().get_bits();
EncodeI32ExceptionValue(encoded_values, &encoded_index, f32);
break;
}
- case kWasmI64: {
+ case ValueType::kI64: {
uint64_t u64 = value.to_u64();
EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
break;
}
- case kWasmF64: {
+ case ValueType::kF64: {
uint64_t f64 = value.to_f64_boxed().get_bits();
EncodeI64ExceptionValue(encoded_values, &encoded_index, f64);
break;
}
- case kWasmS128: {
+ case ValueType::kS128: {
int4 s128 = value.to_s128().to_i32x4();
EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[0]);
EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[1]);
@@ -2856,16 +2881,17 @@ class ThreadImpl {
EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[3]);
break;
}
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef: {
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef: {
Handle<Object> anyref = value.to_anyref();
DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef, anyref->IsNull());
encoded_values->set(encoded_index++, *anyref);
break;
}
- default:
+ case ValueType::kStmt:
+ case ValueType::kBottom:
UNREACHABLE();
}
}
@@ -2925,32 +2951,32 @@ class ThreadImpl {
uint32_t encoded_index = 0;
for (size_t i = 0; i < sig->parameter_count(); ++i) {
WasmValue value;
- switch (sig->GetParam(i)) {
- case kWasmI32: {
+ switch (sig->GetParam(i).kind()) {
+ case ValueType::kI32: {
uint32_t u32 = 0;
DecodeI32ExceptionValue(encoded_values, &encoded_index, &u32);
value = WasmValue(u32);
break;
}
- case kWasmF32: {
+ case ValueType::kF32: {
uint32_t f32_bits = 0;
DecodeI32ExceptionValue(encoded_values, &encoded_index, &f32_bits);
value = WasmValue(Float32::FromBits(f32_bits));
break;
}
- case kWasmI64: {
+ case ValueType::kI64: {
uint64_t u64 = 0;
DecodeI64ExceptionValue(encoded_values, &encoded_index, &u64);
value = WasmValue(u64);
break;
}
- case kWasmF64: {
+ case ValueType::kF64: {
uint64_t f64_bits = 0;
DecodeI64ExceptionValue(encoded_values, &encoded_index, &f64_bits);
value = WasmValue(Float64::FromBits(f64_bits));
break;
}
- case kWasmS128: {
+ case ValueType::kS128: {
int4 s128 = {0, 0, 0, 0};
uint32_t* vals = reinterpret_cast<uint32_t*>(s128.val);
DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[0]);
@@ -2960,16 +2986,17 @@ class ThreadImpl {
value = WasmValue(Simd128(s128));
break;
}
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef: {
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef: {
Handle<Object> anyref(encoded_values->get(encoded_index++), isolate_);
DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef, anyref->IsNull());
value = WasmValue(anyref);
break;
}
- default:
+ case ValueType::kStmt:
+ case ValueType::kBottom:
UNREACHABLE();
}
Push(value);
@@ -3032,8 +3059,10 @@ class ThreadImpl {
}
// If max is 0, break. If max is positive (a limit is set), decrement it.
- if (max == 0) break;
- if (max > 0) --max;
+ if (max >= 0 && WasmOpcodes::IsBreakable(opcode)) {
+ if (max == 0) break;
+ --max;
+ }
USE(skip);
TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
@@ -3094,6 +3123,7 @@ class ThreadImpl {
case kExprRethrow: {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue ex = Pop();
+ if (ex.to_anyref()->IsNull()) return DoTrap(kTrapRethrowNullRef, pc);
CommitPc(pc); // Needed for local unwinding.
if (!DoRethrowException(ex)) return;
ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
@@ -3105,6 +3135,7 @@ class ThreadImpl {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue ex = Pop();
Handle<Object> exception = ex.to_anyref();
+ if (exception->IsNull()) return DoTrap(kTrapBrOnExnNullRef, pc);
if (MatchingExceptionTag(exception, imm.index.index)) {
imm.index.exception = &module()->exceptions[imm.index.index];
DoUnpackException(imm.index.exception, exception);
@@ -3408,9 +3439,9 @@ class ThreadImpl {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
auto& global = module()->globals[imm.index];
- switch (global.type) {
+ switch (global.type.kind()) {
#define CASE_TYPE(valuetype, ctype) \
- case valuetype: { \
+ case ValueType::valuetype: { \
uint8_t* ptr = \
WasmInstanceObject::GetGlobalStorage(instance_object_, global); \
WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \
@@ -3419,10 +3450,10 @@ class ThreadImpl {
}
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef: {
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef: {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index; // The index into the buffer.
@@ -3434,7 +3465,8 @@ class ThreadImpl {
global_buffer->set(global_index, *ref);
break;
}
- default:
+ case ValueType::kStmt:
+ case ValueType::kBottom:
UNREACHABLE();
}
len = 1 + imm.length;
@@ -3791,27 +3823,28 @@ class ThreadImpl {
sp_t plimit = top ? top->plimit() : 0;
sp_t llimit = top ? top->llimit() : 0;
for (size_t i = sp; i < StackHeight(); ++i) {
- if (i < plimit)
+ if (i < plimit) {
PrintF(" p%zu:", i);
- else if (i < llimit)
+ } else if (i < llimit) {
PrintF(" l%zu:", i);
- else
+ } else {
PrintF(" s%zu:", i);
+ }
WasmValue val = GetStackValue(i);
- switch (val.type()) {
- case kWasmI32:
+ switch (val.type().kind()) {
+ case ValueType::kI32:
PrintF("i32:%d", val.to<int32_t>());
break;
- case kWasmI64:
+ case ValueType::kI64:
PrintF("i64:%" PRId64 "", val.to<int64_t>());
break;
- case kWasmF32:
+ case ValueType::kF32:
PrintF("f32:%f", val.to<float>());
break;
- case kWasmF64:
+ case ValueType::kF64:
PrintF("f64:%lf", val.to<double>());
break;
- case kWasmS128: {
+ case ValueType::kS128: {
// This defaults to tracing all S128 values as i32x4 values for now,
// when there is more state to know what type of values are on the
// stack, the right format should be printed here.
@@ -3819,7 +3852,7 @@ class ThreadImpl {
PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
break;
}
- case kWasmAnyRef: {
+ case ValueType::kAnyRef: {
Handle<Object> ref = val.to_anyref();
if (ref->IsNull()) {
PrintF("ref:null");
@@ -3828,10 +3861,15 @@ class ThreadImpl {
}
break;
}
- case kWasmStmt:
+ case ValueType::kStmt:
PrintF("void");
break;
- default:
+ case ValueType::kFuncRef:
+ case ValueType::kExnRef:
+ case ValueType::kNullRef:
+ PrintF("(func|null|exn)ref:unimplemented");
+ break;
+ case ValueType::kBottom:
UNREACHABLE();
break;
}
@@ -3850,7 +3888,7 @@ class ThreadImpl {
ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
Handle<Object> object_ref,
const WasmCode* code,
- FunctionSig* sig) {
+ const FunctionSig* sig) {
int num_args = static_cast<int>(sig->parameter_count());
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
@@ -3872,23 +3910,23 @@ class ThreadImpl {
sp_t base_index = StackHeight() - num_args;
for (int i = 0; i < num_args; ++i) {
WasmValue arg = GetStackValue(base_index + i);
- switch (sig->GetParam(i)) {
- case kWasmI32:
+ switch (sig->GetParam(i).kind()) {
+ case ValueType::kI32:
packer.Push(arg.to<uint32_t>());
break;
- case kWasmI64:
+ case ValueType::kI64:
packer.Push(arg.to<uint64_t>());
break;
- case kWasmF32:
+ case ValueType::kF32:
packer.Push(arg.to<float>());
break;
- case kWasmF64:
+ case ValueType::kF64:
packer.Push(arg.to<double>());
break;
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef:
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef:
DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef,
arg.to_anyref()->IsNull());
packer.Push(arg.to_anyref()->ptr());
@@ -3914,23 +3952,23 @@ class ThreadImpl {
// Push return values.
packer.Reset();
for (size_t i = 0; i < sig->return_count(); i++) {
- switch (sig->GetReturn(i)) {
- case kWasmI32:
+ switch (sig->GetReturn(i).kind()) {
+ case ValueType::kI32:
Push(WasmValue(packer.Pop<uint32_t>()));
break;
- case kWasmI64:
+ case ValueType::kI64:
Push(WasmValue(packer.Pop<uint64_t>()));
break;
- case kWasmF32:
+ case ValueType::kF32:
Push(WasmValue(packer.Pop<float>()));
break;
- case kWasmF64:
+ case ValueType::kF64:
Push(WasmValue(packer.Pop<double>()));
break;
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef: {
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef: {
Handle<Object> ref(Object(packer.Pop<Address>()), isolate);
DCHECK_IMPLIES(sig->GetReturn(i) == kWasmNullRef, ref->IsNull());
Push(WasmValue(ref));
@@ -3977,7 +4015,7 @@ class ThreadImpl {
// and compiled we may get an exception.
if (code == nullptr) return TryHandleException(isolate_);
- FunctionSig* sig = module()->functions[function_index].sig;
+ const FunctionSig* sig = module()->functions[function_index].sig;
return CallExternalWasmFunction(isolate_, object_ref, code, sig);
}
@@ -4002,7 +4040,7 @@ class ThreadImpl {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
- FunctionSig* signature = module()->signatures[sig_index];
+ const FunctionSig* signature = module()->signatures[sig_index];
Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
WasmCode* code = GetTargetCode(isolate_, entry.target());
@@ -4319,7 +4357,13 @@ ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
// Create some dummy structures, to avoid special-casing the implementation
// just for testing.
FunctionSig sig(0, 0, nullptr);
- WasmFunction function{&sig, 0, 0, {0, 0}, false, false};
+ WasmFunction function{&sig, // sig
+ 0, // func_index
+ 0, // sig_index
+ {0, 0}, // code
+ false, // imported
+ false, // exported
+ false}; // declared
InterpreterCode code{
&function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index cb1f24734f..fdc02771b4 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -24,12 +24,14 @@
#include "src/tasks/task-utils.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
using v8::internal::wasm::ErrorThrower;
+using v8::internal::wasm::ScheduledErrorThrower;
namespace v8 {
@@ -137,35 +139,6 @@ namespace {
} \
} while (false)
-// Like an ErrorThrower, but turns all pending exceptions into scheduled
-// exceptions when going out of scope. Use this in API methods.
-// Note that pending exceptions are not necessarily created by the ErrorThrower,
-// but e.g. by the wasm start function. There might also be a scheduled
-// exception, created by another API call (e.g. v8::Object::Get). But there
-// should never be both pending and scheduled exceptions.
-class ScheduledErrorThrower : public ErrorThrower {
- public:
- ScheduledErrorThrower(i::Isolate* isolate, const char* context)
- : ErrorThrower(isolate, context) {}
-
- ~ScheduledErrorThrower();
-};
-
-ScheduledErrorThrower::~ScheduledErrorThrower() {
- // There should never be both a pending and a scheduled exception.
- DCHECK(!isolate()->has_scheduled_exception() ||
- !isolate()->has_pending_exception());
- // Don't throw another error if there is already a scheduled error.
- if (isolate()->has_scheduled_exception()) {
- Reset();
- } else if (isolate()->has_pending_exception()) {
- Reset();
- isolate()->OptionalRescheduleException(false);
- } else if (error()) {
- isolate()->ScheduleThrow(*Reify());
- }
-}
-
i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
}
@@ -1148,14 +1121,15 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
int64_t initial = 0;
if (!GetInitialOrMinimumProperty(isolate, &thrower, context, descriptor,
- &initial, 0, i::wasm::max_mem_pages())) {
+ &initial, 0,
+ i::wasm::max_initial_mem_pages())) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = -1;
if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "maximum"), nullptr, &maximum,
- initial, i::wasm::kSpecMaxWasmMemoryPages)) {
+ initial, i::wasm::max_maximum_mem_pages())) {
return;
}
@@ -1305,8 +1279,8 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Convert value to a WebAssembly value, the default value is 0.
Local<v8::Value> value = Local<Value>::Cast(args[1]);
- switch (type) {
- case i::wasm::kWasmI32: {
+ switch (type.kind()) {
+ case i::wasm::ValueType::kI32: {
int32_t i32_value = 0;
if (!value->IsUndefined()) {
v8::Local<v8::Int32> int32_value;
@@ -1316,7 +1290,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetI32(i32_value);
break;
}
- case i::wasm::kWasmI64: {
+ case i::wasm::ValueType::kI64: {
int64_t i64_value = 0;
if (!value->IsUndefined()) {
if (!enabled_features.has_bigint()) {
@@ -1331,7 +1305,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetI64(i64_value);
break;
}
- case i::wasm::kWasmF32: {
+ case i::wasm::ValueType::kF32: {
float f32_value = 0;
if (!value->IsUndefined()) {
double f64_value = 0;
@@ -1343,7 +1317,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetF32(f32_value);
break;
}
- case i::wasm::kWasmF64: {
+ case i::wasm::ValueType::kF64: {
double f64_value = 0;
if (!value->IsUndefined()) {
v8::Local<v8::Number> number_value;
@@ -1353,8 +1327,8 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetF64(f64_value);
break;
}
- case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmExnRef: {
+ case i::wasm::ValueType::kAnyRef:
+ case i::wasm::ValueType::kExnRef: {
if (args.Length() < 2) {
// When no initial value is provided, we have to use the WebAssembly
// default value 'null', and not the JS default value 'undefined'.
@@ -1364,7 +1338,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetAnyRef(Utils::OpenHandle(*value));
break;
}
- case i::wasm::kWasmNullRef:
+ case i::wasm::ValueType::kNullRef:
if (args.Length() < 2) {
// When no initial value is provided, we have to use the WebAssembly
// default value 'null', and not the JS default value 'undefined'.
@@ -1375,7 +1349,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.TypeError("The value of nullref globals must be null");
}
break;
- case i::wasm::kWasmFuncRef: {
+ case i::wasm::ValueType::kFuncRef: {
if (args.Length() < 2) {
// When no initial value is provided, we have to use the WebAssembly
// default value 'null', and not the JS default value 'undefined'.
@@ -1390,7 +1364,9 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
break;
}
- default:
+ case i::wasm::ValueType::kStmt:
+ case i::wasm::ValueType::kS128:
+ case i::wasm::ValueType::kBottom:
UNREACHABLE();
}
@@ -1514,10 +1490,34 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.TypeError("Argument 1 must be a function");
return;
}
+ const i::wasm::FunctionSig* sig = builder.Build();
- i::wasm::FunctionSig* sig = builder.Build();
i::Handle<i::JSReceiver> callable =
Utils::OpenHandle(*args[1].As<Function>());
+ if (i::WasmExportedFunction::IsWasmExportedFunction(*callable)) {
+ if (*i::Handle<i::WasmExportedFunction>::cast(callable)->sig() == *sig) {
+ args.GetReturnValue().Set(Utils::ToLocal(callable));
+ return;
+ }
+
+ thrower.TypeError(
+ "The signature of Argument 1 (a WebAssembly function) does "
+ "not match the signature specified in Argument 0");
+ return;
+ }
+
+ if (i::WasmJSFunction::IsWasmJSFunction(*callable)) {
+ if (i::Handle<i::WasmJSFunction>::cast(callable)->MatchesSignature(sig)) {
+ args.GetReturnValue().Set(Utils::ToLocal(callable));
+ return;
+ }
+
+ thrower.TypeError(
+ "The signature of Argument 1 (a WebAssembly function) does "
+ "not match the signature specified in Argument 0");
+ return;
+ }
+
i::Handle<i::JSFunction> result =
i::WasmJSFunction::New(i_isolate, sig, callable);
args.GetReturnValue().Set(Utils::ToLocal(result));
@@ -1530,7 +1530,7 @@ void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Function.type()");
- i::wasm::FunctionSig* sig;
+ const i::wasm::FunctionSig* sig;
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
if (i::WasmExportedFunction::IsWasmExportedFunction(*arg0)) {
@@ -1697,8 +1697,8 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
uint64_t max_size64 = receiver->maximum_pages();
- if (max_size64 > uint64_t{i::wasm::max_mem_pages()}) {
- max_size64 = i::wasm::max_mem_pages();
+ if (max_size64 > uint64_t{i::wasm::max_maximum_mem_pages()}) {
+ max_size64 = i::wasm::max_maximum_mem_pages();
}
i::Handle<i::JSArrayBuffer> old_buffer(receiver->array_buffer(), i_isolate);
@@ -1783,11 +1783,11 @@ void WebAssemblyGlobalGetValueCommon(
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- switch (receiver->type()) {
- case i::wasm::kWasmI32:
+ switch (receiver->type().kind()) {
+ case i::wasm::ValueType::kI32:
return_value.Set(receiver->GetI32());
break;
- case i::wasm::kWasmI64: {
+ case i::wasm::ValueType::kI64: {
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
if (enabled_features.has_bigint()) {
Local<BigInt> value = BigInt::New(isolate, receiver->GetI64());
@@ -1798,21 +1798,23 @@ void WebAssemblyGlobalGetValueCommon(
}
break;
}
- case i::wasm::kWasmF32:
+ case i::wasm::ValueType::kF32:
return_value.Set(receiver->GetF32());
break;
- case i::wasm::kWasmF64:
+ case i::wasm::ValueType::kF64:
return_value.Set(receiver->GetF64());
break;
- case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmFuncRef:
- case i::wasm::kWasmNullRef:
- case i::wasm::kWasmExnRef:
+ case i::wasm::ValueType::kAnyRef:
+ case i::wasm::ValueType::kFuncRef:
+ case i::wasm::ValueType::kNullRef:
+ case i::wasm::ValueType::kExnRef:
DCHECK_IMPLIES(receiver->type() == i::wasm::kWasmNullRef,
receiver->GetRef()->IsNull());
return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
- default:
+ case i::wasm::ValueType::kBottom:
+ case i::wasm::ValueType::kStmt:
+ case i::wasm::ValueType::kS128:
UNREACHABLE();
}
}
@@ -1847,14 +1849,14 @@ void WebAssemblyGlobalSetValue(
return;
}
- switch (receiver->type()) {
- case i::wasm::kWasmI32: {
+ switch (receiver->type().kind()) {
+ case i::wasm::ValueType::kI32: {
int32_t i32_value = 0;
if (!args[0]->Int32Value(context).To(&i32_value)) return;
receiver->SetI32(i32_value);
break;
}
- case i::wasm::kWasmI64: {
+ case i::wasm::ValueType::kI64: {
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
if (enabled_features.has_bigint()) {
v8::Local<v8::BigInt> bigint_value;
@@ -1865,29 +1867,29 @@ void WebAssemblyGlobalSetValue(
}
break;
}
- case i::wasm::kWasmF32: {
+ case i::wasm::ValueType::kF32: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
receiver->SetF32(i::DoubleToFloat32(f64_value));
break;
}
- case i::wasm::kWasmF64: {
+ case i::wasm::ValueType::kF64: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
receiver->SetF64(f64_value);
break;
}
- case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmExnRef: {
+ case i::wasm::ValueType::kAnyRef:
+ case i::wasm::ValueType::kExnRef: {
receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
break;
}
- case i::wasm::kWasmNullRef:
+ case i::wasm::ValueType::kNullRef:
if (!receiver->SetNullRef(Utils::OpenHandle(*args[0]))) {
thrower.TypeError("The value of nullref must be null");
}
break;
- case i::wasm::kWasmFuncRef: {
+ case i::wasm::ValueType::kFuncRef: {
if (!receiver->SetFuncRef(i_isolate, Utils::OpenHandle(*args[0]))) {
thrower.TypeError(
"value of an anyfunc reference must be either null or an "
@@ -1895,7 +1897,9 @@ void WebAssemblyGlobalSetValue(
}
break;
}
- default:
+ case i::wasm::ValueType::kBottom:
+ case i::wasm::ValueType::kStmt:
+ case i::wasm::ValueType::kS128:
UNREACHABLE();
}
}
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 5206ad6212..02303fb69d 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -9,13 +9,17 @@
#include <cstdint>
#include <limits>
+#include "src/base/macros.h"
#include "src/wasm/wasm-constants.h"
namespace v8 {
namespace internal {
namespace wasm {
-constexpr size_t kSpecMaxWasmMemoryPages = 65536;
+// These two constants are defined in the Wasm JS API spec and as such only
+// concern JS embeddings.
+constexpr size_t kSpecMaxWasmInitialMemoryPages = 32767;
+constexpr size_t kSpecMaxWasmMaximumMemoryPages = 65536;
// The following limits are imposed by V8 on WebAssembly modules.
// The limits are agreed upon with other engines for consistency.
@@ -27,7 +31,9 @@ constexpr size_t kV8MaxWasmGlobals = 1000000;
constexpr size_t kV8MaxWasmExceptions = 1000000;
constexpr size_t kV8MaxWasmExceptionTypes = 1000000;
constexpr size_t kV8MaxWasmDataSegments = 100000;
-// Don't use this limit directly, but use the value of {max_mem_pages()}.
+// This indicates the maximum memory size our implementation supports.
+// Don't use this limit directly; use {max_initial_mem_pages()} instead
+// to take both the spec'ed limit and the command line flag into account.
constexpr size_t kV8MaxWasmMemoryPages = 65536; // = 4 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
@@ -43,8 +49,6 @@ constexpr size_t kV8MaxWasmTableInitEntries = 10000000;
constexpr size_t kV8MaxWasmTables = 1;
constexpr size_t kV8MaxWasmMemories = 1;
-static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
- "v8 should not be more permissive than the spec");
static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
"v8 should not exceed WebAssembly's non-web embedding limits");
static_assert(kV8MaxWasmTableInitEntries <= kV8MaxWasmTableSize,
@@ -58,11 +62,12 @@ constexpr uint64_t kWasmMaxHeapOffset =
// Defined in wasm-engine.cc.
// TODO(wasm): Make this size_t for wasm64. Currently the --wasm-max-mem-pages
// flag is only uint32_t.
-V8_EXPORT_PRIVATE uint32_t max_mem_pages();
+V8_EXPORT_PRIVATE uint32_t max_initial_mem_pages();
+V8_EXPORT_PRIVATE uint32_t max_maximum_mem_pages();
uint32_t max_table_init_entries();
inline uint64_t max_mem_bytes() {
- return uint64_t{max_mem_pages()} * kWasmPageSize;
+ return uint64_t{max_maximum_mem_pages()} * kWasmPageSize;
}
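As a sanity check on these limits: a Wasm page is 64 KiB, so the maximum of 65536 pages works out to 65536 × 65536 B = 2^32 B = 4 GiB, one past what fits in a uint32_t; hence {max_mem_bytes()} widens to {uint64_t} before multiplying.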
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 1bd32ef561..7e56ea6eae 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -15,7 +15,7 @@ namespace internal {
namespace wasm {
// TODO(wasm): optimize calling conventions to be both closer to C++ (to
-// reduce adapter costs for fast WASM <-> C++ calls) and to be more efficient
+// reduce adapter costs for fast Wasm <-> C++ calls) and to be more efficient
// in general.
#if V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
deleted file mode 100644
index bbb0d67f9c..0000000000
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ /dev/null
@@ -1,633 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "src/heap/heap-inl.h"
-#include "src/logging/counters.h"
-#include "src/objects/js-array-buffer-inl.h"
-#include "src/objects/objects-inl.h"
-#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-memory.h"
-#include "src/wasm/wasm-module.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-namespace {
-
-constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB
-
-void AddAllocationStatusSample(Isolate* isolate,
- WasmMemoryTracker::AllocationStatus status) {
- isolate->counters()->wasm_memory_allocation_result()->AddSample(
- static_cast<int>(status));
-}
-
-bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
- bool* did_retry) {
- // Try up to three times; getting rid of dead JSArrayBuffer allocations might
- // require two GCs because the first GC may be incremental and may have
- // floating garbage.
- static constexpr int kAllocationRetries = 2;
-
- for (int trial = 0;; ++trial) {
- if (fn()) return true;
- // {fn} failed. If {kAllocationRetries} is reached, fail.
- *did_retry = true;
- if (trial == kAllocationRetries) return false;
- // Otherwise, collect garbage and retry.
- // TODO(wasm): Since reservation limits are engine-wide, we should do an
- // engine-wide GC here (i.e. trigger a GC in each isolate using the engine,
- // and wait for them all to finish). See https://crbug.com/v8/9405.
- heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
- }
-}
-
-void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
- size_t size, size_t max_size,
- void** allocation_base,
- size_t* allocation_length) {
- using AllocationStatus = WasmMemoryTracker::AllocationStatus;
-#if V8_TARGET_ARCH_64_BIT
- constexpr bool kRequireFullGuardRegions = true;
-#else
- constexpr bool kRequireFullGuardRegions = false;
-#endif
- // Let the WasmMemoryTracker know we are going to reserve a bunch of
- // address space.
- size_t reservation_size = std::max(max_size, size);
- bool did_retry = false;
-
- auto reserve_memory_space = [&] {
- // For guard regions, we always allocate the largest possible offset
- // into the heap, so the addressable memory after the guard page can
- // be made inaccessible.
- //
- // To protect against 32-bit integer overflow issues, we also
- // protect the 2GiB before the valid part of the memory buffer.
- *allocation_length =
- kRequireFullGuardRegions
- ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
- : RoundUp(base::bits::RoundUpToPowerOfTwo(reservation_size),
- kWasmPageSize);
- DCHECK_GE(*allocation_length, size);
- DCHECK_GE(*allocation_length, kWasmPageSize);
-
- return memory_tracker->ReserveAddressSpace(*allocation_length);
- };
- if (!RunWithGCAndRetry(reserve_memory_space, heap, &did_retry)) {
- // Reset reservation_size to initial size so that at least the initial size
- // can be allocated if maximum size reservation is not possible.
- reservation_size = size;
-
- // We are over the address space limit. Fail.
- //
- // When running under the correctness fuzzer (i.e.
- // --correctness-fuzzer-suppressions is preset), we crash
- // instead so it is not incorrectly reported as a correctness
- // violation. See https://crbug.com/828293#c4
- if (FLAG_correctness_fuzzer_suppressions) {
- FATAL("could not allocate wasm memory");
- }
- AddAllocationStatusSample(
- heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
- return nullptr;
- }
-
- // The Reserve makes the whole region inaccessible by default.
- DCHECK_NULL(*allocation_base);
- auto allocate_pages = [&] {
- *allocation_base =
- AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
- kWasmPageSize, PageAllocator::kNoAccess);
- return *allocation_base != nullptr;
- };
- if (!RunWithGCAndRetry(allocate_pages, heap, &did_retry)) {
- memory_tracker->ReleaseReservation(*allocation_length);
- AddAllocationStatusSample(heap->isolate(), AllocationStatus::kOtherFailure);
- return nullptr;
- }
-
- byte* memory = reinterpret_cast<byte*>(*allocation_base);
- if (kRequireFullGuardRegions) {
- memory += kNegativeGuardSize;
- }
-
- // Make the part we care about accessible.
- auto commit_memory = [&] {
- return size == 0 || SetPermissions(GetPlatformPageAllocator(), memory,
- RoundUp(size, kWasmPageSize),
- PageAllocator::kReadWrite);
- };
- // SetPermissions commits the extra memory, which may put us over the
- // process memory limit. If so, report this as an OOM.
- if (!RunWithGCAndRetry(commit_memory, heap, &did_retry)) {
- V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
- }
-
- memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
- *allocation_length, memory, size);
- AddAllocationStatusSample(heap->isolate(),
- did_retry ? AllocationStatus::kSuccessAfterRetry
- : AllocationStatus::kSuccess);
- return memory;
-}
-
-#if V8_TARGET_ARCH_MIPS64
-// MIPS64 has a user space of 2^40 bytes on most processors,
-// address space limits needs to be smaller.
-constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
-#elif V8_TARGET_ARCH_64_BIT
-constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
-#else
-constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
-#endif
-
-} // namespace
-
-WasmMemoryTracker::~WasmMemoryTracker() {
- // All reserved address space should be released before the allocation tracker
- // is destroyed.
- DCHECK_EQ(reserved_address_space_, 0u);
- DCHECK_EQ(allocated_address_space_, 0u);
- DCHECK(allocations_.empty());
-}
-
-void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
- Heap* heap, size_t size, void** allocation_base,
- size_t* allocation_length) {
- return TryAllocateBackingStore(this, heap, size, size, allocation_base,
- allocation_length);
-}
-
-void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
- void* buffer_start) {
- base::MutexGuard scope_lock(&mutex_);
- ReleaseAllocation_Locked(nullptr, buffer_start);
- CHECK(FreePages(GetPlatformPageAllocator(),
- reinterpret_cast<void*>(memory.begin()), memory.size()));
-}
-
-bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
- size_t reservation_limit = kAddressSpaceLimit;
- while (true) {
- size_t old_count = reserved_address_space_.load();
- if (old_count > reservation_limit) return false;
- if (reservation_limit - old_count < num_bytes) return false;
- if (reserved_address_space_.compare_exchange_weak(old_count,
- old_count + num_bytes)) {
- return true;
- }
- }
-}
-
-void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
- size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
- USE(old_reserved);
- DCHECK_LE(num_bytes, old_reserved);
-}
-
-void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
- void* allocation_base,
- size_t allocation_length,
- void* buffer_start,
- size_t buffer_length) {
- base::MutexGuard scope_lock(&mutex_);
-
- allocated_address_space_ += allocation_length;
- // Report address space usage in MiB so the full range fits in an int on all
- // platforms.
- isolate->counters()->wasm_address_space_usage_mb()->AddSample(
- static_cast<int>(allocated_address_space_ / MB));
-
- allocations_.emplace(buffer_start,
- AllocationData{allocation_base, allocation_length,
- buffer_start, buffer_length});
-}
-
-WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation_Locked(
- Isolate* isolate, const void* buffer_start) {
- auto find_result = allocations_.find(buffer_start);
- CHECK_NE(find_result, allocations_.end());
-
- size_t num_bytes = find_result->second.allocation_length;
- DCHECK_LE(num_bytes, reserved_address_space_);
- DCHECK_LE(num_bytes, allocated_address_space_);
- reserved_address_space_ -= num_bytes;
- allocated_address_space_ -= num_bytes;
-
- AllocationData allocation_data = find_result->second;
- allocations_.erase(find_result);
- return allocation_data;
-}
-
-const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
- const void* buffer_start) {
- base::MutexGuard scope_lock(&mutex_);
- const auto& result = allocations_.find(buffer_start);
- if (result != allocations_.end()) {
- return &result->second;
- }
- return nullptr;
-}
-
-bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
- base::MutexGuard scope_lock(&mutex_);
- return allocations_.find(buffer_start) != allocations_.end();
-}
-
-bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) {
- base::MutexGuard scope_lock(&mutex_);
- const auto& result = allocations_.find(buffer_start);
- // Should be a wasm allocation, and registered as a shared allocation.
- return (result != allocations_.end() && result->second.is_shared);
-}
-
-void WasmMemoryTracker::MarkWasmMemoryNotGrowable(
- Handle<JSArrayBuffer> buffer) {
- base::MutexGuard scope_lock(&mutex_);
- const auto& allocation = allocations_.find(buffer->backing_store());
- if (allocation == allocations_.end()) return;
- allocation->second.is_growable = false;
-}
-
-bool WasmMemoryTracker::IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer) {
- base::MutexGuard scope_lock(&mutex_);
- if (buffer->backing_store() == nullptr) return true;
- const auto& allocation = allocations_.find(buffer->backing_store());
- if (allocation == allocations_.end()) return false;
- return allocation->second.is_growable;
-}
-
-bool WasmMemoryTracker::FreeWasmMemory(Isolate* isolate,
- const void* buffer_start) {
- base::MutexGuard scope_lock(&mutex_);
- const auto& result = allocations_.find(buffer_start);
- if (result == allocations_.end()) return false;
- if (result->second.is_shared) {
- // This is a shared WebAssembly.Memory allocation
- FreeMemoryIfNotShared_Locked(isolate, buffer_start);
- return true;
- }
- // This is a WebAssembly.Memory allocation
- const AllocationData allocation =
- ReleaseAllocation_Locked(isolate, buffer_start);
- CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
- allocation.allocation_length));
- return true;
-}
-
-void WasmMemoryTracker::RegisterWasmMemoryAsShared(
- Handle<WasmMemoryObject> object, Isolate* isolate) {
- // Only register with the tracker if shared grow is enabled.
- if (!FLAG_wasm_grow_shared_memory) return;
- const void* backing_store = object->array_buffer().backing_store();
- // TODO(V8:8810): This should be a DCHECK, currently some tests do not
- // use a full WebAssembly.Memory, and fail on registering so return early.
- if (!IsWasmMemory(backing_store)) return;
- {
- base::MutexGuard scope_lock(&mutex_);
- // Register as a shared allocation when it is postMessage'd. This happens
- // only the first time a buffer is shared over postMessage, and tracks all
- // the memory objects that are associated with this backing store.
- RegisterSharedWasmMemory_Locked(object, isolate);
- // Add isolate to backing store mapping.
- isolates_per_buffer_[backing_store].emplace(isolate);
- }
-}
-
-void WasmMemoryTracker::SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
- size_t new_size) {
- base::MutexGuard scope_lock(&mutex_);
- // Keep track of the new size of the buffer associated with each backing
- // store.
- AddBufferToGrowMap_Locked(old_buffer, new_size);
- // Request interrupt to GROW_SHARED_MEMORY to other isolates
- TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer);
-}
-
-void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) {
- base::MutexGuard scope_lock(&mutex_);
- // For every buffer in the grow_entry_map_, update the size for all the
- // memory objects associated with this isolate.
- for (auto it = grow_update_map_.begin(); it != grow_update_map_.end();) {
- UpdateSharedMemoryStateOnInterrupt_Locked(isolate, it->first, it->second);
- // If all the isolates that share this buffer have hit a stack check, their
- // memory objects are updated, and this grow entry can be erased.
- if (AreAllIsolatesUpdated_Locked(it->first)) {
- it = grow_update_map_.erase(it);
- } else {
- it++;
- }
- }
-}
-
-void WasmMemoryTracker::RegisterSharedWasmMemory_Locked(
- Handle<WasmMemoryObject> object, Isolate* isolate) {
- DCHECK(object->array_buffer().is_shared());
-
- void* backing_store = object->array_buffer().backing_store();
- // The allocation of a WasmMemoryObject should always be registered with the
- // WasmMemoryTracker.
- const auto& result = allocations_.find(backing_store);
- if (result == allocations_.end()) return;
-
- // Register the allocation as shared, if not already marked as shared.
- if (!result->second.is_shared) result->second.is_shared = true;
-
- // Create persistent global handles for the memory objects that are shared
- GlobalHandles* global_handles = isolate->global_handles();
- object = global_handles->Create(*object);
-
- // Add to memory_object_vector to track memory objects, instance objects
- // that will need to be updated on a Grow call
- result->second.memory_object_vector.push_back(
- SharedMemoryObjectState(object, isolate));
-}
-
-void WasmMemoryTracker::AddBufferToGrowMap_Locked(
- Handle<JSArrayBuffer> old_buffer, size_t new_size) {
- void* backing_store = old_buffer->backing_store();
- auto entry = grow_update_map_.find(old_buffer->backing_store());
- if (entry == grow_update_map_.end()) {
- // No pending grow for this backing store, add to map.
- grow_update_map_.emplace(backing_store, new_size);
- return;
- }
- // If grow on the same buffer is requested before the update is complete,
- // the new_size should always be greater or equal to the old_size. Equal
- // in the case that grow(0) is called, but new buffer handles are mandated
- // by the Spec.
- CHECK_LE(entry->second, new_size);
- entry->second = new_size;
- // Flush instances_updated every time a new grow size needs to be updated.
- ClearUpdatedInstancesOnPendingGrow_Locked(backing_store);
-}
-
-void WasmMemoryTracker::TriggerSharedGrowInterruptOnAllIsolates_Locked(
- Handle<JSArrayBuffer> old_buffer) {
- // Request a GrowShareMemory interrupt on all the isolates that share
- // the backing store.
- const auto& isolates = isolates_per_buffer_.find(old_buffer->backing_store());
- for (const auto& isolate : isolates->second) {
- isolate->stack_guard()->RequestGrowSharedMemory();
- }
-}
-
-void WasmMemoryTracker::UpdateSharedMemoryStateOnInterrupt_Locked(
- Isolate* isolate, void* backing_store, size_t new_size) {
- // Update objects only if there are memory objects that share this backing
- // store, and this isolate is marked as one of the isolates that shares this
- // buffer.
- if (MemoryObjectsNeedUpdate_Locked(isolate, backing_store)) {
- UpdateMemoryObjectsForIsolate_Locked(isolate, backing_store, new_size);
- // As the memory objects are updated, add this isolate to a set of isolates
- // that are updated on grow. This state is maintained to track if all the
- // isolates that share the backing store have hit a StackCheck.
- isolates_updated_on_grow_[backing_store].emplace(isolate);
- }
-}
-
-bool WasmMemoryTracker::AreAllIsolatesUpdated_Locked(
- const void* backing_store) {
- const auto& buffer_isolates = isolates_per_buffer_.find(backing_store);
- // No isolates share this buffer.
- if (buffer_isolates == isolates_per_buffer_.end()) return true;
- const auto& updated_isolates = isolates_updated_on_grow_.find(backing_store);
- // Some isolates share the buffer, but no isolates have been updated yet.
- if (updated_isolates == isolates_updated_on_grow_.end()) return false;
- if (buffer_isolates->second == updated_isolates->second) {
- // If all the isolates that share this backing_store have hit a stack check,
- // and the memory objects have been updated, remove the entry from the
- // update map, and return true.
- isolates_updated_on_grow_.erase(backing_store);
- return true;
- }
- return false;
-}
-
-void WasmMemoryTracker::ClearUpdatedInstancesOnPendingGrow_Locked(
- const void* backing_store) {
- // On multiple grows to the same buffer, the entries for that buffer should be
- // flushed. This is done so that any consecutive grows to the same buffer will
- // update all instances that share this buffer.
- const auto& value = isolates_updated_on_grow_.find(backing_store);
- if (value != isolates_updated_on_grow_.end()) {
- value->second.clear();
- }
-}
-
-void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked(
- Isolate* isolate, void* backing_store, size_t new_size) {
- const auto& result = allocations_.find(backing_store);
- if (result == allocations_.end() || !result->second.is_shared) return;
- for (const auto& memory_obj_state : result->second.memory_object_vector) {
- DCHECK_NE(memory_obj_state.isolate, nullptr);
- if (isolate == memory_obj_state.isolate) {
- HandleScope scope(isolate);
- Handle<WasmMemoryObject> memory_object = memory_obj_state.memory_object;
- DCHECK(memory_object->IsWasmMemoryObject());
- DCHECK(memory_object->array_buffer().is_shared());
- // Permissions were adjusted, but create a new buffer with the new size
- // and the old attributes. The buffer has already been allocated;
- // just create a new buffer with the same backing store.
- bool is_external = memory_object->array_buffer().is_external();
- Handle<JSArrayBuffer> new_buffer = SetupArrayBuffer(
- isolate, backing_store, new_size, is_external, SharedFlag::kShared);
- memory_obj_state.memory_object->update_instances(isolate, new_buffer);
- }
- }
-}
-
-bool WasmMemoryTracker::MemoryObjectsNeedUpdate_Locked(
- Isolate* isolate, const void* backing_store) {
- // Return true if this buffer has memory_objects it needs to update.
- const auto& result = allocations_.find(backing_store);
- if (result == allocations_.end() || !result->second.is_shared) return false;
- // Only update if the buffer has memory objects that need to be updated.
- if (result->second.memory_object_vector.empty()) return false;
- const auto& isolate_entry = isolates_per_buffer_.find(backing_store);
- return (isolate_entry != isolates_per_buffer_.end() &&
- isolate_entry->second.count(isolate) != 0);
-}
-
-void WasmMemoryTracker::FreeMemoryIfNotShared_Locked(
- Isolate* isolate, const void* backing_store) {
- RemoveSharedBufferState_Locked(isolate, backing_store);
- if (CanFreeSharedMemory_Locked(backing_store)) {
- const AllocationData allocation =
- ReleaseAllocation_Locked(isolate, backing_store);
- CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
- allocation.allocation_length));
- }
-}
-
-bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) {
- const auto& value = isolates_per_buffer_.find(backing_store);
- // If no isolates share this buffer, backing store can be freed.
- // Erase the buffer entry.
- if (value == isolates_per_buffer_.end() || value->second.empty()) return true;
- return false;
-}
-
-void WasmMemoryTracker::RemoveSharedBufferState_Locked(
- Isolate* isolate, const void* backing_store) {
- if (isolate != nullptr) {
- DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
- RemoveIsolateFromBackingStore_Locked(isolate, backing_store);
- } else {
- // This happens for externalized contents: clean up the shared memory state
- // associated with this buffer across all isolates.
- DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(backing_store);
- }
-}
-
-void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
- const void* backing_store) {
- const auto& result = allocations_.find(backing_store);
- CHECK(result != allocations_.end() && result->second.is_shared);
- auto& object_vector = result->second.memory_object_vector;
- if (object_vector.empty()) return;
- for (const auto& mem_obj_state : object_vector) {
- GlobalHandles::Destroy(mem_obj_state.memory_object.location());
- }
- object_vector.clear();
- // Remove isolate from backing store map.
- isolates_per_buffer_.erase(backing_store);
-}
-
-void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
- Isolate* isolate, const void* backing_store) {
- // This is called when an internal handle to the ArrayBuffer should be
- // freed, i.e. on heap teardown for that isolate; remove the memory objects
- // that are associated with this buffer and isolate.
- const auto& result = allocations_.find(backing_store);
- CHECK(result != allocations_.end() && result->second.is_shared);
- auto& object_vector = result->second.memory_object_vector;
- if (object_vector.empty()) return;
- for (auto it = object_vector.begin(); it != object_vector.end();) {
- if (isolate == it->isolate) {
- GlobalHandles::Destroy(it->memory_object.location());
- it = object_vector.erase(it);
- } else {
- ++it;
- }
- }
-}
-
-void WasmMemoryTracker::RemoveIsolateFromBackingStore_Locked(
- Isolate* isolate, const void* backing_store) {
- const auto& isolates = isolates_per_buffer_.find(backing_store);
- if (isolates == isolates_per_buffer_.end() || isolates->second.empty())
- return;
- isolates->second.erase(isolate);
-}
-
-void WasmMemoryTracker::DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate) {
- base::MutexGuard scope_lock(&mutex_);
- // This is possible for buffers that were externalized and whose handles
- // have been freed: the backing store was not released because the
- // externalized contents were still using it.
- if (isolates_per_buffer_.empty()) return;
- for (auto& entry : isolates_per_buffer_) {
- if (entry.second.find(isolate) == entry.second.end()) continue;
- const void* backing_store = entry.first;
- entry.second.erase(isolate);
- DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
- }
- for (auto& buffer_isolates : isolates_updated_on_grow_) {
- auto& isolates = buffer_isolates.second;
- isolates.erase(isolate);
- }
-}
-
-Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
- size_t size, bool is_external,
- SharedFlag shared) {
- Handle<JSArrayBuffer> buffer =
- isolate->factory()->NewJSArrayBuffer(shared, AllocationType::kOld);
- constexpr bool is_wasm_memory = true;
- JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
- shared, is_wasm_memory);
- buffer->set_is_detachable(false);
- return buffer;
-}
-
-MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate,
- size_t size,
- size_t maximum_size,
- SharedFlag shared) {
- // Enforce flag-limited maximum allocation size.
- if (size > max_mem_bytes()) return {};
-
- WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
-
- // Set by TryAllocateBackingStore.
- void* allocation_base = nullptr;
- size_t allocation_length = 0;
-
- void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
- maximum_size, &allocation_base,
- &allocation_length);
- if (memory == nullptr) return {};
-
-#if DEBUG
- // Double check the API allocator actually zero-initialized the memory.
- const byte* bytes = reinterpret_cast<const byte*>(memory);
- for (size_t i = 0; i < size; ++i) {
- DCHECK_EQ(0, bytes[i]);
- }
-#endif
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(size);
-
- constexpr bool is_external = false;
- return SetupArrayBuffer(isolate, memory, size, is_external, shared);
-}
-
-MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
- return AllocateAndSetupArrayBuffer(isolate, size, size,
- SharedFlag::kNotShared);
-}
-
-MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(Isolate* isolate,
- size_t initial_size,
- size_t max_size) {
- return AllocateAndSetupArrayBuffer(isolate, initial_size, max_size,
- SharedFlag::kShared);
-}
-
-void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
- bool free_memory) {
- if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
- DCHECK(!buffer->is_detachable());
-
- const bool is_external = buffer->is_external();
- DCHECK(!buffer->is_detachable());
- if (!is_external) {
- buffer->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(*buffer);
- if (free_memory) {
- // We need to free the memory before detaching the buffer because
- // FreeBackingStore reads buffer->allocation_base(), which is nulled out
- // by Detach. This means there is a dangling pointer until we detach the
- // buffer. Since there is no way for the user to directly call
- // FreeBackingStore, we can ensure this is safe.
- buffer->FreeBackingStoreFromMainThread();
- }
- }
-
- DCHECK(buffer->is_external());
- buffer->set_is_wasm_memory(false);
- buffer->set_is_detachable(true);
- buffer->Detach();
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index c8a65ba6a2..c11a69ad46 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -343,8 +343,7 @@ uint32_t WasmModuleBuilder::AddImport(Vector<const char> name,
uint32_t WasmModuleBuilder::AddGlobalImport(Vector<const char> name,
ValueType type, bool mutability) {
- global_imports_.push_back(
- {name, ValueTypes::ValueTypeCodeFor(type), mutability});
+ global_imports_.push_back({name, type.value_type_code(), mutability});
return static_cast<uint32_t>(global_imports_.size() - 1);
}
@@ -408,11 +407,11 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_u8(kWasmFunctionTypeCode);
buffer->write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
- buffer->write_u8(ValueTypes::ValueTypeCodeFor(param));
+ buffer->write_u8(param.value_type_code());
}
buffer->write_size(sig->return_count());
for (auto ret : sig->returns()) {
- buffer->write_u8(ValueTypes::ValueTypeCodeFor(ret));
+ buffer->write_u8(ret.value_type_code());
}
}
FixupSection(buffer, start);
@@ -455,7 +454,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
size_t start = EmitSection(kTableSectionCode, buffer);
buffer->write_size(tables_.size());
for (const WasmTable& table : tables_) {
- buffer->write_u8(ValueTypes::ValueTypeCodeFor(table.type));
+ buffer->write_u8(table.type.value_type_code());
buffer->write_u8(table.has_maximum ? kHasMaximumFlag : kNoMaximumFlag);
buffer->write_size(table.min_size);
if (table.has_maximum) buffer->write_size(table.max_size);
@@ -486,8 +485,8 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
size_t start = EmitSection(kGlobalSectionCode, buffer);
buffer->write_size(globals_.size());
- for (auto global : globals_) {
- buffer->write_u8(ValueTypes::ValueTypeCodeFor(global.type));
+ for (const WasmGlobal& global : globals_) {
+ buffer->write_u8(global.type.value_type_code());
buffer->write_u8(global.mutability ? 1 : 0);
switch (global.init.kind) {
case WasmInitExpr::kI32Const:
@@ -522,22 +521,22 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
break;
case WasmInitExpr::kNone: {
// No initializer, emit a default value.
- switch (global.type) {
- case kWasmI32:
+ switch (global.type.kind()) {
+ case ValueType::kI32:
buffer->write_u8(kExprI32Const);
// LEB encoding of 0.
buffer->write_u8(0);
break;
- case kWasmI64:
+ case ValueType::kI64:
buffer->write_u8(kExprI64Const);
// LEB encoding of 0.
buffer->write_u8(0);
break;
- case kWasmF32:
+ case ValueType::kF32:
buffer->write_u8(kExprF32Const);
buffer->write_f32(0.f);
break;
- case kWasmF64:
+ case ValueType::kF64:
buffer->write_u8(kExprF64Const);
buffer->write_f64(0.);
break;
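// For reference, the "LEB encoding of 0" emitted above is the single byte
// 0x00. A minimal signed-LEB128 writer, sketched against a plain std::vector
// rather than the ZoneBuffer API:
void WriteSLeb128(std::vector<uint8_t>* out, int32_t value) {
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    value >>= 7;  // arithmetic shift preserves the sign bit
    // Stop once the remaining bits are pure sign extension of this byte.
    more = !((value == 0 && !(byte & 0x40)) || (value == -1 && (byte & 0x40)));
    if (more) byte |= 0x80;
    out->push_back(byte);
  }
}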
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.cc b/deps/v8/src/wasm/wasm-module-sourcemap.cc
index cfe54e7c37..85a171e5ac 100644
--- a/deps/v8/src/wasm/wasm-module-sourcemap.cc
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.cc
@@ -27,8 +27,7 @@ WasmModuleSourceMap::WasmModuleSourceMap(v8::Isolate* v8_isolate,
v8::Local<v8::Value> version_value, sources_value, mappings_value;
bool has_valid_version =
src_map_obj
- ->Get(context,
- v8::String::NewFromUtf8(v8_isolate, "version").ToLocalChecked())
+ ->Get(context, v8::String::NewFromUtf8Literal(v8_isolate, "version"))
.ToLocal(&version_value) &&
version_value->IsUint32();
uint32_t version = 0;
@@ -38,8 +37,7 @@ WasmModuleSourceMap::WasmModuleSourceMap(v8::Isolate* v8_isolate,
bool has_valid_sources =
src_map_obj
- ->Get(context,
- v8::String::NewFromUtf8(v8_isolate, "sources").ToLocalChecked())
+ ->Get(context, v8::String::NewFromUtf8Literal(v8_isolate, "sources"))
.ToLocal(&sources_value) &&
sources_value->IsArray();
if (!has_valid_sources) return;
@@ -48,8 +46,7 @@ WasmModuleSourceMap::WasmModuleSourceMap(v8::Isolate* v8_isolate,
v8::Local<v8::Object>::Cast(sources_value);
v8::Local<v8::Value> sources_len_value;
if (!sources_arr
- ->Get(context,
- v8::String::NewFromUtf8(v8_isolate, "length").ToLocalChecked())
+ ->Get(context, v8::String::NewFromUtf8Literal(v8_isolate, "length"))
.ToLocal(&sources_len_value))
return;
uint32_t sources_len = 0;
@@ -71,9 +68,7 @@ WasmModuleSourceMap::WasmModuleSourceMap(v8::Isolate* v8_isolate,
bool has_valid_mappings =
src_map_obj
- ->Get(
- context,
- v8::String::NewFromUtf8(v8_isolate, "mappings").ToLocalChecked())
+ ->Get(context, v8::String::NewFromUtf8Literal(v8_isolate, "mappings"))
.ToLocal(&mappings_value) &&
mappings_value->IsString();
if (!has_valid_mappings) return;
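// The hunks above replace the fallible String::NewFromUtf8, which returns a
// MaybeLocal<String> and therefore needed ToLocalChecked(), with
// String::NewFromUtf8Literal, which takes a compile-time literal and returns
// a Local<String> directly. A sketch of the difference:
void ContrastUtf8Constructors(v8::Isolate* isolate, const char* runtime_str) {
  v8::Local<v8::String> key =
      v8::String::NewFromUtf8Literal(isolate, "version");  // cannot fail
  v8::MaybeLocal<v8::String> maybe =
      v8::String::NewFromUtf8(isolate, runtime_str);       // may be empty
  (void)key;
  (void)maybe;
}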
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.h b/deps/v8/src/wasm/wasm-module-sourcemap.h
index 83293ae205..7d3116f3b3 100644
--- a/deps/v8/src/wasm/wasm-module-sourcemap.h
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.h
@@ -18,7 +18,7 @@ namespace wasm {
// toolchain (e.g. Emscripten). This implementation mostly complies with the
// specification (https://sourcemaps.info/spec.html), with the following
// accommodations:
-// 1. "names" field is an empty array in current source maps of WASM, hence it
+// 1. "names" field is an empty array in current source maps of Wasm, hence it
// is not handled;
// 2. The semicolons divide the "mappings" field into groups, each of which
// represents a line in the generated code. As *.wasm is in binary format, there
@@ -37,13 +37,13 @@ class V8_EXPORT_PRIVATE WasmModuleSourceMap {
// and can be correctly decoded.
bool IsValid() const { return valid_; }
- // Given a function located at [start, end) in WASM Module, this function
+// Given a function located at [start, end) in the Wasm module, this function
// checks if this function has its corresponding source code.
bool HasSource(size_t start, size_t end) const;
// Given a function's base address start and an address addr within, this
// function checks if the address can be mapped to an offset in this function.
- // For example, we have the following memory layout for WASM functions, foo
+ // For example, we have the following memory layout for Wasm functions, foo
// and bar, and O1, O2, O3 and O4 are the decoded offsets of source map:
//
// O1 --- O2 ----- O3 ----- O4
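// A sketch of the lookup that diagram describes, assuming the decoded offsets
// are kept sorted (illustrative only; the real class stores more state):
size_t NearestDecodedOffset(const std::vector<size_t>& offsets, size_t start,
                            size_t addr) {
  size_t rel = addr - start;  // position of addr relative to the function base
  auto it = std::upper_bound(offsets.begin(), offsets.end(), rel);
  return it == offsets.begin() ? 0 : *(it - 1);  // nearest offset <= rel
}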
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index d75198d770..4a785c820d 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -30,19 +30,35 @@ namespace wasm {
// static
const uint32_t WasmElemSegment::kNullIndex;
-WireBytesRef DecodedFunctionNames::Lookup(const ModuleWireBytes& wire_bytes,
- uint32_t function_index) const {
+WireBytesRef DecodedFunctionNames::Lookup(
+ const ModuleWireBytes& wire_bytes, uint32_t function_index,
+ Vector<const WasmExport> export_table) const {
base::MutexGuard lock(&mutex_);
if (!function_names_) {
function_names_.reset(new std::unordered_map<uint32_t, WireBytesRef>());
DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(),
- function_names_.get());
+ function_names_.get(), export_table);
}
auto it = function_names_->find(function_index);
if (it == function_names_->end()) return WireBytesRef();
return it->second;
}
+std::pair<WireBytesRef, WireBytesRef> DecodedGlobalNames::Lookup(
+ uint32_t global_index, Vector<const WasmImport> import_table,
+ Vector<const WasmExport> export_table) const {
+ base::MutexGuard lock(&mutex_);
+ if (!global_names_) {
+ global_names_.reset(
+ new std::unordered_map<uint32_t,
+ std::pair<WireBytesRef, WireBytesRef>>());
+ DecodeGlobalNames(import_table, export_table, global_names_.get());
+ }
+ auto it = global_names_->find(global_index);
+ if (it == global_names_->end()) return {};
+ return it->second;
+}
+
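// Both Lookup functions above follow the same pattern: build the name map
// lazily under a mutex, so the first caller pays the decode cost and later
// callers hit the cache. The pattern reduced to its essentials (illustrative
// names, not V8 code):
class LazyNameMapSketch {
 public:
  std::string Lookup(uint32_t index) const {
    base::MutexGuard lock(&mutex_);
    if (!names_) {
      names_.reset(new std::unordered_map<uint32_t, std::string>());
      // ... decode all names into *names_ exactly once ...
    }
    auto it = names_->find(index);
    return it == names_->end() ? std::string() : it->second;
  }

 private:
  mutable base::Mutex mutex_;
  mutable std::unique_ptr<std::unordered_map<uint32_t, std::string>> names_;
};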
// static
int MaxNumExportWrappers(const WasmModule* module) {
// For each signature there may exist a wrapper, both for imported and
@@ -113,7 +129,7 @@ void DecodedFunctionNames::AddForTesting(int function_index,
AsmJsOffsetInformation::AsmJsOffsetInformation(
Vector<const byte> encoded_offsets)
- : encoded_offsets_(OwnedVector<uint8_t>::Of(encoded_offsets)) {}
+ : encoded_offsets_(OwnedVector<const uint8_t>::Of(encoded_offsets)) {}
AsmJsOffsetInformation::~AsmJsOffsetInformation() = default;
@@ -176,8 +192,8 @@ WasmName ModuleWireBytes::GetNameOrNull(WireBytesRef ref) const {
// Get a string stored in the module bytes representing a function name.
WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
const WasmModule* module) const {
- return GetNameOrNull(
- module->function_names.Lookup(*this, function->func_index));
+ return GetNameOrNull(module->function_names.Lookup(
+ *this, function->func_index, VectorOf(module->export_table)));
}
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
@@ -218,38 +234,41 @@ namespace {
// Converts the given {type} into a string representation that can be used in
// reflective functions. Should be kept in sync with the {GetValueType} helper.
Handle<String> ToValueTypeString(Isolate* isolate, ValueType type) {
+ // TODO(ahaas/jkummerow): This could be as simple as:
+ // return isolate->factory()->InternalizeUtf8String(type.type_name());
+ // if we clean up all occurrences of "anyfunc" in favor of "funcref".
Factory* factory = isolate->factory();
Handle<String> string;
- switch (type) {
- case i::wasm::kWasmI32: {
+ switch (type.kind()) {
+ case i::wasm::ValueType::kI32: {
string = factory->InternalizeUtf8String("i32");
break;
}
- case i::wasm::kWasmI64: {
+ case i::wasm::ValueType::kI64: {
string = factory->InternalizeUtf8String("i64");
break;
}
- case i::wasm::kWasmF32: {
+ case i::wasm::ValueType::kF32: {
string = factory->InternalizeUtf8String("f32");
break;
}
- case i::wasm::kWasmF64: {
+ case i::wasm::ValueType::kF64: {
string = factory->InternalizeUtf8String("f64");
break;
}
- case i::wasm::kWasmAnyRef: {
+ case i::wasm::ValueType::kAnyRef: {
string = factory->InternalizeUtf8String("anyref");
break;
}
- case i::wasm::kWasmFuncRef: {
+ case i::wasm::ValueType::kFuncRef: {
string = factory->InternalizeUtf8String("anyfunc");
break;
}
- case i::wasm::kWasmNullRef: {
+ case i::wasm::ValueType::kNullRef: {
string = factory->InternalizeUtf8String("nullref");
break;
}
- case i::wasm::kWasmExnRef: {
+ case i::wasm::ValueType::kExnRef: {
string = factory->InternalizeUtf8String("exnref");
break;
}
@@ -261,7 +280,7 @@ Handle<String> ToValueTypeString(Isolate* isolate, ValueType type) {
} // namespace
-Handle<JSObject> GetTypeForFunction(Isolate* isolate, FunctionSig* sig) {
+Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig) {
Factory* factory = isolate->factory();
// Extract values for the {ValueType[]} arrays.
@@ -333,13 +352,12 @@ Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
Factory* factory = isolate->factory();
Handle<String> element;
- if (type == ValueType::kWasmFuncRef) {
+ if (type == kWasmFuncRef) {
// TODO(wasm): We should define the "anyfunc" string in one central place
// and then use that constant everywhere.
element = factory->InternalizeUtf8String("anyfunc");
} else {
- DCHECK(WasmFeatures::FromFlags().has_anyref() &&
- type == ValueType::kWasmAnyRef);
+ DCHECK(WasmFeatures::FromFlags().has_anyref() && type == kWasmAnyRef);
element = factory->InternalizeUtf8String("anyref");
}
@@ -641,7 +659,7 @@ size_t EstimateStoredSize(const WasmModule* module) {
VectorSize(module->elem_segments);
}
-size_t PrintSignature(Vector<char> buffer, wasm::FunctionSig* sig) {
+size_t PrintSignature(Vector<char> buffer, const wasm::FunctionSig* sig) {
if (buffer.empty()) return 0;
size_t old_size = buffer.size();
auto append_char = [&buffer](char c) {
@@ -650,11 +668,11 @@ size_t PrintSignature(Vector<char> buffer, wasm::FunctionSig* sig) {
buffer += 1;
};
for (wasm::ValueType t : sig->parameters()) {
- append_char(wasm::ValueTypes::ShortNameOf(t));
+ append_char(t.short_name());
}
append_char(':');
for (wasm::ValueType t : sig->returns()) {
- append_char(wasm::ValueTypes::ShortNameOf(t));
+ append_char(t.short_name());
}
buffer[0] = '\0';
return old_size - buffer.size();
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 9fb67a7c16..6c782607bb 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -51,12 +51,13 @@ class WireBytesRef {
// Static representation of a wasm function.
struct WasmFunction {
- FunctionSig* sig; // signature of the function.
- uint32_t func_index; // index into the function table.
- uint32_t sig_index; // index into the signature table.
- WireBytesRef code; // code of this function.
+ const FunctionSig* sig; // signature of the function.
+ uint32_t func_index; // index into the function table.
+ uint32_t sig_index; // index into the signature table.
+ WireBytesRef code; // code of this function.
bool imported;
bool exported;
+ bool declared;
};
// Static representation of a wasm global variable.
@@ -79,7 +80,7 @@ using WasmExceptionSig = FunctionSig;
// Static representation of a wasm exception type.
struct WasmException {
explicit WasmException(const WasmExceptionSig* sig) : sig(sig) {}
- FunctionSig* ToFunctionSig() const { return const_cast<FunctionSig*>(sig); }
+ const FunctionSig* ToFunctionSig() const { return sig; }
const WasmExceptionSig* sig; // type signature of the exception.
};
@@ -115,19 +116,31 @@ struct WasmElemSegment {
// Construct an active segment.
WasmElemSegment(uint32_t table_index, WasmInitExpr offset)
- : table_index(table_index), offset(offset), active(true) {}
-
- // Construct a passive segment, which has no table index or offset.
- WasmElemSegment() : table_index(0), active(false) {}
+ : type(kWasmFuncRef),
+ table_index(table_index),
+ offset(offset),
+ status(kStatusActive) {}
+
+ // Construct a passive or declarative segment, which has no table index or
+ // offset.
+ explicit WasmElemSegment(bool declarative)
+ : type(kWasmFuncRef),
+ table_index(0),
+ status(declarative ? kStatusDeclarative : kStatusPassive) {}
// Used in the {entries} vector to represent a `ref.null` entry in a passive
// segment.
V8_EXPORT_PRIVATE static const uint32_t kNullIndex = ~0u;
+ ValueType type;
uint32_t table_index;
WasmInitExpr offset;
std::vector<uint32_t> entries;
- bool active; // true if copied automatically during instantiation.
+ enum Status {
+ kStatusActive, // copied automatically during instantiation.
+ kStatusPassive, // copied explicitly after instantiation.
+ kStatusDeclarative // purely declarative and never copied.
+ } status;
};
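// Illustrative consequence of the three-way status (a sketch, not V8 code):
// only active segments are copied into a table at instantiation time, and
// declarative segments start out as already dropped.
inline bool CopiedAtInstantiation(const WasmElemSegment& seg) {
  return seg.status == WasmElemSegment::kStatusActive;
}
inline bool InitiallyDropped(const WasmElemSegment& seg) {
  return seg.status == WasmElemSegment::kStatusDeclarative;
}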
// Static representation of a wasm import.
@@ -181,7 +194,8 @@ struct ModuleWireBytes;
class V8_EXPORT_PRIVATE DecodedFunctionNames {
public:
WireBytesRef Lookup(const ModuleWireBytes& wire_bytes,
- uint32_t function_index) const;
+ uint32_t function_index,
+ Vector<const WasmExport> export_table) const;
void AddForTesting(int function_index, WireBytesRef name);
private:
@@ -192,6 +206,21 @@ class V8_EXPORT_PRIVATE DecodedFunctionNames {
function_names_;
};
+class V8_EXPORT_PRIVATE DecodedGlobalNames {
+ public:
+ std::pair<WireBytesRef, WireBytesRef> Lookup(
+ uint32_t global_index, Vector<const WasmImport> import_table,
+ Vector<const WasmExport> export_table) const;
+
+ private:
+ // {global_names_} is populated lazily after decoding, and therefore needs a
+ // mutex to protect concurrent modifications from multiple {WasmModuleObject}s.
+ mutable base::Mutex mutex_;
+ mutable std::unique_ptr<
+ std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
+ global_names_;
+};
+
class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
public:
explicit AsmJsOffsetInformation(Vector<const byte> encoded_offsets);
@@ -242,8 +271,9 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section.
+ WireBytesRef code = {0, 0};
WireBytesRef name = {0, 0};
- std::vector<FunctionSig*> signatures; // by signature index
+ std::vector<const FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
@@ -257,6 +287,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
ModuleOrigin origin = kWasmOrigin; // origin of the module
DecodedFunctionNames function_names;
+ DecodedGlobalNames global_names;
std::string source_map_url;
// Asm.js source position information. Only available for modules compiled
@@ -350,7 +381,7 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
V8_EXPORT_PRIVATE bool IsWasmCodegenAllowed(Isolate* isolate,
Handle<Context> context);
-Handle<JSObject> GetTypeForFunction(Isolate* isolate, FunctionSig* sig);
+Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig);
Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
ValueType type);
Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
@@ -417,7 +448,7 @@ class TruncatedUserString {
// Print the signature into the given {buffer}. If {buffer} is non-empty, it
// will be null-terminated, even if the signature is cut off. Returns the number
// of characters written, excluding the terminating null-byte.
-size_t PrintSignature(Vector<char> buffer, wasm::FunctionSig*);
+size_t PrintSignature(Vector<char> buffer, const wasm::FunctionSig*);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 211a27a749..2e75981ff1 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -123,13 +123,16 @@ ACCESSORS(WasmGlobalObject, untagged_buffer, JSArrayBuffer,
ACCESSORS(WasmGlobalObject, tagged_buffer, FixedArray, kTaggedBufferOffset)
SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
SMI_ACCESSORS(WasmGlobalObject, flags, kFlagsOffset)
-BIT_FIELD_ACCESSORS(WasmGlobalObject, flags, type, WasmGlobalObject::TypeBits)
+wasm::ValueType WasmGlobalObject::type() const {
+ return wasm::ValueType(TypeBits::decode(flags()));
+}
+void WasmGlobalObject::set_type(wasm::ValueType value) {
+ set_flags(TypeBits::update(flags(), value.kind()));
+}
BIT_FIELD_ACCESSORS(WasmGlobalObject, flags, is_mutable,
WasmGlobalObject::IsMutableBit)
-int WasmGlobalObject::type_size() const {
- return wasm::ValueTypes::ElementSizeInBytes(type());
-}
+int WasmGlobalObject::type_size() const { return type().element_size_bytes(); }
Address WasmGlobalObject::address() const {
DCHECK_NE(type(), wasm::kWasmAnyRef);
@@ -155,7 +158,7 @@ double WasmGlobalObject::GetF64() {
Handle<Object> WasmGlobalObject::GetRef() {
// We use this getter for anyref, funcref, and exnref.
- DCHECK(wasm::ValueTypes::IsReferenceType(type()));
+ DCHECK(type().IsReferenceType());
return handle(tagged_buffer().get(offset()), GetIsolate());
}
@@ -231,6 +234,8 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_sizes, uint32_t*,
kDataSegmentSizesOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_elem_segments, byte*,
kDroppedElemSegmentsOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, hook_on_function_call_address, Address,
+ kHookOnFunctionCallAddressOffset)
ACCESSORS(WasmInstanceObject, module_object, WasmModuleObject,
kModuleObjectOffset)
@@ -396,14 +401,11 @@ OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
#undef PRIMITIVE_ACCESSORS
wasm::ValueType WasmTableObject::type() {
- return static_cast<wasm::ValueType>(raw_type());
+ return wasm::ValueType(static_cast<wasm::ValueType::Kind>(raw_type()));
}
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
-// WasmExceptionTag
-TQ_SMI_ACCESSORS(WasmExceptionTag, index)
-
// AsmWasmData
ACCESSORS(AsmWasmData, managed_native_module, Managed<wasm::NativeModule>,
kManagedNativeModuleOffset)
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 15fa129d2b..41b9d50312 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -243,7 +243,7 @@ MaybeHandle<String> WasmModuleObject::GetFunctionNameOrNull(
DCHECK_LT(func_index, module_object->module()->functions.size());
wasm::WireBytesRef name = module_object->module()->function_names.Lookup(
wasm::ModuleWireBytes(module_object->native_module()->wire_bytes()),
- func_index);
+ func_index, VectorOf(module_object->module()->export_table));
if (!name.is_set()) return {};
return ExtractUtf8StringFromModuleBytes(isolate, module_object, name,
kNoInternalize);
@@ -256,7 +256,8 @@ Handle<String> WasmModuleObject::GetFunctionName(
GetFunctionNameOrNull(isolate, module_object, func_index);
if (!name.is_null()) return name.ToHandleChecked();
EmbeddedVector<char, 32> buffer;
- int length = SNPrintF(buffer, "wasm-function[%u]", func_index);
+ DCHECK_GE(func_index, module_object->module()->num_imported_functions);
+ int length = SNPrintF(buffer, "func%u", func_index);
return isolate->factory()
->NewStringFromOneByte(Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
@@ -266,8 +267,8 @@ Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
uint32_t func_index) {
DCHECK_GT(module()->functions.size(), func_index);
wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
- wasm::WireBytesRef name_ref =
- module()->function_names.Lookup(wire_bytes, func_index);
+ wasm::WireBytesRef name_ref = module()->function_names.Lookup(
+ wire_bytes, func_index, VectorOf(module()->export_table));
wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
return Vector<const uint8_t>::cast(name);
}
@@ -299,7 +300,7 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
table_obj->set_entries(*backing_store);
table_obj->set_current_length(initial);
table_obj->set_maximum_length(*max);
- table_obj->set_raw_type(static_cast<int>(type));
+ table_obj->set_raw_type(static_cast<int>(type.kind()));
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
if (entries != nullptr) {
@@ -516,7 +517,7 @@ void WasmTableObject::Fill(Isolate* isolate, Handle<WasmTableObject> table,
void WasmTableObject::UpdateDispatchTables(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
- wasm::FunctionSig* sig, Handle<WasmInstanceObject> target_instance,
+ const wasm::FunctionSig* sig, Handle<WasmInstanceObject> target_instance,
int target_func_index) {
// We simply need to update the IFTs for each instance that imports
// this table.
@@ -879,26 +880,22 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
if (old_buffer->is_asmjs_memory()) return -1;
// Checks for maximum memory size.
- uint32_t maximum_pages = wasm::max_mem_pages();
+ uint32_t maximum_pages = wasm::max_maximum_mem_pages();
if (memory_object->has_maximum_pages()) {
maximum_pages = std::min(
maximum_pages, static_cast<uint32_t>(memory_object->maximum_pages()));
}
- CHECK_GE(wasm::max_mem_pages(), maximum_pages);
+ DCHECK_GE(wasm::max_maximum_mem_pages(), maximum_pages);
size_t old_size = old_buffer->byte_length();
- CHECK_EQ(0, old_size % wasm::kWasmPageSize);
+ DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
size_t old_pages = old_size / wasm::kWasmPageSize;
- CHECK_GE(wasm::max_mem_pages(), old_pages);
- if ((pages > maximum_pages - old_pages) || // exceeds remaining
- (pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit
- return -1;
- }
+ CHECK_GE(wasm::max_maximum_mem_pages(), old_pages);
+ if (pages > maximum_pages - old_pages) return -1;
std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
if (!backing_store) return -1;
// Compute new size.
size_t new_pages = old_pages + pages;
- size_t new_byte_length = new_pages * wasm::kWasmPageSize;
// Try to handle shared memory first.
if (old_buffer->is_shared()) {
@@ -909,6 +906,8 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
new_pages);
// Broadcasting the update should update this memory object too.
CHECK_NE(*old_buffer, memory_object->array_buffer());
+ // If the allocation succeeded, then this can't possibly overflow:
+ size_t new_byte_length = new_pages * wasm::kWasmPageSize;
// This is a less than check, as it is not guaranteed that the SAB
// length here will be equal to the stashed length above as calls to
// grow the same memory object can come in from different workers.
@@ -933,7 +932,13 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// Try allocating a new backing store and copying.
std::unique_ptr<BackingStore> new_backing_store =
backing_store->CopyWasmMemory(isolate, new_pages);
- if (!new_backing_store) return -1;
+ if (!new_backing_store) {
+ // Crash on out-of-memory if the correctness fuzzer is running.
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("could not grow wasm memory");
+ }
+ return -1;
+ }
// Detach old and create a new one with the new backing store.
old_buffer->Detach(true);
@@ -962,7 +967,7 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_is_mutable(is_mutable);
}
- if (wasm::ValueTypes::IsReferenceType(type)) {
+ if (type.IsReferenceType()) {
DCHECK(maybe_untagged_buffer.is_null());
Handle<FixedArray> tagged_buffer;
if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) {
@@ -974,7 +979,7 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_tagged_buffer(*tagged_buffer);
} else {
DCHECK(maybe_tagged_buffer.is_null());
- uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
+ uint32_t type_size = type.element_size_bytes();
Handle<JSArrayBuffer> untagged_buffer;
if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) {
@@ -1086,7 +1091,7 @@ void ImportedFunctionEntry::SetWasmToJs(
void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject instance,
Address call_target) {
- TRACE_IFT("Import WASM 0x%" PRIxPTR "[%d] = {instance=0x%" PRIxPTR
+ TRACE_IFT("Import Wasm 0x%" PRIxPTR "[%d] = {instance=0x%" PRIxPTR
", target=0x%" PRIxPTR "}\n",
instance_->ptr(), index_, instance.ptr(), call_target);
instance_->imported_function_refs().set(index_, instance);
@@ -1233,6 +1238,8 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_module_object(*module_object);
instance->set_jump_table_start(
module_object->native_module()->jump_table_start());
+ instance->set_hook_on_function_call_address(
+ isolate->debug()->hook_on_function_call_address());
// Insert the new instance into the scripts weak list of instances. This list
// is used for breakpoints affecting all instances belonging to the script.
@@ -1286,7 +1293,11 @@ void WasmInstanceObject::InitElemSegmentArrays(
auto module = module_object->module();
auto num_elem_segments = module->elem_segments.size();
for (size_t i = 0; i < num_elem_segments; ++i) {
- instance->dropped_elem_segments()[i] = 0;
+ instance->dropped_elem_segments()[i] =
+ module->elem_segments[i].status ==
+ wasm::WasmElemSegment::kStatusDeclarative
+ ? 1
+ : 0;
}
}
@@ -1437,7 +1448,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
Zone zone(isolate->allocator(), ZONE_NAME);
- wasm::FunctionSig* sig = js_function->GetSignature(&zone);
+ const wasm::FunctionSig* sig = js_function->GetSignature(&zone);
auto sig_id = instance->module()->signature_map.Find(*sig);
// Compile a wrapper for the target callable.
@@ -1458,8 +1469,9 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
isolate->wasm_engine(), &env, kind, sig, false);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions), GetCodeKind(result),
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), GetCodeKind(result),
wasm::ExecutionTier::kNone);
wasm::WasmCode* published_code =
native_module->PublishCode(std::move(wasm_code));
@@ -1480,7 +1492,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
// static
uint8_t* WasmInstanceObject::GetGlobalStorage(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
- DCHECK(!wasm::ValueTypes::IsReferenceType(global.type));
+ DCHECK(!global.type.IsReferenceType());
if (global.mutability && global.imported) {
return reinterpret_cast<byte*>(
instance->imported_mutable_globals()[global.index]);
@@ -1493,7 +1505,7 @@ uint8_t* WasmInstanceObject::GetGlobalStorage(
std::pair<Handle<FixedArray>, uint32_t>
WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
const wasm::WasmGlobal& global) {
- DCHECK(wasm::ValueTypes::IsReferenceType(global.type));
+ DCHECK(global.type.IsReferenceType());
Isolate* isolate = instance->GetIsolate();
if (global.mutability && global.imported) {
Handle<FixedArray> buffer(
@@ -1508,10 +1520,37 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
}
// static
+MaybeHandle<String> WasmInstanceObject::GetGlobalNameOrNull(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t global_index) {
+ wasm::ModuleWireBytes wire_bytes(
+ instance->module_object().native_module()->wire_bytes());
+
+ // This is pair of <module_name, field_name>.
+ // If field_name is not set then we don't generate a name. Else if module_name
+ // is set then it is imported global. Otherwise it is exported global.
+ std::pair<wasm::WireBytesRef, wasm::WireBytesRef> name_ref =
+ instance->module()->global_names.Lookup(
+ global_index, VectorOf(instance->module()->import_table),
+ VectorOf(instance->module()->export_table));
+ if (!name_ref.second.is_set()) return {};
+ Vector<const char> field_name = wire_bytes.GetNameOrNull(name_ref.second);
+ if (!name_ref.first.is_set()) {
+ return isolate->factory()->NewStringFromUtf8(VectorOf(field_name));
+ }
+ Vector<const char> module_name = wire_bytes.GetNameOrNull(name_ref.first);
+ std::string global_name;
+ global_name.append(module_name.begin(), module_name.end());
+ global_name.append(".");
+ global_name.append(field_name.begin(), field_name.end());
+ return isolate->factory()->NewStringFromUtf8(VectorOf(global_name));
+}
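// Example of the naming scheme above: a global imported as ("env", "g")
// yields the name "env.g", an exported global named "g" (module name unset)
// yields "g", and a global with no field name yields no name at all.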
+
+// static
wasm::WasmValue WasmInstanceObject::GetGlobalValue(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
Isolate* isolate = instance->GetIsolate();
- if (wasm::ValueTypes::IsReferenceType(global.type)) {
+ if (global.type.IsReferenceType()) {
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
std::tie(global_buffer, global_index) =
@@ -1520,9 +1559,9 @@ wasm::WasmValue WasmInstanceObject::GetGlobalValue(
}
Address ptr = reinterpret_cast<Address>(GetGlobalStorage(instance, global));
using wasm::Simd128;
- switch (global.type) {
+ switch (global.type.kind()) {
#define CASE_TYPE(valuetype, ctype) \
- case wasm::valuetype: \
+ case wasm::ValueType::valuetype: \
return wasm::WasmValue(base::ReadLittleEndianValue<ctype>(ptr));
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
@@ -1645,8 +1684,7 @@ namespace {
constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
size_t ComputeEncodedElementSize(wasm::ValueType type) {
- size_t byte_size =
- static_cast<size_t>(wasm::ValueTypes::ElementSizeInBytes(type));
+ size_t byte_size = type.element_size_bytes();
DCHECK_EQ(byte_size % kBytesPerExceptionValuesArrayElement, 0);
DCHECK_LE(1, byte_size / kBytesPerExceptionValuesArrayElement);
return byte_size / kBytesPerExceptionValuesArrayElement;
@@ -1662,28 +1700,29 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
const wasm::WasmExceptionSig* sig = exception->sig;
uint32_t encoded_size = 0;
for (size_t i = 0; i < sig->parameter_count(); ++i) {
- switch (sig->GetParam(i)) {
- case wasm::kWasmI32:
- case wasm::kWasmF32:
+ switch (sig->GetParam(i).kind()) {
+ case wasm::ValueType::kI32:
+ case wasm::ValueType::kF32:
DCHECK_EQ(2, ComputeEncodedElementSize(sig->GetParam(i)));
encoded_size += 2;
break;
- case wasm::kWasmI64:
- case wasm::kWasmF64:
+ case wasm::ValueType::kI64:
+ case wasm::ValueType::kF64:
DCHECK_EQ(4, ComputeEncodedElementSize(sig->GetParam(i)));
encoded_size += 4;
break;
- case wasm::kWasmS128:
+ case wasm::ValueType::kS128:
DCHECK_EQ(8, ComputeEncodedElementSize(sig->GetParam(i)));
encoded_size += 8;
break;
- case wasm::kWasmAnyRef:
- case wasm::kWasmFuncRef:
- case wasm::kWasmNullRef:
- case wasm::kWasmExnRef:
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kFuncRef:
+ case wasm::ValueType::kNullRef:
+ case wasm::ValueType::kExnRef:
encoded_size += 1;
break;
- default:
+ case wasm::ValueType::kStmt:
+ case wasm::ValueType::kBottom:
UNREACHABLE();
}
}
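// Worked example for the sizes above, in 2-byte array elements: an exception
// signature with parameters (i32, i64, s128, anyref) contributes
// 2 + 4 + 8 + 1 = 15 elements to the encoded values.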
@@ -1808,7 +1847,7 @@ Address WasmExportedFunction::GetWasmCallTarget() {
return instance().GetCallTarget(function_index());
}
-wasm::FunctionSig* WasmExportedFunction::sig() {
+const wasm::FunctionSig* WasmExportedFunction::sig() {
return instance().module()->functions[function_index()].sig;
}
@@ -1820,7 +1859,7 @@ bool WasmJSFunction::IsWasmJSFunction(Object object) {
}
Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
- wasm::FunctionSig* sig,
+ const wasm::FunctionSig* sig,
Handle<JSReceiver> callable) {
DCHECK_LE(sig->all().size(), kMaxInt);
int sig_size = static_cast<int>(sig->all().size());
@@ -1846,6 +1885,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Handle<String> name = isolate->factory()->Function_string();
if (callable->IsJSFunction()) {
name = JSFunction::GetName(Handle<JSFunction>::cast(callable));
+ name = String::Flatten(isolate, name);
}
Handle<Map> function_map = isolate->wasm_exported_function_map();
NewFunctionArgs args =
@@ -1859,7 +1899,7 @@ JSReceiver WasmJSFunction::GetCallable() const {
return shared().wasm_js_function_data().callable();
}
-wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
+const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
WasmJSFunctionData function_data = shared().wasm_js_function_data();
int sig_size = function_data.serialized_signature().length();
wasm::ValueType* types = zone->NewArray<wasm::ValueType>(sig_size);
@@ -1871,7 +1911,7 @@ wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
return new (zone) wasm::FunctionSig(return_count, parameter_count, types);
}
-bool WasmJSFunction::MatchesSignature(wasm::FunctionSig* sig) {
+bool WasmJSFunction::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_LE(sig->all().size(), kMaxInt);
int sig_size = static_cast<int>(sig->all().size());
int return_count = static_cast<int>(sig->return_count());
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index c0a7956e15..972e8d31cd 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -93,21 +93,21 @@ class IndirectFunctionTableEntry {
// The underlying storage in the instance is used by generated code to
// call imported functions at runtime.
// Each entry is either:
-// - WASM to JS, which has fields
+// - Wasm to JS, which has fields
// - object = a Tuple2 of the importing instance and the callable
// - target = entrypoint to import wrapper code
-// - WASM to WASM, which has fields
+// - Wasm to Wasm, which has fields
// - object = target instance
// - target = entrypoint for the function
class ImportedFunctionEntry {
public:
inline ImportedFunctionEntry(Handle<WasmInstanceObject>, int index);
- // Initialize this entry as a WASM to JS call. This accepts the isolate as a
+ // Initialize this entry as a Wasm to JS call. This accepts the isolate as a
// parameter, since it must allocate a tuple.
V8_EXPORT_PRIVATE void SetWasmToJs(Isolate*, Handle<JSReceiver> callable,
const wasm::WasmCode* wasm_to_js_wrapper);
- // Initialize this entry as a WASM to WASM call.
+ // Initialize this entry as a Wasm to Wasm call.
void SetWasmToWasm(WasmInstanceObject target_instance, Address call_target);
WasmInstanceObject instance();
@@ -170,7 +170,7 @@ class WasmModuleObject : public JSObject {
uint32_t func_index);
// Get the function name of the function identified by the given index.
- // Returns "wasm-function[func_index]" if the function is unnamed or the
+ // Returns "func[func_index]" if the function is unnamed or the
// name is not a valid UTF-8 string.
static Handle<String> GetFunctionName(Isolate*, Handle<WasmModuleObject>,
uint32_t func_index);
@@ -247,7 +247,8 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
// TODO(wasm): Unify these three methods into one.
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
- int entry_index, wasm::FunctionSig* sig,
+ int entry_index,
+ const wasm::FunctionSig* sig,
Handle<WasmInstanceObject> target_instance,
int target_func_index);
static void UpdateDispatchTables(Isolate* isolate,
@@ -333,13 +334,7 @@ class WasmGlobalObject : public JSObject {
DECL_PRINTER(WasmGlobalObject)
DECL_VERIFIER(WasmGlobalObject)
-#define WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS(V, _) \
- V(TypeBits, wasm::ValueType, 8, _) \
- V(IsMutableBit, bool, 1, _)
-
- DEFINE_BIT_FIELDS(WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS)
-
-#undef WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS
+ DEFINE_TORQUE_GENERATED_WASM_GLOBAL_OBJECT_FLAGS()
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_WASM_GLOBAL_OBJECT_FIELDS)
@@ -410,6 +405,7 @@ class WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(data_segment_starts, Address*)
DECL_PRIMITIVE_ACCESSORS(data_segment_sizes, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
+ DECL_PRIMITIVE_ACCESSORS(hook_on_function_call_address, Address)
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic. Depending on the V8 build mode there could be no padding.
@@ -456,6 +452,7 @@ class WasmInstanceObject : public JSObject {
V(kDataSegmentStartsOffset, kSystemPointerSize) \
V(kDataSegmentSizesOffset, kSystemPointerSize) \
V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
+ V(kHookOnFunctionCallAddressOffset, kSystemPointerSize) \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -570,6 +567,11 @@ class WasmInstanceObject : public JSObject {
static wasm::WasmValue GetGlobalValue(Handle<WasmInstanceObject>,
const wasm::WasmGlobal&);
+ // Get the name of a global in the given instance by index.
+ static MaybeHandle<String> GetGlobalNameOrNull(Isolate*,
+ Handle<WasmInstanceObject>,
+ uint32_t global_index);
+
OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
@@ -641,7 +643,7 @@ class WasmExportedFunction : public JSFunction {
Address GetWasmCallTarget();
- wasm::FunctionSig* sig();
+ const wasm::FunctionSig* sig();
DECL_CAST(WasmExportedFunction)
OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction);
@@ -653,14 +655,15 @@ class WasmJSFunction : public JSFunction {
public:
static bool IsWasmJSFunction(Object object);
- static Handle<WasmJSFunction> New(Isolate* isolate, wasm::FunctionSig* sig,
+ static Handle<WasmJSFunction> New(Isolate* isolate,
+ const wasm::FunctionSig* sig,
Handle<JSReceiver> callable);
JSReceiver GetCallable() const;
// Deserializes the signature of this function using the provided zone. Note
// that lifetime of the signature is hence directly coupled to the zone.
- wasm::FunctionSig* GetSignature(Zone* zone);
- bool MatchesSignature(wasm::FunctionSig* sig);
+ const wasm::FunctionSig* GetSignature(Zone* zone);
+ bool MatchesSignature(const wasm::FunctionSig* sig);
DECL_CAST(WasmJSFunction)
OBJECT_CONSTRUCTORS(WasmJSFunction, JSFunction);
@@ -881,8 +884,14 @@ class WasmDebugInfo : public Struct {
Address frame_pointer,
int frame_index);
+ // Get stack scope details for a specific interpreted frame. The returned
+ // object contains information about the stack values.
+ static Handle<JSObject> GetStackScopeObject(Handle<WasmDebugInfo>,
+ Address frame_pointer,
+ int frame_index);
+
V8_EXPORT_PRIVATE static Handle<Code> GetCWasmEntry(Handle<WasmDebugInfo>,
- wasm::FunctionSig*);
+ const wasm::FunctionSig*);
OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct);
};
@@ -953,11 +962,6 @@ class WasmExceptionTag
V8_EXPORT_PRIVATE static Handle<WasmExceptionTag> New(Isolate* isolate,
int index);
- // Note that this index is only useful for debugging purposes and it is not
- // unique across modules. The GC however does not allow objects without at
- // least one field, hence this also serves as a padding field for now.
- DECL_INT_ACCESSORS(index)
-
DECL_PRINTER(WasmExceptionTag)
TQ_OBJECT_CONSTRUCTORS(WasmExceptionTag)
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index adb3c18cd5..8eda9aba2e 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -51,6 +51,9 @@ extern class WasmDebugInfo extends Struct {
@generateCppClass
extern class WasmExceptionTag extends Struct {
+ // Note that this index is only useful for debugging purposes and it is not
+ // unique across modules. The GC however does not allow objects without at
+ // least one field, hence this also serves as a padding field for now.
index: Smi;
}
@@ -74,11 +77,17 @@ extern class WasmMemoryObject extends JSObject {
instances: WeakArrayList|Undefined;
}
+type WasmValueType extends uint8 constexpr 'wasm::ValueType::Kind';
+bitfield struct WasmGlobalObjectFlags extends uint31 {
+ Type: WasmValueType: 8 bit; // "type" is a reserved word.
+ is_mutable: bool: 1 bit;
+}
+
extern class WasmGlobalObject extends JSObject {
untagged_buffer: JSArrayBuffer|Undefined;
tagged_buffer: FixedArray|Undefined;
offset: Smi;
- flags: Smi;
+ flags: SmiTagged<WasmGlobalObjectFlags>;
}
extern class WasmExceptionObject extends JSObject {
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 7af21cf8a1..769eba35a1 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -43,8 +43,11 @@ namespace wasm {
#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
#define CASE_SIMD_OP(name, str) \
- CASE_F32x4_OP(name, str) CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
- CASE_I8x16_OP(name, str)
+ CASE_F64x2_OP(name, str) CASE_I64x2_OP(name, str) CASE_F32x4_OP(name, str) \
+ CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
+ CASE_I8x16_OP(name, str)
+#define CASE_SIMDF_OP(name, str) \
+ CASE_F32x4_OP(name, str) CASE_F64x2_OP(name, str)
#define CASE_SIMDI_OP(name, str) \
CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
#define CASE_SIGN_OP(TYPE, name, str) \
@@ -222,45 +225,23 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// SIMD opcodes.
CASE_SIMD_OP(Splat, "splat")
CASE_SIMD_OP(Neg, "neg")
- CASE_F64x2_OP(Neg, "neg")
- CASE_F64x2_OP(Sqrt, "sqrt")
- CASE_F32x4_OP(Sqrt, "sqrt")
- CASE_I64x2_OP(Neg, "neg")
+ CASE_SIMDF_OP(Sqrt, "sqrt")
CASE_SIMD_OP(Eq, "eq")
- CASE_F64x2_OP(Eq, "eq")
- CASE_I64x2_OP(Eq, "eq")
CASE_SIMD_OP(Ne, "ne")
- CASE_F64x2_OP(Ne, "ne")
- CASE_I64x2_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
- CASE_F64x2_OP(Add, "add")
- CASE_I64x2_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
- CASE_F64x2_OP(Sub, "sub")
- CASE_I64x2_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
- CASE_F64x2_OP(Mul, "mul")
- CASE_I64x2_OP(Mul, "mul")
- CASE_F64x2_OP(Div, "div")
- CASE_F32x4_OP(Div, "div")
- CASE_F64x2_OP(Splat, "splat")
- CASE_F64x2_OP(Lt, "lt")
- CASE_F64x2_OP(Le, "le")
- CASE_F64x2_OP(Gt, "gt")
- CASE_F64x2_OP(Ge, "ge")
- CASE_F64x2_OP(Abs, "abs")
- CASE_F32x4_OP(Abs, "abs")
+ CASE_SIMDF_OP(Div, "div")
+ CASE_SIMDF_OP(Lt, "lt")
+ CASE_SIMDF_OP(Le, "le")
+ CASE_SIMDF_OP(Gt, "gt")
+ CASE_SIMDF_OP(Ge, "ge")
+ CASE_SIMDF_OP(Abs, "abs")
CASE_F32x4_OP(AddHoriz, "add_horizontal")
CASE_F32x4_OP(RecipApprox, "recip_approx")
CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
- CASE_F64x2_OP(Min, "min")
- CASE_F32x4_OP(Min, "min")
- CASE_F64x2_OP(Max, "max")
- CASE_F32x4_OP(Max, "max")
- CASE_F32x4_OP(Lt, "lt")
- CASE_F32x4_OP(Le, "le")
- CASE_F32x4_OP(Gt, "gt")
- CASE_F32x4_OP(Ge, "ge")
+ CASE_SIMDF_OP(Min, "min")
+ CASE_SIMDF_OP(Max, "max")
CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert")
@@ -269,10 +250,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
- CASE_F64x2_OP(ExtractLane, "extract_lane")
- CASE_F64x2_OP(ReplaceLane, "replace_lane")
- CASE_F32x4_OP(ExtractLane, "extract_lane")
- CASE_F32x4_OP(ReplaceLane, "replace_lane")
+ CASE_SIMDF_OP(ExtractLane, "extract_lane")
+ CASE_SIMDF_OP(ReplaceLane, "replace_lane")
CASE_I64x2_OP(ExtractLane, "extract_lane")
CASE_I64x2_OP(ReplaceLane, "replace_lane")
CASE_I32x4_OP(ExtractLane, "extract_lane")
@@ -295,7 +274,6 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(I64x2, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
CASE_I64x2_OP(Shl, "shl")
- CASE_I64x2_OP(Splat, "splat")
CASE_I32x4_OP(AddHoriz, "add_horizontal")
CASE_I16x8_OP(AddHoriz, "add_horizontal")
CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
@@ -318,10 +296,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x8_OP(AllTrue, "all_true")
CASE_S1x16_OP(AnyTrue, "any_true")
CASE_S1x16_OP(AllTrue, "all_true")
- CASE_F64x2_OP(Qfma, "qfma")
- CASE_F64x2_OP(Qfms, "qfms")
- CASE_F32x4_OP(Qfma, "qfma")
- CASE_F32x4_OP(Qfms, "qfms")
+ CASE_SIMDF_OP(Qfma, "qfma")
+ CASE_SIMDF_OP(Qfms, "qfms")
CASE_S8x16_OP(LoadSplat, "load_splat")
CASE_S16x8_OP(LoadSplat, "load_splat")
@@ -337,6 +313,14 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I8x16_OP(RoundingAverageU, "avgr_u")
CASE_I16x8_OP(RoundingAverageU, "avgr_u")
+ CASE_I8x16_OP(Abs, "abs")
+ CASE_I16x8_OP(Abs, "abs")
+ CASE_I32x4_OP(Abs, "abs")
+
+ CASE_I8x16_OP(BitMask, "bitmask")
+ CASE_I16x8_OP(BitMask, "bitmask")
+ CASE_I32x4_OP(BitMask, "bitmask")
+
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
@@ -425,6 +409,19 @@ bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
}
}
+bool WasmOpcodes::IsBreakable(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprBlock:
+ case kExprTry:
+ case kExprCatch:
+ case kExprLoop:
+ case kExprElse:
+ return false;
+ default:
+ return true;
+ }
+}
+
bool WasmOpcodes::IsAnyRefOpcode(WasmOpcode opcode) {
switch (opcode) {
case kExprRefNull:
@@ -449,15 +446,26 @@ bool WasmOpcodes::IsThrowingOpcode(WasmOpcode opcode) {
}
}
+bool WasmOpcodes::IsSimdPostMvpOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
+ FOREACH_SIMD_POST_MVP_OPCODE(CHECK_OPCODE)
+#undef CHECK_OPCODE
+ return true;
+ default:
+ return false;
+ }
+}
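// The FOREACH_*_OPCODE lists used above are X-macros: each list invokes a
// caller-supplied macro V once per entry, so a single table drives the switch
// cases, the signature tables, and the name printer. The idiom in miniature
// (illustrative names only):
#define FOREACH_COLOR(V) V(Red, 0) V(Green, 1) V(Blue, 2)
enum ColorSketch {
#define DEFINE_COLOR_ENUM(name, value) kSketch##name = value,
  FOREACH_COLOR(DEFINE_COLOR_ENUM)
#undef DEFINE_COLOR_ENUM
};
#undef FOREACH_COLOR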
+
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
- os << ValueTypes::ShortNameOf(ret);
+ os << ret.short_name();
}
os << "_";
if (sig.parameter_count() == 0) os << "v";
for (auto param : sig.parameters()) {
- os << ValueTypes::ShortNameOf(param);
+ os << param.short_name();
}
return os;
}
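// Example of the printed form: a signature taking (i32, f64) and returning
// i64 prints as "l_id", and a signature with no parameters and no returns
// prints as "v_v" (returns before the underscore, parameters after).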
@@ -545,29 +553,25 @@ constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
} // namespace
-FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
+const FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
switch (opcode >> 8) {
case 0:
- return const_cast<FunctionSig*>(kCachedSigs[kShortSigTable[opcode]]);
+ return kCachedSigs[kShortSigTable[opcode]];
case kSimdPrefix:
- return const_cast<FunctionSig*>(
- kCachedSigs[kSimdExprSigTable[opcode & 0xFF]]);
+ return kCachedSigs[kSimdExprSigTable[opcode & 0xFF]];
case kAtomicPrefix:
- return const_cast<FunctionSig*>(
- kCachedSigs[kAtomicExprSigTable[opcode & 0xFF]]);
+ return kCachedSigs[kAtomicExprSigTable[opcode & 0xFF]];
case kNumericPrefix:
- return const_cast<FunctionSig*>(
- kCachedSigs[kNumericExprSigTable[opcode & 0xFF]]);
+ return kCachedSigs[kNumericExprSigTable[opcode & 0xFF]];
default:
UNREACHABLE(); // invalid prefix.
return nullptr;
}
}
-FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
+const FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
DCHECK_GT(kSimpleAsmjsExprSigTable.size(), opcode);
- return const_cast<FunctionSig*>(
- kCachedSigs[kSimpleAsmjsExprSigTable[opcode]]);
+ return kCachedSigs[kSimpleAsmjsExprSigTable[opcode]];
}
// Define constexpr arrays.
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 53fe21f30f..d5c1644824 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -265,191 +265,199 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
#define FOREACH_SIMD_MEM_OPCODE(V) \
V(S128LoadMem, 0xfd00, s_i) \
- V(S128StoreMem, 0xfd01, v_is)
+ V(S128StoreMem, 0xfd01, v_is) \
+ V(S8x16LoadSplat, 0xfdc2, s_i) \
+ V(S16x8LoadSplat, 0xfdc3, s_i) \
+ V(S32x4LoadSplat, 0xfdc4, s_i) \
+ V(S64x2LoadSplat, 0xfdc5, s_i) \
+ V(I16x8Load8x8S, 0xfdd2, s_i) \
+ V(I16x8Load8x8U, 0xfdd3, s_i) \
+ V(I32x4Load16x4S, 0xfdd4, s_i) \
+ V(I32x4Load16x4U, 0xfdd5, s_i) \
+ V(I64x2Load32x2S, 0xfdd6, s_i) \
+ V(I64x2Load32x2U, 0xfdd7, s_i)
#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(S8x16Shuffle, 0xfd03, s_ss)
-#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
- V(I8x16Splat, 0xfd04, s_i) \
- V(I16x8Splat, 0xfd08, s_i) \
- V(I32x4Splat, 0xfd0c, s_i) \
- V(I64x2Splat, 0xfd0f, s_l) \
- V(F32x4Splat, 0xfd12, s_f) \
- V(F64x2Splat, 0xfd15, s_d) \
- V(I8x16Eq, 0xfd18, s_ss) \
- V(I8x16Ne, 0xfd19, s_ss) \
- V(I8x16LtS, 0xfd1a, s_ss) \
- V(I8x16LtU, 0xfd1b, s_ss) \
- V(I8x16GtS, 0xfd1c, s_ss) \
- V(I8x16GtU, 0xfd1d, s_ss) \
- V(I8x16LeS, 0xfd1e, s_ss) \
- V(I8x16LeU, 0xfd1f, s_ss) \
- V(I8x16GeS, 0xfd20, s_ss) \
- V(I8x16GeU, 0xfd21, s_ss) \
- V(I16x8Eq, 0xfd22, s_ss) \
- V(I16x8Ne, 0xfd23, s_ss) \
- V(I16x8LtS, 0xfd24, s_ss) \
- V(I16x8LtU, 0xfd25, s_ss) \
- V(I16x8GtS, 0xfd26, s_ss) \
- V(I16x8GtU, 0xfd27, s_ss) \
- V(I16x8LeS, 0xfd28, s_ss) \
- V(I16x8LeU, 0xfd29, s_ss) \
- V(I16x8GeS, 0xfd2a, s_ss) \
- V(I16x8GeU, 0xfd2b, s_ss) \
- V(I32x4Eq, 0xfd2c, s_ss) \
- V(I32x4Ne, 0xfd2d, s_ss) \
- V(I32x4LtS, 0xfd2e, s_ss) \
- V(I32x4LtU, 0xfd2f, s_ss) \
- V(I32x4GtS, 0xfd30, s_ss) \
- V(I32x4GtU, 0xfd31, s_ss) \
- V(I32x4LeS, 0xfd32, s_ss) \
- V(I32x4LeU, 0xfd33, s_ss) \
- V(I32x4GeS, 0xfd34, s_ss) \
- V(I32x4GeU, 0xfd35, s_ss) \
- V(I64x2Eq, 0xfd36, s_ss) \
- V(I64x2Ne, 0xfd37, s_ss) \
- V(I64x2LtS, 0xfd38, s_ss) \
- V(I64x2LtU, 0xfd39, s_ss) \
- V(I64x2GtS, 0xfd3a, s_ss) \
- V(I64x2GtU, 0xfd3b, s_ss) \
- V(I64x2LeS, 0xfd3c, s_ss) \
- V(I64x2LeU, 0xfd3d, s_ss) \
- V(I64x2GeS, 0xfd3e, s_ss) \
- V(I64x2GeU, 0xfd3f, s_ss) \
- V(F32x4Eq, 0xfd40, s_ss) \
- V(F32x4Ne, 0xfd41, s_ss) \
- V(F32x4Lt, 0xfd42, s_ss) \
- V(F32x4Gt, 0xfd43, s_ss) \
- V(F32x4Le, 0xfd44, s_ss) \
- V(F32x4Ge, 0xfd45, s_ss) \
- V(F64x2Eq, 0xfd46, s_ss) \
- V(F64x2Ne, 0xfd47, s_ss) \
- V(F64x2Lt, 0xfd48, s_ss) \
- V(F64x2Gt, 0xfd49, s_ss) \
- V(F64x2Le, 0xfd4a, s_ss) \
- V(F64x2Ge, 0xfd4b, s_ss) \
- V(S128Not, 0xfd4c, s_s) \
- V(S128And, 0xfd4d, s_ss) \
- V(S128Or, 0xfd4e, s_ss) \
- V(S128Xor, 0xfd4f, s_ss) \
- V(S128Select, 0xfd50, s_sss) \
- V(I8x16Neg, 0xfd51, s_s) \
- V(S1x16AnyTrue, 0xfd52, i_s) \
- V(S1x16AllTrue, 0xfd53, i_s) \
- V(I8x16Shl, 0xfd54, s_si) \
- V(I8x16ShrS, 0xfd55, s_si) \
- V(I8x16ShrU, 0xfd56, s_si) \
- V(I8x16Add, 0xfd57, s_ss) \
- V(I8x16AddSaturateS, 0xfd58, s_ss) \
- V(I8x16AddSaturateU, 0xfd59, s_ss) \
- V(I8x16Sub, 0xfd5a, s_ss) \
- V(I8x16SubSaturateS, 0xfd5b, s_ss) \
- V(I8x16SubSaturateU, 0xfd5c, s_ss) \
- V(I8x16Mul, 0xfd5d, s_ss) \
- V(I8x16MinS, 0xfd5e, s_ss) \
- V(I8x16MinU, 0xfd5f, s_ss) \
- V(I8x16MaxS, 0xfd60, s_ss) \
- V(I8x16MaxU, 0xfd61, s_ss) \
- V(I16x8Neg, 0xfd62, s_s) \
- V(S1x8AnyTrue, 0xfd63, i_s) \
- V(S1x8AllTrue, 0xfd64, i_s) \
- V(I16x8Shl, 0xfd65, s_si) \
- V(I16x8ShrS, 0xfd66, s_si) \
- V(I16x8ShrU, 0xfd67, s_si) \
- V(I16x8Add, 0xfd68, s_ss) \
- V(I16x8AddSaturateS, 0xfd69, s_ss) \
- V(I16x8AddSaturateU, 0xfd6a, s_ss) \
- V(I16x8Sub, 0xfd6b, s_ss) \
- V(I16x8SubSaturateS, 0xfd6c, s_ss) \
- V(I16x8SubSaturateU, 0xfd6d, s_ss) \
- V(I16x8Mul, 0xfd6e, s_ss) \
- V(I16x8MinS, 0xfd6f, s_ss) \
- V(I16x8MinU, 0xfd70, s_ss) \
- V(I16x8MaxS, 0xfd71, s_ss) \
- V(I16x8MaxU, 0xfd72, s_ss) \
- V(I32x4Neg, 0xfd73, s_s) \
- V(S1x4AnyTrue, 0xfd74, i_s) \
- V(S1x4AllTrue, 0xfd75, i_s) \
- V(I32x4Shl, 0xfd76, s_si) \
- V(I32x4ShrS, 0xfd77, s_si) \
- V(I32x4ShrU, 0xfd78, s_si) \
- V(I32x4Add, 0xfd79, s_ss) \
- V(I32x4Sub, 0xfd7c, s_ss) \
- V(I32x4Mul, 0xfd7f, s_ss) \
- V(I32x4MinS, 0xfd80, s_ss) \
- V(I32x4MinU, 0xfd81, s_ss) \
- V(I32x4MaxS, 0xfd82, s_ss) \
- V(I32x4MaxU, 0xfd83, s_ss) \
- V(I64x2Neg, 0xfd84, s_s) \
- V(S1x2AnyTrue, 0xfd85, i_s) \
- V(S1x2AllTrue, 0xfd86, i_s) \
- V(I64x2Shl, 0xfd87, s_si) \
- V(I64x2ShrS, 0xfd88, s_si) \
- V(I64x2ShrU, 0xfd89, s_si) \
- V(I64x2Add, 0xfd8a, s_ss) \
- V(I64x2Sub, 0xfd8d, s_ss) \
- V(I64x2Mul, 0xfd8c, s_ss) \
- V(I64x2MinS, 0xfd8e, s_ss) \
- V(I64x2MinU, 0xfd8f, s_ss) \
- V(I64x2MaxS, 0xfd90, s_ss) \
- V(I64x2MaxU, 0xfd91, s_ss) \
- V(F32x4Abs, 0xfd95, s_s) \
- V(F32x4Neg, 0xfd96, s_s) \
- V(F32x4Sqrt, 0xfd97, s_s) \
- V(F32x4Qfma, 0xfd98, s_sss) \
- V(F32x4Qfms, 0xfd99, s_sss) \
- V(F32x4Add, 0xfd9a, s_ss) \
- V(F32x4Sub, 0xfd9b, s_ss) \
- V(F32x4Mul, 0xfd9c, s_ss) \
- V(F32x4Div, 0xfd9d, s_ss) \
- V(F32x4Min, 0xfd9e, s_ss) \
- V(F32x4Max, 0xfd9f, s_ss) \
- V(F64x2Abs, 0xfda0, s_s) \
- V(F64x2Neg, 0xfda1, s_s) \
- V(F64x2Sqrt, 0xfda2, s_s) \
- V(F64x2Qfma, 0xfda3, s_sss) \
- V(F64x2Qfms, 0xfda4, s_sss) \
- V(F64x2Add, 0xfda5, s_ss) \
- V(F64x2Sub, 0xfda6, s_ss) \
- V(F64x2Mul, 0xfda7, s_ss) \
- V(F64x2Div, 0xfda8, s_ss) \
- V(F64x2Min, 0xfda9, s_ss) \
- V(F64x2Max, 0xfdaa, s_ss) \
- V(I32x4SConvertF32x4, 0xfdab, s_s) \
- V(I32x4UConvertF32x4, 0xfdac, s_s) \
- V(F32x4SConvertI32x4, 0xfdaf, s_s) \
- V(F32x4UConvertI32x4, 0xfdb0, s_s) \
- V(S8x16Swizzle, 0xfdc0, s_ss) \
- V(S8x16LoadSplat, 0xfdc2, s_i) \
- V(S16x8LoadSplat, 0xfdc3, s_i) \
- V(S32x4LoadSplat, 0xfdc4, s_i) \
- V(S64x2LoadSplat, 0xfdc5, s_i) \
- V(I8x16SConvertI16x8, 0xfdc6, s_ss) \
- V(I8x16UConvertI16x8, 0xfdc7, s_ss) \
- V(I16x8SConvertI32x4, 0xfdc8, s_ss) \
- V(I16x8UConvertI32x4, 0xfdc9, s_ss) \
- V(I16x8SConvertI8x16Low, 0xfdca, s_s) \
- V(I16x8SConvertI8x16High, 0xfdcb, s_s) \
- V(I16x8UConvertI8x16Low, 0xfdcc, s_s) \
- V(I16x8UConvertI8x16High, 0xfdcd, s_s) \
- V(I32x4SConvertI16x8Low, 0xfdce, s_s) \
- V(I32x4SConvertI16x8High, 0xfdcf, s_s) \
- V(I32x4UConvertI16x8Low, 0xfdd0, s_s) \
- V(I32x4UConvertI16x8High, 0xfdd1, s_s) \
- V(I16x8Load8x8S, 0xfdd2, s_s) \
- V(I16x8Load8x8U, 0xfdd3, s_s) \
- V(I32x4Load16x4S, 0xfdd4, s_s) \
- V(I32x4Load16x4U, 0xfdd5, s_s) \
- V(I64x2Load32x2S, 0xfdd6, s_s) \
- V(I64x2Load32x2U, 0xfdd7, s_s) \
- V(S128AndNot, 0xfdd8, s_ss) \
- V(I8x16RoundingAverageU, 0xfdd9, s_ss) \
- V(I16x8RoundingAverageU, 0xfdda, s_ss) \
- V(I16x8AddHoriz, 0xfdbd, s_ss) \
- V(I32x4AddHoriz, 0xfdbe, s_ss) \
- V(F32x4AddHoriz, 0xfdbf, s_ss) \
- V(F32x4RecipApprox, 0xfde0, s_s) \
- V(F32x4RecipSqrtApprox, 0xfde1, s_s)
+#define FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
+ V(I8x16Splat, 0xfd04, s_i) \
+ V(I16x8Splat, 0xfd08, s_i) \
+ V(I32x4Splat, 0xfd0c, s_i) \
+ V(I64x2Splat, 0xfd0f, s_l) \
+ V(F32x4Splat, 0xfd12, s_f) \
+ V(F64x2Splat, 0xfd15, s_d) \
+ V(I8x16Eq, 0xfd18, s_ss) \
+ V(I8x16Ne, 0xfd19, s_ss) \
+ V(I8x16LtS, 0xfd1a, s_ss) \
+ V(I8x16LtU, 0xfd1b, s_ss) \
+ V(I8x16GtS, 0xfd1c, s_ss) \
+ V(I8x16GtU, 0xfd1d, s_ss) \
+ V(I8x16LeS, 0xfd1e, s_ss) \
+ V(I8x16LeU, 0xfd1f, s_ss) \
+ V(I8x16GeS, 0xfd20, s_ss) \
+ V(I8x16GeU, 0xfd21, s_ss) \
+ V(I16x8Eq, 0xfd22, s_ss) \
+ V(I16x8Ne, 0xfd23, s_ss) \
+ V(I16x8LtS, 0xfd24, s_ss) \
+ V(I16x8LtU, 0xfd25, s_ss) \
+ V(I16x8GtS, 0xfd26, s_ss) \
+ V(I16x8GtU, 0xfd27, s_ss) \
+ V(I16x8LeS, 0xfd28, s_ss) \
+ V(I16x8LeU, 0xfd29, s_ss) \
+ V(I16x8GeS, 0xfd2a, s_ss) \
+ V(I16x8GeU, 0xfd2b, s_ss) \
+ V(I32x4Eq, 0xfd2c, s_ss) \
+ V(I32x4Ne, 0xfd2d, s_ss) \
+ V(I32x4LtS, 0xfd2e, s_ss) \
+ V(I32x4LtU, 0xfd2f, s_ss) \
+ V(I32x4GtS, 0xfd30, s_ss) \
+ V(I32x4GtU, 0xfd31, s_ss) \
+ V(I32x4LeS, 0xfd32, s_ss) \
+ V(I32x4LeU, 0xfd33, s_ss) \
+ V(I32x4GeS, 0xfd34, s_ss) \
+ V(I32x4GeU, 0xfd35, s_ss) \
+ V(F32x4Eq, 0xfd40, s_ss) \
+ V(F32x4Ne, 0xfd41, s_ss) \
+ V(F32x4Lt, 0xfd42, s_ss) \
+ V(F32x4Gt, 0xfd43, s_ss) \
+ V(F32x4Le, 0xfd44, s_ss) \
+ V(F32x4Ge, 0xfd45, s_ss) \
+ V(F64x2Eq, 0xfd46, s_ss) \
+ V(F64x2Ne, 0xfd47, s_ss) \
+ V(F64x2Lt, 0xfd48, s_ss) \
+ V(F64x2Gt, 0xfd49, s_ss) \
+ V(F64x2Le, 0xfd4a, s_ss) \
+ V(F64x2Ge, 0xfd4b, s_ss) \
+ V(S128Not, 0xfd4c, s_s) \
+ V(S128And, 0xfd4d, s_ss) \
+ V(S128Or, 0xfd4e, s_ss) \
+ V(S128Xor, 0xfd4f, s_ss) \
+ V(S128Select, 0xfd50, s_sss) \
+ V(I8x16Neg, 0xfd51, s_s) \
+ V(S1x16AnyTrue, 0xfd52, i_s) \
+ V(S1x16AllTrue, 0xfd53, i_s) \
+ V(I8x16Shl, 0xfd54, s_si) \
+ V(I8x16ShrS, 0xfd55, s_si) \
+ V(I8x16ShrU, 0xfd56, s_si) \
+ V(I8x16Add, 0xfd57, s_ss) \
+ V(I8x16AddSaturateS, 0xfd58, s_ss) \
+ V(I8x16AddSaturateU, 0xfd59, s_ss) \
+ V(I8x16Sub, 0xfd5a, s_ss) \
+ V(I8x16SubSaturateS, 0xfd5b, s_ss) \
+ V(I8x16SubSaturateU, 0xfd5c, s_ss) \
+ V(I8x16Mul, 0xfd5d, s_ss) \
+ V(I8x16MinS, 0xfd5e, s_ss) \
+ V(I8x16MinU, 0xfd5f, s_ss) \
+ V(I8x16MaxS, 0xfd60, s_ss) \
+ V(I8x16MaxU, 0xfd61, s_ss) \
+ V(I16x8Neg, 0xfd62, s_s) \
+ V(S1x8AnyTrue, 0xfd63, i_s) \
+ V(S1x8AllTrue, 0xfd64, i_s) \
+ V(I16x8Shl, 0xfd65, s_si) \
+ V(I16x8ShrS, 0xfd66, s_si) \
+ V(I16x8ShrU, 0xfd67, s_si) \
+ V(I16x8Add, 0xfd68, s_ss) \
+ V(I16x8AddSaturateS, 0xfd69, s_ss) \
+ V(I16x8AddSaturateU, 0xfd6a, s_ss) \
+ V(I16x8Sub, 0xfd6b, s_ss) \
+ V(I16x8SubSaturateS, 0xfd6c, s_ss) \
+ V(I16x8SubSaturateU, 0xfd6d, s_ss) \
+ V(I16x8Mul, 0xfd6e, s_ss) \
+ V(I16x8MinS, 0xfd6f, s_ss) \
+ V(I16x8MinU, 0xfd70, s_ss) \
+ V(I16x8MaxS, 0xfd71, s_ss) \
+ V(I16x8MaxU, 0xfd72, s_ss) \
+ V(I32x4Neg, 0xfd73, s_s) \
+ V(S1x4AnyTrue, 0xfd74, i_s) \
+ V(S1x4AllTrue, 0xfd75, i_s) \
+ V(I32x4Shl, 0xfd76, s_si) \
+ V(I32x4ShrS, 0xfd77, s_si) \
+ V(I32x4ShrU, 0xfd78, s_si) \
+ V(I32x4Add, 0xfd79, s_ss) \
+ V(I32x4Sub, 0xfd7c, s_ss) \
+ V(I32x4Mul, 0xfd7f, s_ss) \
+ V(I32x4MinS, 0xfd80, s_ss) \
+ V(I32x4MinU, 0xfd81, s_ss) \
+ V(I32x4MaxS, 0xfd82, s_ss) \
+ V(I32x4MaxU, 0xfd83, s_ss) \
+ V(I64x2Neg, 0xfd84, s_s) \
+ V(I64x2Shl, 0xfd87, s_si) \
+ V(I64x2ShrS, 0xfd88, s_si) \
+ V(I64x2ShrU, 0xfd89, s_si) \
+ V(I64x2Add, 0xfd8a, s_ss) \
+ V(I64x2Sub, 0xfd8d, s_ss) \
+ V(I64x2Mul, 0xfd8c, s_ss) \
+ V(F32x4Abs, 0xfd95, s_s) \
+ V(F32x4Neg, 0xfd96, s_s) \
+ V(F32x4Sqrt, 0xfd97, s_s) \
+ V(F32x4Add, 0xfd9a, s_ss) \
+ V(F32x4Sub, 0xfd9b, s_ss) \
+ V(F32x4Mul, 0xfd9c, s_ss) \
+ V(F32x4Div, 0xfd9d, s_ss) \
+ V(F32x4Min, 0xfd9e, s_ss) \
+ V(F32x4Max, 0xfd9f, s_ss) \
+ V(F64x2Abs, 0xfda0, s_s) \
+ V(F64x2Neg, 0xfda1, s_s) \
+ V(F64x2Sqrt, 0xfda2, s_s) \
+ V(F64x2Add, 0xfda5, s_ss) \
+ V(F64x2Sub, 0xfda6, s_ss) \
+ V(F64x2Mul, 0xfda7, s_ss) \
+ V(F64x2Div, 0xfda8, s_ss) \
+ V(F64x2Min, 0xfda9, s_ss) \
+ V(F64x2Max, 0xfdaa, s_ss) \
+ V(I32x4SConvertF32x4, 0xfdab, s_s) \
+ V(I32x4UConvertF32x4, 0xfdac, s_s) \
+ V(F32x4SConvertI32x4, 0xfdaf, s_s) \
+ V(F32x4UConvertI32x4, 0xfdb0, s_s) \
+ V(S8x16Swizzle, 0xfdc0, s_ss) \
+ V(I8x16SConvertI16x8, 0xfdc6, s_ss) \
+ V(I8x16UConvertI16x8, 0xfdc7, s_ss) \
+ V(I16x8SConvertI32x4, 0xfdc8, s_ss) \
+ V(I16x8UConvertI32x4, 0xfdc9, s_ss) \
+ V(I16x8SConvertI8x16Low, 0xfdca, s_s) \
+ V(I16x8SConvertI8x16High, 0xfdcb, s_s) \
+ V(I16x8UConvertI8x16Low, 0xfdcc, s_s) \
+ V(I16x8UConvertI8x16High, 0xfdcd, s_s) \
+ V(I32x4SConvertI16x8Low, 0xfdce, s_s) \
+ V(I32x4SConvertI16x8High, 0xfdcf, s_s) \
+ V(I32x4UConvertI16x8Low, 0xfdd0, s_s) \
+ V(I32x4UConvertI16x8High, 0xfdd1, s_s) \
+ V(S128AndNot, 0xfdd8, s_ss) \
+ V(I8x16RoundingAverageU, 0xfdd9, s_ss) \
+ V(I16x8RoundingAverageU, 0xfdda, s_ss) \
+ V(I8x16Abs, 0xfde1, s_s) \
+ V(I16x8Abs, 0xfde2, s_s) \
+ V(I32x4Abs, 0xfde3, s_s)
+
+#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
+ V(I64x2Eq, 0xfd36, s_ss) \
+ V(I64x2Ne, 0xfd37, s_ss) \
+ V(I64x2LtS, 0xfd38, s_ss) \
+ V(I64x2LtU, 0xfd39, s_ss) \
+ V(I64x2GtS, 0xfd3a, s_ss) \
+ V(I64x2GtU, 0xfd3b, s_ss) \
+ V(I64x2LeS, 0xfd3c, s_ss) \
+ V(I64x2LeU, 0xfd3d, s_ss) \
+ V(I64x2GeS, 0xfd3e, s_ss) \
+ V(I64x2GeU, 0xfd3f, s_ss) \
+ V(S1x2AnyTrue, 0xfd85, i_s) \
+ V(S1x2AllTrue, 0xfd86, i_s) \
+ V(I64x2MinS, 0xfd8e, s_ss) \
+ V(I64x2MinU, 0xfd8f, s_ss) \
+ V(I64x2MaxS, 0xfd90, s_ss) \
+ V(I64x2MaxU, 0xfd91, s_ss) \
+ V(F32x4Qfma, 0xfd98, s_sss) \
+ V(F32x4Qfms, 0xfd99, s_sss) \
+ V(F64x2Qfma, 0xfda3, s_sss) \
+ V(F64x2Qfms, 0xfda4, s_sss) \
+ V(I16x8AddHoriz, 0xfdbd, s_ss) \
+ V(I32x4AddHoriz, 0xfdbe, s_ss) \
+ V(F32x4AddHoriz, 0xfdbf, s_ss) \
+ V(I8x16BitMask, 0xfde4, i_s) \
+ V(I16x8BitMask, 0xfde5, i_s) \
+ V(I32x4BitMask, 0xfde6, i_s) \
+ V(F32x4RecipApprox, 0xfdee, s_s) \
+ V(F32x4RecipSqrtApprox, 0xfdef, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
V(I8x16ExtractLaneS, 0xfd05, _) \
@@ -469,6 +477,10 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F32x4ReplaceLane, 0xfd14, _) \
V(F64x2ReplaceLane, 0xfd17, _)
+#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_POST_MVP_OPCODE(V)
+
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
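
FOREACH_SIMD_0_OPERAND_OPCODE is now just the concatenation of the MVP and post-MVP lists (defined just above), so existing consumers keep iterating every opcode while new code such as IsSimdPostMvpOpcode can iterate only one subset. List composition is plain macro forwarding, roughly:

    #define FOREACH_MVP(V)      V(Add, 0x01)
    #define FOREACH_POST_MVP(V) V(Qfma, 0x02)
    #define FOREACH_ALL(V) \
      FOREACH_MVP(V)       \
      FOREACH_POST_MVP(V)
    // FOREACH_ALL(D) expands to D(Add, 0x01) D(Qfma, 0x02).
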
@@ -664,19 +676,21 @@ enum TrapReason {
// A collection of opcode-related static methods.
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
- static const char* OpcodeName(WasmOpcode opcode);
- static FunctionSig* Signature(WasmOpcode opcode);
- static FunctionSig* AsmjsSignature(WasmOpcode opcode);
- static bool IsPrefixOpcode(WasmOpcode opcode);
- static bool IsControlOpcode(WasmOpcode opcode);
- static bool IsAnyRefOpcode(WasmOpcode opcode);
- static bool IsThrowingOpcode(WasmOpcode opcode);
+ static const char* OpcodeName(WasmOpcode);
+ static const FunctionSig* Signature(WasmOpcode);
+ static const FunctionSig* AsmjsSignature(WasmOpcode);
+ static bool IsPrefixOpcode(WasmOpcode);
+ static bool IsControlOpcode(WasmOpcode);
+ static bool IsAnyRefOpcode(WasmOpcode);
+ static bool IsThrowingOpcode(WasmOpcode);
+ static bool IsSimdPostMvpOpcode(WasmOpcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
- static bool IsUnconditionalJump(WasmOpcode opcode);
+ static bool IsUnconditionalJump(WasmOpcode);
+ static bool IsBreakable(WasmOpcode);
- static MessageTemplate TrapReasonToMessageId(TrapReason reason);
- static const char* TrapReasonMessage(TrapReason reason);
+ static MessageTemplate TrapReasonToMessageId(TrapReason);
+ static const char* TrapReasonMessage(TrapReason);
};
// Representation of an initializer expression.
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 42eee037d5..fc1104b8d0 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -155,6 +155,21 @@ ErrorThrower::~ErrorThrower() {
}
}
+ScheduledErrorThrower::~ScheduledErrorThrower() {
+ // There should never be both a pending and a scheduled exception.
+ DCHECK(!isolate()->has_scheduled_exception() ||
+ !isolate()->has_pending_exception());
+ // Don't throw another error if there is already a scheduled error.
+ if (isolate()->has_scheduled_exception()) {
+ Reset();
+ } else if (isolate()->has_pending_exception()) {
+ Reset();
+ isolate()->OptionalRescheduleException(false);
+ } else if (error()) {
+ isolate()->ScheduleThrow(*Reify());
+ }
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 92049e6080..784dd0f615 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -168,6 +168,20 @@ class V8_EXPORT_PRIVATE ErrorThrower {
DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
};
+// Like an ErrorThrower, but turns all pending exceptions into scheduled
+// exceptions when going out of scope. Use this in API methods.
+// Note that pending exceptions are not necessarily created by the ErrorThrower
+// itself; they may also come from e.g. the wasm start function. There might
+// also be a scheduled exception, created by another API call (e.g.
+// v8::Object::Get). But there should never be both pending and scheduled
+// exceptions.
+class V8_EXPORT_PRIVATE ScheduledErrorThrower : public ErrorThrower {
+ public:
+ ScheduledErrorThrower(i::Isolate* isolate, const char* context)
+ : ErrorThrower(isolate, context) {}
+
+ ~ScheduledErrorThrower();
+};
+
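
A hedged usage sketch (the API method name and error text are illustrative, and CompileError is assumed from the ErrorThrower base): the thrower is a stack-allocated guard, and its destructor is what converts an accumulated error or a pending exception into a scheduled one for the embedder.

    void WebAssemblyCompileSketch(i::Isolate* isolate, bool bytes_ok) {
      ScheduledErrorThrower thrower(isolate, "WebAssembly.compile()");
      if (!bytes_ok) {
        thrower.CompileError("invalid module bytes");  // assumed base helper
        return;  // ~ScheduledErrorThrower reifies and schedules the error here.
      }
      // ... proceed with compilation; a pending exception raised along the way
      // is rescheduled by the destructor as well.
    }
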
// Use {nullptr_t} as data value to indicate that this only stores the error,
// but no result value (the only valid value is {nullptr}).
// [Storing {void} would require template specialization.]
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index c736a272b1..773a709721 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -30,7 +30,7 @@ namespace wasm {
namespace {
// TODO(bbudge) Try to unify the various implementations of readers and writers
-// in WASM, e.g. StreamProcessor and ZoneBuffer, with these.
+// in Wasm, e.g. StreamProcessor and ZoneBuffer, with these.
class Writer {
public:
explicit Writer(Vector<byte> buffer)
@@ -99,16 +99,17 @@ class Reader {
return value;
}
- void ReadVector(Vector<byte> v) {
- if (v.size() > 0) {
- DCHECK_GE(current_size(), v.size());
- memcpy(v.begin(), current_location(), v.size());
- pos_ += v.size();
- }
+ template <typename T>
+ Vector<const T> ReadVector(size_t size) {
+ DCHECK_GE(current_size(), size);
+ Vector<const byte> bytes{pos_, size * sizeof(T)};
+ pos_ += size * sizeof(T);
if (FLAG_trace_wasm_serialization) {
- StdoutStream{} << "read vector of " << v.size() << " elements"
+ StdoutStream{} << "read vector of " << size << " elements of size "
+ << sizeof(T) << " (total size " << size * sizeof(T) << ")"
<< std::endl;
}
+ return Vector<const T>::cast(bytes);
}
void Skip(size_t size) { pos_ += size; }
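
The rewritten ReadVector no longer copies into a caller-provided buffer: it returns a typed view directly into the serialized bytes and just advances the read position. A self-contained sketch of the same zero-copy pattern (View and ByteReader are illustrative stand-ins, not V8 types):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    template <typename T>
    struct View {
      const T* data;
      size_t size;
    };

    struct ByteReader {
      const uint8_t* pos;
      const uint8_t* end;

      template <typename T>
      View<T> ReadVector(size_t count) {
        size_t bytes = count * sizeof(T);
        assert(static_cast<size_t>(end - pos) >= bytes);
        // The view aliases the underlying buffer -- no allocation, no memcpy --
        // so it must not outlive the buffer it was read from.
        View<T> result{reinterpret_cast<const T*>(pos), count};
        pos += bytes;
        return result;
      }
    };
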
@@ -188,21 +189,20 @@ constexpr size_t kHeaderSize =
sizeof(uint32_t) + // total wasm function count
sizeof(uint32_t); // imported functions (index of first wasm function)
-constexpr size_t kCodeHeaderSize =
- sizeof(size_t) + // size of code section
- sizeof(size_t) + // offset of constant pool
- sizeof(size_t) + // offset of safepoint table
- sizeof(size_t) + // offset of handler table
- sizeof(size_t) + // offset of code comments
- sizeof(size_t) + // unpadded binary size
- sizeof(uint32_t) + // stack slots
- sizeof(uint32_t) + // tagged parameter slots
- sizeof(size_t) + // code size
- sizeof(size_t) + // reloc size
- sizeof(size_t) + // source positions size
- sizeof(size_t) + // protected instructions size
- sizeof(WasmCode::Kind) + // code kind
- sizeof(ExecutionTier); // tier
+constexpr size_t kCodeHeaderSize = sizeof(bool) + // whether code is present
+ sizeof(int) + // offset of constant pool
+ sizeof(int) + // offset of safepoint table
+ sizeof(int) + // offset of handler table
+ sizeof(int) + // offset of code comments
+ sizeof(int) + // unpadded binary size
+ sizeof(int) + // stack slots
+ sizeof(int) + // tagged parameter slots
+ sizeof(int) + // code size
+ sizeof(int) + // reloc size
+ sizeof(int) + // source positions size
+ sizeof(int) + // protected instructions size
+ sizeof(WasmCode::Kind) + // code kind
+ sizeof(ExecutionTier); // tier
// A list of all isolate-independent external references. This is used to create
// a tag from the Address of an external reference and vice versa.
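
The size_t -> bool/int change in kCodeHeaderSize is not only about size: sizeof(size_t) is 4 on 32-bit targets and 8 on 64-bit ones, so the old layout baked the host word width into the serialized format, while int is 4 bytes on every target V8 supports. A minimal sketch of the byte-wise writer such header constants are matched against (names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct SketchWriter {
      uint8_t* pos;
      template <typename T>
      void Write(T value) {  // raw byte copy; field width is exactly sizeof(T)
        std::memcpy(pos, &value, sizeof(T));
        pos += sizeof(T);
      }
    };

    constexpr size_t kSketchCodeHeaderSize = sizeof(bool) +  // code present?
                                             sizeof(int) +   // code size
                                             sizeof(int);    // reloc size

    void WriteSketchHeader(SketchWriter* w, int code_size, int reloc_size) {
      w->Write(true);
      w->Write(code_size);  // always 4 bytes, unlike the old size_t fields
      w->Write(reloc_size);
    }
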
@@ -300,13 +300,12 @@ NativeModuleSerializer::NativeModuleSerializer(
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
- if (code == nullptr) return sizeof(size_t);
+ if (code == nullptr) return sizeof(bool);
DCHECK(code->kind() == WasmCode::kFunction ||
code->kind() == WasmCode::kInterpreterEntry);
return kCodeHeaderSize + code->instructions().size() +
code->reloc_info().size() + code->source_positions().size() +
- code->protected_instructions().size() *
- sizeof(trap_handler::ProtectedInstructionData);
+ code->protected_instructions_data().size();
}
size_t NativeModuleSerializer::Measure() const {
@@ -327,13 +326,13 @@ void NativeModuleSerializer::WriteHeader(Writer* writer) {
void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
if (code == nullptr) {
- writer->Write(size_t{0});
+ writer->Write(false);
return;
}
+ writer->Write(true);
DCHECK(code->kind() == WasmCode::kFunction ||
code->kind() == WasmCode::kInterpreterEntry);
// Write the size of the entire code section, followed by the code header.
- writer->Write(MeasureCode(code));
writer->Write(code->constant_pool_offset());
writer->Write(code->safepoint_table_offset());
writer->Write(code->handler_table_offset());
@@ -341,10 +340,10 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(code->unpadded_binary_size());
writer->Write(code->stack_slots());
writer->Write(code->tagged_parameter_slots());
- writer->Write(code->instructions().size());
- writer->Write(code->reloc_info().size());
- writer->Write(code->source_positions().size());
- writer->Write(code->protected_instructions().size());
+ writer->Write(code->instructions().length());
+ writer->Write(code->reloc_info().length());
+ writer->Write(code->source_positions().length());
+ writer->Write(code->protected_instructions_data().length());
writer->Write(code->kind());
writer->Write(code->tier());
@@ -356,14 +355,16 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
// Write the reloc info, source positions, and protected code.
writer->WriteVector(code->reloc_info());
writer->WriteVector(code->source_positions());
- writer->WriteVector(Vector<byte>::cast(code->protected_instructions()));
+ writer->WriteVector(code->protected_instructions_data());
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390X
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X
// On platforms that don't support misaligned word stores, copy to an aligned
// buffer if necessary so we can relocate the serialized code.
std::unique_ptr<byte[]> aligned_buffer;
if (!IsAligned(reinterpret_cast<Address>(serialized_code_start),
- kInt32Size)) {
+ kSystemPointerSize)) {
+    // 'byte' does not guarantee alignment, but it seems to work well enough
+    // in practice.
aligned_buffer.reset(new byte[code_size]);
code_start = aligned_buffer.get();
}
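
The hunk also widens the alignment requirement from kInt32Size to kSystemPointerSize: relocation performs word-sized writes, and on the listed architectures a misaligned word store can fault. A hedged sketch of the align-or-copy step in isolation:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <memory>

    bool IsAlignedTo(const void* p, size_t alignment) {
      return (reinterpret_cast<uintptr_t>(p) & (alignment - 1)) == 0;
    }

    // Returns a pointer that word-sized relocation writes may safely target,
    // copying into freshly allocated storage when the source is misaligned.
    // (operator new returns storage aligned for any fundamental type.)
    uint8_t* AlignForRelocation(uint8_t* src, size_t size,
                                std::unique_ptr<uint8_t[]>* holder) {
      if (IsAlignedTo(src, sizeof(void*))) return src;
      holder->reset(new uint8_t[size]);
      std::memcpy(holder->get(), src, size);
      return holder->get();
    }
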
@@ -462,7 +463,7 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
private:
bool ReadHeader(Reader* reader);
- bool ReadCode(uint32_t fn_index, Reader* reader);
+ bool ReadCode(int fn_index, Reader* reader);
NativeModule* const native_module_;
bool read_called_;
@@ -480,6 +481,7 @@ bool NativeModuleDeserializer::Read(Reader* reader) {
if (!ReadHeader(reader)) return false;
uint32_t total_fns = native_module_->num_functions();
uint32_t first_wasm_fn = native_module_->num_imported_functions();
+ WasmCodeRefScope wasm_code_ref_scope;
for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
if (!ReadCode(i, reader)) return false;
}
@@ -493,46 +495,39 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
imports == native_module_->num_imported_functions();
}
-bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
- size_t code_section_size = reader->Read<size_t>();
- if (code_section_size == 0) {
+bool NativeModuleDeserializer::ReadCode(int fn_index, Reader* reader) {
+ bool has_code = reader->Read<bool>();
+ if (!has_code) {
DCHECK(FLAG_wasm_lazy_compilation ||
native_module_->enabled_features().has_compilation_hints());
native_module_->UseLazyStub(fn_index);
return true;
}
- size_t constant_pool_offset = reader->Read<size_t>();
- size_t safepoint_table_offset = reader->Read<size_t>();
- size_t handler_table_offset = reader->Read<size_t>();
- size_t code_comment_offset = reader->Read<size_t>();
- size_t unpadded_binary_size = reader->Read<size_t>();
- uint32_t stack_slot_count = reader->Read<uint32_t>();
- uint32_t tagged_parameter_slots = reader->Read<uint32_t>();
- size_t code_size = reader->Read<size_t>();
- size_t reloc_size = reader->Read<size_t>();
- size_t source_position_size = reader->Read<size_t>();
- size_t protected_instructions_size = reader->Read<size_t>();
+ int constant_pool_offset = reader->Read<int>();
+ int safepoint_table_offset = reader->Read<int>();
+ int handler_table_offset = reader->Read<int>();
+ int code_comment_offset = reader->Read<int>();
+ int unpadded_binary_size = reader->Read<int>();
+ int stack_slot_count = reader->Read<int>();
+ int tagged_parameter_slots = reader->Read<int>();
+ int code_size = reader->Read<int>();
+ int reloc_size = reader->Read<int>();
+ int source_position_size = reader->Read<int>();
+ int protected_instructions_size = reader->Read<int>();
WasmCode::Kind kind = reader->Read<WasmCode::Kind>();
ExecutionTier tier = reader->Read<ExecutionTier>();
- Vector<const byte> code_buffer = {reader->current_location(), code_size};
- reader->Skip(code_size);
-
- OwnedVector<byte> reloc_info = OwnedVector<byte>::New(reloc_size);
- reader->ReadVector(reloc_info.as_vector());
- OwnedVector<byte> source_pos = OwnedVector<byte>::New(source_position_size);
- reader->ReadVector(source_pos.as_vector());
+ auto code_buffer = reader->ReadVector<byte>(code_size);
+ auto reloc_info = reader->ReadVector<byte>(reloc_size);
+ auto source_pos = reader->ReadVector<byte>(source_position_size);
auto protected_instructions =
- OwnedVector<trap_handler::ProtectedInstructionData>::New(
- protected_instructions_size);
- reader->ReadVector(Vector<byte>::cast(protected_instructions.as_vector()));
+ reader->ReadVector<byte>(protected_instructions_size);
WasmCode* code = native_module_->AddDeserializedCode(
fn_index, code_buffer, stack_slot_count, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
- code_comment_offset, unpadded_binary_size,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_pos), kind, tier);
+ code_comment_offset, unpadded_binary_size, protected_instructions,
+ std::move(reloc_info), std::move(source_pos), kind, tier);
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
@@ -540,8 +535,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- auto jump_tables_ref =
- native_module_->FindJumpTablesForCode(code->instruction_start());
+ auto jump_tables_ref = native_module_->FindJumpTablesForRegion(
+ base::AddressRegionOf(code->instructions()));
for (RelocIterator iter(code->instructions(), code->reloc_info(),
code->constant_pool(), mask);
!iter.done(); iter.next()) {
@@ -616,12 +611,12 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
if (decode_result.failed()) return {};
std::shared_ptr<WasmModule> module = std::move(decode_result.value());
CHECK_NOT_NULL(module);
- Handle<Script> script =
- CreateWasmScript(isolate, wire_bytes, VectorOf(module->source_map_url),
- module->name, source_url);
+ Handle<Script> script = CreateWasmScript(isolate, wire_bytes_vec,
+ VectorOf(module->source_map_url),
+ module->name, source_url);
- auto shared_native_module =
- wasm_engine->MaybeGetNativeModule(module->origin, wire_bytes_vec);
+ auto shared_native_module = wasm_engine->MaybeGetNativeModule(
+ module->origin, wire_bytes_vec, isolate);
if (shared_native_module == nullptr) {
const bool kIncludeLiftoff = false;
size_t code_size_estimate =
@@ -633,11 +628,9 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
OwnedVector<uint8_t>::Of(wire_bytes_vec));
NativeModuleDeserializer deserializer(shared_native_module.get());
- WasmCodeRefScope wasm_code_ref_scope;
-
Reader reader(data + WasmSerializer::kHeaderSize);
bool error = !deserializer.Read(&reader);
- wasm_engine->UpdateNativeModuleCache(shared_native_module, error);
+ wasm_engine->UpdateNativeModuleCache(error, &shared_native_module, isolate);
if (error) return {};
}
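
Deserialization now participates in the engine-wide native module cache: MaybeGetNativeModule looks for an existing module with the same wire bytes, and UpdateNativeModuleCache reports back whether this attempt succeeded so a failed deserialization does not poison the cache. A toy sketch of that get-then-update protocol (types and method names are stand-ins, not the WasmEngine API):

    #include <map>
    #include <memory>
    #include <string>

    struct ToyNativeModule { /* compiled code would live here */ };

    class ToyModuleCache {
     public:
      std::shared_ptr<ToyNativeModule> MaybeGet(const std::string& wire_bytes) {
        auto it = cache_.find(wire_bytes);
        return it == cache_.end() ? nullptr : it->second;
      }

      // Called once the caller has tried to populate the module: on error the
      // entry is dropped so a later attempt can retry from scratch.
      void Update(bool error, std::shared_ptr<ToyNativeModule>* module,
                  const std::string& wire_bytes) {
        if (error) {
          cache_.erase(wire_bytes);
        } else {
          cache_[wire_bytes] = *module;
        }
      }

     private:
      std::map<std::string, std::shared_ptr<ToyNativeModule>> cache_;
    };
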
diff --git a/deps/v8/src/wasm/wasm-tier.h b/deps/v8/src/wasm/wasm-tier.h
index b649723479..8434a2e14b 100644
--- a/deps/v8/src/wasm/wasm-tier.h
+++ b/deps/v8/src/wasm/wasm-tier.h
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
namespace wasm {
-// All the tiers of WASM execution.
+// All the tiers of Wasm execution.
enum class ExecutionTier : int8_t {
kNone,
kInterpreter,
diff --git a/deps/v8/test/benchmarks/csuite/csuite.py b/deps/v8/test/benchmarks/csuite/csuite.py
index ce2ad84384..9b0a0e6436 100755
--- a/deps/v8/test/benchmarks/csuite/csuite.py
+++ b/deps/v8/test/benchmarks/csuite/csuite.py
@@ -124,7 +124,7 @@ if __name__ == '__main__':
# Ensure output directory is setup
output_path_base = os.path.abspath(os.getcwd())
output_path = os.path.join(output_path_base, "_results")
- output_file = os.path.join(output_path, "master")
+ output_file = os.path.join(output_path, "master_" + suite)
if not os.path.exists(output_path):
if opts.verbose:
print("Creating directory %s." % output_path)
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index ee6407c74b..89fe36f65b 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -14,17 +14,11 @@ config("cctest_config") {
v8_executable("cctest") {
testonly = true
- sources = [
- "cctest.cc",
- ]
+ sources = [ "cctest.cc" ]
- deps = [
- ":cctest_sources",
- ]
+ deps = [ ":cctest_sources" ]
- data_deps = [
- "../../tools:v8_testrunner",
- ]
+ data_deps = [ "../../tools:v8_testrunner" ]
data = [
"testcfg.py",
@@ -35,15 +29,12 @@ v8_executable("cctest") {
configs = [
"../..:external_config",
"../..:internal_config_base",
+ "../..:v8_tracing_config",
":cctest_config",
]
ldflags = []
- if (v8_use_perfetto) {
- deps += [ "//third_party/perfetto/include/perfetto/tracing" ]
- }
-
# TODO(machenbach): Translate from gyp.
#["OS=="aix"", {
# "ldflags": [ "-Wl,-bbigtoc" ],
@@ -58,9 +49,7 @@ v8_header_set("cctest_headers") {
"../..:internal_config_base",
]
- sources = [
- "cctest.h",
- ]
+ sources = [ "cctest.h" ]
}
v8_source_set("cctest_sources") {
@@ -230,6 +219,7 @@ v8_source_set("cctest_sources") {
"test-intl.cc",
"test-js-weak-refs.cc",
"test-liveedit.cc",
+ "test-local-handles.cc",
"test-lockers.cc",
"test-log.cc",
"test-managed.cc",
@@ -293,6 +283,8 @@ v8_source_set("cctest_sources") {
"wasm/test-streaming-compilation.cc",
"wasm/test-wasm-breakpoints.cc",
"wasm/test-wasm-codegen.cc",
+ "wasm/test-wasm-debug-evaluate.cc",
+ "wasm/test-wasm-debug-evaluate.h",
"wasm/test-wasm-import-wrapper-cache.cc",
"wasm/test-wasm-interpreter-entry.cc",
"wasm/test-wasm-serialization.cc",
@@ -386,6 +378,7 @@ v8_source_set("cctest_sources") {
configs = [
"../..:external_config",
"../..:internal_config_base",
+ "../..:v8_tracing_config",
]
public_deps = [
@@ -400,9 +393,7 @@ v8_source_set("cctest_sources") {
]
defines = []
- deps = [
- "../..:run_torque",
- ]
+ deps = [ "../..:run_torque" ]
if (v8_enable_i18n_support) {
defines += [ "V8_INTL_SUPPORT" ]
@@ -432,6 +423,11 @@ v8_source_set("cctest_sources") {
# MSVS wants this for gay-{precision,shortest}.cc.
cflags += [ "/bigobj" ]
+
+ if (symbol_level == 2) {
+ sources += [ "test-v8windbg.cc" ]
+ deps += [ "../../tools/v8windbg:v8windbg_test" ]
+ }
}
if (v8_use_perfetto) {
@@ -439,6 +435,7 @@ v8_source_set("cctest_sources") {
"//third_party/perfetto/include/perfetto/tracing",
"//third_party/perfetto/protos/perfetto/trace/chrome:lite",
"//third_party/perfetto/protos/perfetto/trace/chrome:zero",
+ "//third_party/perfetto/src/tracing:in_process_backend",
]
}
}
@@ -465,7 +462,5 @@ v8_executable("generate-bytecode-expectations") {
"//build/win:default_exe_manifest",
]
- data = [
- "interpreter/bytecode_expectations/",
- ]
+ data = [ "interpreter/bytecode_expectations/" ]
}
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 8a5a5a6d31..e503b51914 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -36,6 +36,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug-interface.h"
#include "src/execution/isolate.h"
+#include "src/execution/simulator.h"
#include "src/flags/flags.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
@@ -358,16 +359,13 @@ static inline v8::Local<v8::Integer> v8_int(int32_t x) {
}
static inline v8::Local<v8::String> v8_str(const char* x) {
- return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x).ToLocalChecked();
}
static inline v8::Local<v8::String> v8_str(v8::Isolate* isolate,
const char* x) {
- return v8::String::NewFromUtf8(isolate, x, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ return v8::String::NewFromUtf8(isolate, x).ToLocalChecked();
}
@@ -437,8 +435,7 @@ static inline v8::MaybeLocal<v8::Value> CompileRun(
static inline v8::Local<v8::Value> CompileRunChecked(v8::Isolate* isolate,
const char* source) {
v8::Local<v8::String> source_string =
- v8::String::NewFromUtf8(isolate, source, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8(isolate, source).ToLocalChecked();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::Local<v8::Script> script =
v8::Script::Compile(context, source_string).ToLocalChecked();
@@ -735,4 +732,65 @@ class TestPlatform : public v8::Platform {
DISALLOW_COPY_AND_ASSIGN(TestPlatform);
};
+#if defined(USE_SIMULATOR)
+class SimulatorHelper {
+ public:
+ inline bool Init(v8::Isolate* isolate) {
+ simulator_ = reinterpret_cast<v8::internal::Isolate*>(isolate)
+ ->thread_local_top()
+ ->simulator_;
+    // Check if there is an active simulator.
+ return simulator_ != nullptr;
+ }
+
+ inline void FillRegisters(v8::RegisterState* state) {
+#if V8_TARGET_ARCH_ARM
+ state->pc = reinterpret_cast<void*>(simulator_->get_pc());
+ state->sp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::sp));
+ state->fp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::r11));
+ state->lr = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::lr));
+#elif V8_TARGET_ARCH_ARM64
+ if (simulator_->sp() == 0 || simulator_->fp() == 0) {
+ // It's possible that the simulator is interrupted while it is updating
+      // the sp or fp register. The ARM64 simulator does this in two steps:
+      // it first sets the register to zero and then sets it to the new value.
+      // Bail out if sp/fp doesn't yet contain the new value.
+ return;
+ }
+ state->pc = reinterpret_cast<void*>(simulator_->pc());
+ state->sp = reinterpret_cast<void*>(simulator_->sp());
+ state->fp = reinterpret_cast<void*>(simulator_->fp());
+ state->lr = reinterpret_cast<void*>(simulator_->lr());
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ state->pc = reinterpret_cast<void*>(simulator_->get_pc());
+ state->sp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::sp));
+ state->fp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::fp));
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ state->pc = reinterpret_cast<void*>(simulator_->get_pc());
+ state->sp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::sp));
+ state->fp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::fp));
+ state->lr = reinterpret_cast<void*>(simulator_->get_lr());
+#elif V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+ state->pc = reinterpret_cast<void*>(simulator_->get_pc());
+ state->sp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::sp));
+ state->fp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::fp));
+ state->lr = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::ra));
+#endif
+ }
+
+ private:
+ v8::internal::Simulator* simulator_;
+};
+#endif // USE_SIMULATOR
+
#endif // ifndef CCTEST_H_
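
A hedged usage sketch for the new helper: a sampling-style test initializes it against the isolate's active simulator and then snapshots pc/sp/fp (plus lr where the target provides one) into a v8::RegisterState.

    #if defined(USE_SIMULATOR)
    bool SampleSimulatedRegisters(v8::Isolate* isolate,
                                  v8::RegisterState* state) {
      SimulatorHelper helper;
      if (!helper.Init(isolate)) return false;  // no simulator on this thread
      // May leave *state untouched if arm64 sp/fp are mid-update (see above).
      helper.FillRegisters(state);
      return true;
    }
    #endif  // USE_SIMULATOR
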
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 06583f6bd5..444ec9d8ec 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -140,8 +140,6 @@
##############################################################################
['arch == arm64', {
- 'test-api/Bug618': [PASS],
-
# BUG(v8:3385).
'test-serialize/StartupSerializerOnceRunScript': [PASS, FAIL],
'test-serialize/StartupSerializerTwiceRunScript': [PASS, FAIL],
@@ -178,6 +176,12 @@
}], # variant == nooptimization and (arch == arm or arch == arm64) and simulator_run
##############################################################################
+['variant == no_lfa', {
+ # https://crbug.com/v8/10219
+ 'test-compiler/DecideToPretenureDuringCompilation': [SKIP],
+}], # variant == no_lfa
+
+##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
@@ -190,6 +194,10 @@
['msan == True', {
# ICU upstream issues.
'test-strings/CountBreakIterator': [SKIP],
+
+ # BUG(v8:10244): MSAN finding uninitialized bytes during memcmp
+ 'test-code-stub-assembler/SmallOrderedHashSetAllocate': [SKIP],
+ 'test-code-stub-assembler/SmallOrderedHashMapAllocate': [SKIP],
}], # 'msan == True'
##############################################################################
@@ -207,8 +215,8 @@
##############################################################################
['byteorder == big', {
- # Skip WASM atomic tests on big-endian machines.
- # There is no support to emulate atomic WASM operations on big-endian
+ # Skip Wasm atomic tests on big-endian machines.
+ # There is no support to emulate atomic Wasm operations on big-endian
# platforms, since this would require bit swapping as a part of atomic
# operations.
'test-run-wasm-atomics/*': [SKIP],
@@ -455,7 +463,7 @@
'test-api-wasm/WasmStreaming*': [SKIP],
'test-backing-store/Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree': [SKIP],
'test-c-wasm-entry/*': [SKIP],
- 'test-compilation-cache/TestAsyncCache': [SKIP],
+ 'test-compilation-cache/*': [SKIP],
'test-jump-table-assembler/*': [SKIP],
'test-grow-memory/*': [SKIP],
'test-run-wasm-64/*': [SKIP],
@@ -474,6 +482,7 @@
'test-streaming-compilation/*': [SKIP],
'test-wasm-breakpoints/*': [SKIP],
'test-wasm-codegen/*': [SKIP],
+ 'test-wasm-debug-evaluate/*': [SKIP],
'test-wasm-import-wrapper-cache/*': [SKIP],
'test-wasm-interpreter-entry/*': [SKIP],
'test-wasm-serialization/*': [SKIP],
@@ -484,6 +493,7 @@
# Tests that generate code at runtime.
'codegen-tester/*': [SKIP],
+ 'serializer-tester/*': [SKIP],
'test-accessor-assembler/*': [SKIP],
'test-assembler-*': [SKIP],
'test-basic-block-profiler/*': [SKIP],
@@ -492,7 +502,10 @@
'test-code-generator/*': [SKIP],
'test-code-pages/*': [SKIP],
'test-code-stub-assembler/*': [SKIP],
+ 'test-debug-helper/GetObjectProperties': [SKIP],
+ 'test-disasm-x64/DisasmX64': [SKIP],
'test-js-context-specialization/*': [SKIP],
+ 'test-macro-assembler-x64/EmbeddedObj': [SKIP],
'test-multiple-return/*': [SKIP],
'test-regexp/MacroAssemblernativeAtStart': [SKIP],
'test-regexp/MacroAssemblerNativeBackReferenceLATIN1': [SKIP],
@@ -523,8 +536,11 @@
'test-run-tail-calls/*': [SKIP],
'test-run-unwinding-info/*': [SKIP],
'test-run-variables/*': [SKIP],
+ 'test-serialize/*': [SKIP],
'test-torque/*': [SKIP],
- 'test-macro-assembler-x64/EmbeddedObj': [SKIP],
+ 'test-unwinder/PCIsInV8_InCodeOrEmbeddedRange': [SKIP],
+ 'test-unwinder/PCIsInV8_LargeCodeObject': [SKIP],
+ 'test-unwinder-code-pages/PCIsInV8_LargeCodeObject_CodePagesAPI': [SKIP],
# Field representation tracking is disabled in jitless mode.
'test-field-type-tracking/*': [SKIP],
@@ -600,11 +616,4 @@
'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
}], # variant == turboprop
-##############################################################################
-['variant != future', {
- # Wasm native module cache is temporarily disabled in non-future variant
- # (https://crbug.com/1070199)
- 'test-compilation-cache/*': [SKIP]
-}], # variant != future
-
]
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index e3db983488..9c9210f77e 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -34,12 +34,6 @@ Node* UndefinedConstant(CodeAssembler* m) {
return m->LoadRoot(RootIndex::kUndefinedValue);
}
-TNode<Smi> SmiFromInt32(CodeAssembler* m, Node* value) {
- value = m->ChangeInt32ToIntPtr(value);
- return m->BitcastWordToTaggedSigned(
- m->WordShl(value, kSmiShiftSize + kSmiTagSize));
-}
-
Node* LoadObjectField(CodeAssembler* m, Node* object, int offset,
MachineType type = MachineType::AnyTagged()) {
return m->Load(type, object, m->IntPtrConstant(offset - kHeapObjectTag));
@@ -87,7 +81,7 @@ TEST(SimpleCallRuntime1Arg) {
CodeAssembler m(asm_tester.state());
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* b = SmiTag(&m, m.Int32Constant(0));
+ TNode<Smi> b = SmiTag(&m, m.Int32Constant(0));
m.Return(m.CallRuntime(Runtime::kIsSmi, context, b));
FunctionTester ft(asm_tester.GenerateCode());
CHECK(ft.CallChecked<Oddball>().is_identical_to(
@@ -100,7 +94,7 @@ TEST(SimpleTailCallRuntime1Arg) {
CodeAssembler m(asm_tester.state());
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* b = SmiTag(&m, m.Int32Constant(0));
+ TNode<Smi> b = SmiTag(&m, m.Int32Constant(0));
m.TailCallRuntime(Runtime::kIsSmi, context, b);
FunctionTester ft(asm_tester.GenerateCode());
CHECK(ft.CallChecked<Oddball>().is_identical_to(
@@ -113,8 +107,8 @@ TEST(SimpleCallRuntime2Arg) {
CodeAssembler m(asm_tester.state());
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* a = SmiTag(&m, m.Int32Constant(2));
- Node* b = SmiTag(&m, m.Int32Constant(4));
+ TNode<Smi> a = SmiTag(&m, m.Int32Constant(2));
+ TNode<Smi> b = SmiTag(&m, m.Int32Constant(4));
m.Return(m.CallRuntime(Runtime::kAdd, context, a, b));
FunctionTester ft(asm_tester.GenerateCode());
CHECK_EQ(6, ft.CallChecked<Smi>()->value());
@@ -126,8 +120,8 @@ TEST(SimpleTailCallRuntime2Arg) {
CodeAssembler m(asm_tester.state());
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* a = SmiTag(&m, m.Int32Constant(2));
- Node* b = SmiTag(&m, m.Int32Constant(4));
+ TNode<Smi> a = SmiTag(&m, m.Int32Constant(2));
+ TNode<Smi> b = SmiTag(&m, m.Int32Constant(4));
m.TailCallRuntime(Runtime::kAdd, context, a, b);
FunctionTester ft(asm_tester.GenerateCode());
CHECK_EQ(6, ft.CallChecked<Smi>()->value());
@@ -446,127 +440,6 @@ TEST(TestOutOfScopeVariable) {
CHECK(!asm_tester.GenerateCode().is_null());
}
-TEST(GotoIfException) {
- Isolate* isolate(CcTest::InitIsolateOnce());
-
- const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeAssembler m(asm_tester.state());
-
- TNode<Context> context =
- m.HeapConstant(Handle<Context>(isolate->native_context()));
- TNode<Symbol> to_string_tag =
- m.HeapConstant(isolate->factory()->to_string_tag_symbol());
- Variable exception(&m, MachineRepresentation::kTagged);
-
- CodeAssemblerLabel exception_handler(&m);
- Callable to_string = Builtins::CallableFor(isolate, Builtins::kToString);
- TNode<Object> string = m.CallStub(to_string, context, to_string_tag);
- m.GotoIfException(string, &exception_handler, &exception);
- m.Return(string);
-
- m.Bind(&exception_handler);
- m.Return(m.UncheckedCast<Object>(exception.value()));
-
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<Object> result = ft.Call().ToHandleChecked();
-
- // Should be a TypeError.
- CHECK(result->IsJSObject());
-
- Handle<Object> constructor =
- Object::GetPropertyOrElement(isolate, result,
- isolate->factory()->constructor_string())
- .ToHandleChecked();
- CHECK(constructor->SameValue(*isolate->type_error_function()));
-}
-
-TEST(GotoIfExceptionMultiple) {
- Isolate* isolate(CcTest::InitIsolateOnce());
-
- const int kNumParams = 4; // receiver, first, second, third
- CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeAssembler m(asm_tester.state());
-
- TNode<Context> context =
- m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* first_value = m.Parameter(0);
- Node* second_value = m.Parameter(1);
- Node* third_value = m.Parameter(2);
-
- CodeAssemblerLabel exception_handler1(&m);
- CodeAssemblerLabel exception_handler2(&m);
- CodeAssemblerLabel exception_handler3(&m);
- Variable return_value(&m, MachineRepresentation::kWord32);
- Variable error(&m, MachineRepresentation::kTagged);
-
- return_value.Bind(m.Int32Constant(0));
-
- // try { return ToString(param1) } catch (e) { ... }
- Callable to_string = Builtins::CallableFor(isolate, Builtins::kToString);
- TNode<Object> string = m.CallStub(to_string, context, first_value);
- m.GotoIfException(string, &exception_handler1, &error);
- m.Return(string);
-
- // try { ToString(param2); return 7 } catch (e) { ... }
- m.Bind(&exception_handler1);
- return_value.Bind(m.Int32Constant(7));
- error.Bind(UndefinedConstant(&m));
- string = m.CallStub(to_string, context, second_value);
- m.GotoIfException(string, &exception_handler2, &error);
- m.Return(SmiFromInt32(&m, return_value.value()));
-
- // try { ToString(param3); return 7 & ~2; } catch (e) { return e; }
- m.Bind(&exception_handler2);
- // Return returnValue & ~2
- error.Bind(UndefinedConstant(&m));
- string = m.CallStub(to_string, context, third_value);
- m.GotoIfException(string, &exception_handler3, &error);
- m.Return(SmiFromInt32(
- &m, m.Word32And(return_value.value(),
- m.Word32Xor(m.Int32Constant(2), m.Int32Constant(-1)))));
-
- m.Bind(&exception_handler3);
- m.Return(m.UncheckedCast<Object>(error.value()));
-
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
-
- Handle<Object> result;
- // First handler does not throw, returns result of first value.
- result = ft.Call(isolate->factory()->undefined_value(),
- isolate->factory()->to_string_tag_symbol())
- .ToHandleChecked();
- CHECK(String::cast(*result).IsOneByteEqualTo(OneByteVector("undefined")));
-
- // First handler returns a number.
- result = ft.Call(isolate->factory()->to_string_tag_symbol(),
- isolate->factory()->undefined_value())
- .ToHandleChecked();
- CHECK_EQ(7, Smi::ToInt(*result));
-
- // First handler throws, second handler returns a number.
- result = ft.Call(isolate->factory()->to_string_tag_symbol(),
- isolate->factory()->to_primitive_symbol())
- .ToHandleChecked();
- CHECK_EQ(7 & ~2, Smi::ToInt(*result));
-
- // First handler throws, second handler throws, third handler returns thrown
- // value.
- result = ft.Call(isolate->factory()->to_string_tag_symbol(),
- isolate->factory()->to_primitive_symbol(),
- isolate->factory()->unscopables_symbol())
- .ToHandleChecked();
-
- // Should be a TypeError.
- CHECK(result->IsJSObject());
-
- Handle<Object> constructor =
- Object::GetPropertyOrElement(isolate, result,
- isolate->factory()->constructor_string())
- .ToHandleChecked();
- CHECK(constructor->SameValue(*isolate->type_error_function()));
-}
-
TEST(ExceptionHandler) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
@@ -576,7 +449,7 @@ TEST(ExceptionHandler) {
CodeAssembler::TVariable<Object> var(m.SmiConstant(0), &m);
CodeAssemblerLabel exception(&m, {&var}, CodeAssemblerLabel::kDeferred);
{
- CodeAssemblerScopedExceptionHandler handler(&m, &exception, &var);
+ ScopedExceptionHandler handler(&m, &exception, &var);
TNode<Context> context =
m.HeapConstant(Handle<Context>(isolate->native_context()));
m.CallRuntime(Runtime::kThrow, context, m.SmiConstant(2));
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 3e66856189..16faf976b6 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -5,6 +5,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction.h"
@@ -1002,6 +1003,8 @@ class CodeGeneratorTester {
Builtins::kNoBuiltinId, kMaxUnoptimizedFrameHeight,
kMaxPushedArgumentCount);
+ generator_->tasm()->CodeEntry();
+
// Force a frame to be created.
generator_->frame_access_state()->MarkHasFrame(true);
generator_->AssembleConstructFrame();
@@ -1069,7 +1072,7 @@ class CodeGeneratorTester {
CodeGeneratorTester::PushTypeFlag push_type) {
generator_->AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_S390) || \
- defined(V8_TARGET_ARCH_PPC)
+ defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
// Only folding register pushes is supported on ARM.
bool supported = ((push_type & CodeGenerator::kRegisterPush) == push_type);
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32) || \
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 156ea8074a..fe12950bba 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -34,11 +34,11 @@ CallDescriptor* CreateCallDescriptor(Zone* zone, int return_count,
wasm::FunctionSig::Builder builder(zone, return_count, param_count);
for (int i = 0; i < param_count; i++) {
- builder.AddParam(wasm::ValueTypes::ValueTypeFor(type));
+ builder.AddParam(wasm::ValueType::For(type));
}
for (int i = 0; i < return_count; i++) {
- builder.AddReturn(wasm::ValueTypes::ValueTypeFor(type));
+ builder.AddReturn(wasm::ValueType::For(type));
}
return compiler::GetWasmCallDescriptor(zone, builder.Build());
}
diff --git a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
index 2ce6242e9e..724dc05ebb 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
@@ -14,8 +14,8 @@ namespace compiler {
TEST(ArgumentsMapped) {
FunctionTester T("(function(a) { return arguments; })");
- Handle<Object> arguments;
- T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
+ Handle<Object> arguments =
+ T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandleChecked();
CHECK(arguments->IsJSObject() && !arguments->IsJSArray());
CHECK(JSObject::cast(*arguments).HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
@@ -28,8 +28,8 @@ TEST(ArgumentsMapped) {
TEST(ArgumentsUnmapped) {
FunctionTester T("(function(a) { 'use strict'; return arguments; })");
- Handle<Object> arguments;
- T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
+ Handle<Object> arguments =
+ T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandleChecked();
CHECK(arguments->IsJSObject() && !arguments->IsJSArray());
CHECK(!JSObject::cast(*arguments).HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
@@ -42,8 +42,8 @@ TEST(ArgumentsUnmapped) {
TEST(ArgumentsRest) {
FunctionTester T("(function(a, ...args) { return args; })");
- Handle<Object> arguments;
- T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
+ Handle<Object> arguments =
+ T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandleChecked();
CHECK(arguments->IsJSObject() && arguments->IsJSArray());
CHECK(!JSObject::cast(*arguments).HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
index 6f52f339f3..2def65a79e 100644
--- a/deps/v8/test/cctest/compiler/test-run-load-store.cc
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -264,15 +264,17 @@ void RunLoadImmIndex(MachineType type, TestAlignment t) {
for (int offset = -1; offset <= 200000; offset *= -5) {
for (int i = 0; i < kNumElems; i++) {
BufferedRawMachineAssemblerTester<CType> m;
- void* base_pointer = ComputeOffset(&buffer[0], offset * sizeof(CType));
+ CType* base_pointer = reinterpret_cast<CType*>(
+ ComputeOffset(&buffer[0], offset * sizeof(CType)));
#ifdef V8_COMPRESS_POINTERS
if (type.IsTagged()) {
        // When pointer compression is enabled, we need to access only the
        // lower 32 bits of the tagged value while the buffer contains full
        // 64-bit values.
- base_pointer = LSB(base_pointer, kTaggedSize);
+ base_pointer = reinterpret_cast<CType*>(LSB(base_pointer, kTaggedSize));
}
#endif
+
Node* base = m.PointerConstant(base_pointer);
Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
if (t == TestAlignment::kAligned) {
@@ -300,15 +302,21 @@ void RunLoadStore(MachineType type, TestAlignment t) {
MemCopy(&zap_value, &zap_data, sizeof(CType));
InitBuffer(in_buffer, kNumElems, type);
+#ifdef V8_TARGET_BIG_ENDIAN
+ int offset = sizeof(CType) - ElementSizeInBytes(type.representation());
+#else
+ int offset = 0;
+#endif
+
for (int32_t x = 0; x < kNumElems; x++) {
int32_t y = kNumElems - x - 1;
RawMachineAssemblerTester<int32_t> m;
int32_t OK = 0x29000 + x;
Node* in_base = m.PointerConstant(in_buffer);
- Node* in_index = m.IntPtrConstant(x * sizeof(CType));
+ Node* in_index = m.IntPtrConstant(x * sizeof(CType) + offset);
Node* out_base = m.PointerConstant(out_buffer);
- Node* out_index = m.IntPtrConstant(y * sizeof(CType));
+ Node* out_index = m.IntPtrConstant(y * sizeof(CType) + offset);
if (t == TestAlignment::kAligned) {
Node* load = m.Load(type, in_base, in_index);
m.Store(type.representation(), out_base, out_index, load,
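
The added offset accounts for byte order: a value narrower than CType stored in a CType-sized slot has its low-order bytes at the start of the slot on little-endian targets but at the end on big-endian ones, so narrow accesses must be shifted by sizeof(CType) - ElementSizeInBytes(rep). A standalone worked example (the __BYTE_ORDER__ test assumes a GCC/Clang-style compiler):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      int64_t slot = 0x0102030405060708;  // 8-byte slot, 4-byte payload wanted
      int offset = 0;
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
      offset = sizeof(int64_t) - sizeof(int32_t);  // 4, as in RunLoadStore
    #endif
      int32_t low;
      std::memcpy(&low, reinterpret_cast<uint8_t*>(&slot) + offset, sizeof(low));
      // Prints 0x5060708 on either endianness.
      std::printf("0x%x\n", static_cast<unsigned>(low));
    }
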
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 30ebfd0502..15f9c2d89f 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -173,8 +173,8 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
if (!force_completion) return;
while (!marking->IsComplete()) {
- marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- i::StepOrigin::kV8);
+ marking->Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index 684bda4411..d92375d362 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -46,8 +46,8 @@ Handle<Object> HeapTester::TestAllocateAfterFailures() {
// we wrap the allocator function in an AlwaysAllocateScope. Test that
// all allocations succeed immediately without any retry.
CcTest::CollectAllAvailableGarbage();
- AlwaysAllocateScope scope(CcTest::i_isolate());
Heap* heap = CcTest::heap();
+ AlwaysAllocateScopeForTesting scope(heap);
int size = FixedArray::SizeFor(100);
// Young generation.
HeapObject obj =
@@ -159,17 +159,13 @@ TEST(StressJS) {
  // Add the Foo constructor to the global object.
CHECK(env->Global()
- ->Set(env, v8::String::NewFromUtf8(CcTest::isolate(), "Foo",
- v8::NewStringType::kNormal)
- .ToLocalChecked(),
+ ->Set(env, v8::String::NewFromUtf8Literal(CcTest::isolate(), "Foo"),
v8::Utils::CallableToLocal(function))
.FromJust());
// Call the accessor through JavaScript.
v8::Local<v8::Value> result =
- v8::Script::Compile(
- env, v8::String::NewFromUtf8(CcTest::isolate(), "(new Foo).get",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
+ v8::Script::Compile(env, v8::String::NewFromUtf8Literal(CcTest::isolate(),
+ "(new Foo).get"))
.ToLocalChecked()
->Run(env)
.ToLocalChecked();
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 82cc6c84f3..56730e7b76 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -42,6 +42,11 @@ bool IsTracked(i::Heap* heap, i::ArrayBufferExtension* extension) {
return in_young || in_old;
}
+bool IsTracked(i::Heap* heap, i::JSArrayBuffer buffer) {
+ return V8_ARRAY_BUFFER_EXTENSION_BOOL ? IsTracked(heap, buffer.extension())
+ : IsTracked(buffer);
+}
+
} // namespace
namespace v8 {
@@ -504,6 +509,7 @@ TEST(ArrayBuffer_ExternalBackingStoreSizeIncreases) {
}
TEST(ArrayBuffer_ExternalBackingStoreSizeDecreases) {
+ FLAG_concurrent_array_buffer_sweeping = false;
CcTest::InitializeVM();
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -525,9 +531,10 @@ TEST(ArrayBuffer_ExternalBackingStoreSizeDecreases) {
}
TEST(ArrayBuffer_ExternalBackingStoreSizeIncreasesMarkCompact) {
- if (FLAG_never_compact || V8_ARRAY_BUFFER_EXTENSION_BOOL) return;
+ if (FLAG_never_compact) return;
ManualGCScope manual_gc_scope;
FLAG_manual_evacuation_candidates_selection = true;
+ FLAG_concurrent_array_buffer_sweeping = false;
CcTest::InitializeVM();
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -544,13 +551,13 @@ TEST(ArrayBuffer_ExternalBackingStoreSizeIncreasesMarkCompact) {
Local<v8::ArrayBuffer> ab1 =
v8::ArrayBuffer::New(isolate, kArraybufferSize);
Handle<JSArrayBuffer> buf1 = v8::Utils::OpenHandle(*ab1);
- CHECK(IsTracked(*buf1));
+ CHECK(IsTracked(heap, *buf1));
heap::GcAndSweep(heap, NEW_SPACE);
heap::GcAndSweep(heap, NEW_SPACE);
Page* page_before_gc = Page::FromHeapObject(*buf1);
heap::ForceEvacuationCandidate(page_before_gc);
- CHECK(IsTracked(*buf1));
+ CHECK(IsTracked(heap, *buf1));
CcTest::CollectAllGarbage();
diff --git a/deps/v8/test/cctest/heap/test-external-string-tracker.cc b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
index 7eb03e10e2..d6cffd43ca 100644
--- a/deps/v8/test/cctest/heap/test-external-string-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
@@ -147,8 +147,7 @@ TEST(ExternalString_ExternalBackingStoreSizeIncreasesAfterExternalization) {
// Allocate normal string in the new gen.
v8::Local<v8::String> str =
- v8::String::NewFromUtf8(isolate, TEST_STR, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(isolate, TEST_STR);
CHECK_EQ(0, heap->new_space()->ExternalBackingStoreBytes(type) -
new_backing_store_before);
@@ -199,8 +198,7 @@ TEST(ExternalString_PromotedThinString) {
// New external string in the young space. This string has the same content
// as the previous one (that was already internalized).
v8::Local<v8::String> string2 =
- v8::String::NewFromUtf8(isolate, TEST_STR, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(isolate, TEST_STR);
bool success =
string2->MakeExternal(new TestOneByteResource(i::StrDup(TEST_STR)));
CHECK(success);
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 5eaa28404e..d181f764f8 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -1576,7 +1576,7 @@ HEAP_TEST(TestSizeOfObjects) {
// Allocate objects on several different old-space pages so that
// concurrent sweeper threads will be busy sweeping the old space on
// subsequent GC runs.
- AlwaysAllocateScope always_allocate(CcTest::i_isolate());
+ AlwaysAllocateScopeForTesting always_allocate(heap);
int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
for (int i = 1; i <= 100; i++) {
isolate->factory()->NewFixedArray(8192, AllocationType::kOld);
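The rename that dominates the rest of this file follows the same shape:
AlwaysAllocateScope(Isolate*) becomes AlwaysAllocateScopeForTesting(Heap*).
The scope now binds directly to the Heap whose allocation failure path it
suppresses, and the ForTesting suffix marks it as test-only machinery rather
than an embedder-facing knob. The typical use, mirroring the hunks below and
assuming an internal Heap* and Factory* in scope:

    {
      // Allocations inside this scope may not fail (and so cannot take the
      // GC-triggering failure path), even though the space is deliberately full.
      AlwaysAllocateScopeForTesting always_allocate(heap);
      heap::SimulateFullSpace(heap->old_space());
      factory->NewFixedArray(1, AllocationType::kOld);
    }  // normal allocation behavior resumes here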
@@ -2175,8 +2175,8 @@ TEST(InstanceOfStubWriteBarrier) {
while (!marking_state->IsBlack(f->code()) && !marking->IsStopped()) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
- marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
}
CHECK(marking->IsMarking());
@@ -2189,7 +2189,6 @@ TEST(InstanceOfStubWriteBarrier) {
g->Call(ctx, global, 0, nullptr).ToLocalChecked();
}
- CcTest::heap()->incremental_marking()->set_should_hurry(true);
CcTest::CollectGarbage(OLD_SPACE);
}
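Two incremental-marking test interfaces change here as well: the stepping
entry point drops its V8 prefix (marking->V8Step(...) becomes
marking->Step(...) with identical arguments), and the should_hurry hint is
removed, so tests that previously set it now simply request a full old-space
GC. The driver loop used throughout the file takes this shape:

    const double kStepSizeInMs = 100;
    while (!marking->IsComplete()) {
      marking->Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                    StepOrigin::kV8);
      if (marking->IsReadyToOverApproximateWeakClosure()) {
        marking->FinalizeIncrementally();
      }
    }
    CcTest::CollectGarbage(OLD_SPACE);  // finishes the cycle; no hurry flag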
@@ -2269,8 +2268,8 @@ TEST(IdleNotificationFinishMarking) {
const double kStepSizeInMs = 100;
do {
- marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
} while (!CcTest::heap()
->mark_compact_collector()
->marking_worklists()
@@ -2299,7 +2298,7 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
heap::SimulateFullSpace(CcTest::heap()->new_space());
- AlwaysAllocateScope always_allocate(CcTest::i_isolate());
+ AlwaysAllocateScopeForTesting always_allocate(CcTest::heap());
v8::Local<v8::Value> res = CompileRun(
"function c(x) {"
" this.x = x;"
@@ -2823,7 +2822,7 @@ TEST(Regress1465) {
CompileRun("function F() {}");
{
- AlwaysAllocateScope always_allocate(CcTest::i_isolate());
+ AlwaysAllocateScopeForTesting always_allocate(CcTest::i_isolate()->heap());
for (int i = 0; i < transitions_count; i++) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "var o = new F; o.prop%d = %d;", i, i);
@@ -2861,7 +2860,7 @@ static i::Handle<JSObject> GetByName(const char* name) {
#ifdef DEBUG
static void AddTransitions(int transitions_count) {
- AlwaysAllocateScope always_allocate(CcTest::i_isolate());
+ AlwaysAllocateScopeForTesting always_allocate(CcTest::i_isolate()->heap());
for (int i = 0; i < transitions_count; i++) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "var o = new F; o.prop%d = %d;", i, i);
@@ -3011,7 +3010,7 @@ TEST(ReleaseOverReservedPages) {
const int initial_page_count = old_space->CountTotalPages();
const int overall_page_count = number_of_test_pages + initial_page_count;
for (int i = 0; i < number_of_test_pages; i++) {
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
heap::SimulateFullSpace(old_space);
factory->NewFixedArray(1, AllocationType::kOld);
}
@@ -3507,8 +3506,13 @@ TEST(DetailedErrorStackTraceBuiltinExit) {
FixedArray parameters = stack_trace->Parameters(0);
CHECK_EQ(parameters.length(), 2);
+#ifdef V8_REVERSE_JSARGS
+ CHECK(parameters.get(1).IsSmi());
+ CHECK_EQ(Smi::ToInt(parameters.get(1)), 9999);
+#else
CHECK(parameters.get(0).IsSmi());
CHECK_EQ(Smi::ToInt(parameters.get(0)), 9999);
+#endif
});
}
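The #ifdef above accounts for V8_REVERSE_JSARGS builds, in which JavaScript
arguments are laid out on the stack in reverse order, so the captured
parameter array is indexed from the other end. A sketch of layout-independent
access to the i-th of n arguments (the index arithmetic is inferred from the
CHECKs above, where n == 2 and the checked element moves from index 0 to 1):

    #ifdef V8_REVERSE_JSARGS
      Object arg = parameters.get(n - 1 - i);  // arguments stored reversed
    #else
      Object arg = parameters.get(i);
    #endif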
@@ -3588,7 +3592,7 @@ TEST(Regress169928) {
// This should crash with a protection violation if we are running a build
// with the bug.
- AlwaysAllocateScope aa_scope(isolate);
+ AlwaysAllocateScopeForTesting aa_scope(isolate->heap());
v8::Script::Compile(env.local(), mote_code_string)
.ToLocalChecked()
->Run(env.local())
@@ -4858,10 +4862,8 @@ TEST(Regress357137) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope hscope(isolate);
v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
- global->Set(
- v8::String::NewFromUtf8(isolate, "interrupt", v8::NewStringType::kNormal)
- .ToLocalChecked(),
- v8::FunctionTemplate::New(isolate, RequestInterrupt));
+ global->Set(v8::String::NewFromUtf8Literal(isolate, "interrupt"),
+ v8::FunctionTemplate::New(isolate, RequestInterrupt));
v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
CHECK(!context.IsEmpty());
v8::Context::Scope cscope(context);
@@ -4985,7 +4987,6 @@ TEST(Regress3631) {
"for (var i = 0; i < 50; i++) {"
" weak_map.set(future_keys[i], i);"
"}");
- heap->incremental_marking()->set_should_hurry(true);
CcTest::CollectGarbage(OLD_SPACE);
}
@@ -5131,7 +5132,7 @@ void AllocateInSpace(Isolate* isolate, size_t bytes, AllocationSpace space) {
CHECK(IsAligned(bytes, kTaggedSize));
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(isolate->heap());
int elements =
static_cast<int>((bytes - FixedArray::kHeaderSize) / kTaggedSize);
Handle<FixedArray> array = factory->NewFixedArray(
@@ -5217,10 +5218,8 @@ TEST(MessageObjectLeak) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
- global->Set(
- v8::String::NewFromUtf8(isolate, "check", v8::NewStringType::kNormal)
- .ToLocalChecked(),
- v8::FunctionTemplate::New(isolate, CheckLeak));
+ global->Set(v8::String::NewFromUtf8Literal(isolate, "check"),
+ v8::FunctionTemplate::New(isolate, CheckLeak));
v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope cscope(context);
@@ -5421,7 +5420,7 @@ HEAP_TEST(Regress589413) {
{
// Ensure that incremental marking is not started unexpectedly.
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(isolate->heap());
// Make sure the byte arrays will be promoted on the next GC.
CcTest::CollectGarbage(NEW_SPACE);
@@ -5553,9 +5552,9 @@ TEST(Regress598319) {
// only partially marked the large object.
const double kSmallStepSizeInMs = 0.1;
while (!marking->IsComplete()) {
- marking->V8Step(kSmallStepSizeInMs,
- i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->Step(kSmallStepSizeInMs,
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->ProgressBar() > 0) {
CHECK_NE(page->ProgressBar(), arr.get().Size());
{
@@ -5573,9 +5572,9 @@ TEST(Regress598319) {
// Finish marking with bigger steps to speed up test.
const double kLargeStepSizeInMs = 1000;
while (!marking->IsComplete()) {
- marking->V8Step(kLargeStepSizeInMs,
- i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->Step(kLargeStepSizeInMs,
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5651,14 +5650,14 @@ TEST(Regress615489) {
CHECK(marking->IsMarking());
marking->StartBlackAllocationForTesting();
{
- AlwaysAllocateScope always_allocate(CcTest::i_isolate());
+ AlwaysAllocateScopeForTesting always_allocate(heap);
v8::HandleScope inner(CcTest::isolate());
isolate->factory()->NewFixedArray(500, AllocationType::kOld)->Size();
}
const double kStepSizeInMs = 100;
while (!marking->IsComplete()) {
- marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5711,8 +5710,7 @@ TEST(Regress631969) {
// Allocate a cons string and promote it to a fresh page in the old space.
heap::SimulateFullSpace(heap->old_space());
- Handle<String> s3;
- factory->NewConsString(s1, s2).ToHandle(&s3);
+ Handle<String> s3 = factory->NewConsString(s1, s2).ToHandleChecked();
CcTest::CollectGarbage(NEW_SPACE);
CcTest::CollectGarbage(NEW_SPACE);
@@ -5720,8 +5718,8 @@ TEST(Regress631969) {
const double kStepSizeInMs = 100;
IncrementalMarking* marking = heap->incremental_marking();
while (!marking->IsComplete()) {
- marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
+ marking->Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -6391,13 +6389,13 @@ HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
heap::SimulateIncrementalMarking(heap, false);
Handle<Map> map;
{
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
map = isolate->factory()->NewMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
}
heap->incremental_marking()->StartBlackAllocationForTesting();
Handle<HeapObject> object;
{
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
object = handle(isolate->factory()->NewForTest(map, AllocationType::kOld),
isolate);
}
@@ -6893,6 +6891,48 @@ TEST(Regress9701) {
CHECK_EQ(mark_sweep_count_before, mark_sweep_count_after);
}
+#if defined(V8_TARGET_ARCH_64_BIT) && !defined(V8_OS_ANDROID)
+UNINITIALIZED_TEST(HugeHeapLimit) {
+ uint64_t kMemoryGB = 16;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ create_params.constraints.ConfigureDefaults(kMemoryGB * GB, kMemoryGB * GB);
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+#ifdef V8_COMPRESS_POINTERS
+ size_t kExpectedHeapLimit = Heap::AllocatorLimitOnMaxOldGenerationSize();
+#else
+ size_t kExpectedHeapLimit = size_t{4} * GB;
+#endif
+ CHECK_EQ(kExpectedHeapLimit, i_isolate->heap()->MaxOldGenerationSize());
+ CHECK_LT(size_t{3} * GB, i_isolate->heap()->MaxOldGenerationSize());
+ isolate->Dispose();
+}
+#endif
+
+UNINITIALIZED_TEST(HeapLimit) {
+ uint64_t kMemoryGB = 15;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ create_params.constraints.ConfigureDefaults(kMemoryGB * GB, kMemoryGB * GB);
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+#if defined(V8_TARGET_ARCH_64_BIT) && !defined(V8_OS_ANDROID)
+ size_t kExpectedHeapLimit = size_t{2} * GB;
+#else
+ size_t kExpectedHeapLimit = size_t{1} * GB;
+#endif
+ CHECK_EQ(kExpectedHeapLimit, i_isolate->heap()->MaxOldGenerationSize());
+ isolate->Dispose();
+}
+
+TEST(NoCodeRangeInJitlessMode) {
+ if (!FLAG_jitless) return;
+ CcTest::InitializeVM();
+ CHECK(
+ CcTest::i_isolate()->heap()->memory_allocator()->code_range().is_empty());
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
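The three tests added at the end of this file pin down the heap-sizing
defaults: with 16 GB of physical memory, a 64-bit non-Android build now
configures a roughly 4 GB max old generation (capped at the
pointer-compression cage's allocator limit when V8_COMPRESS_POINTERS is set,
but still above 3 GB), 15 GB still selects the 2 GB tier (1 GB on 32-bit or
Android), and jitless mode reserves no code range at all. A sketch of how an
embedder reaches the same defaults, with GB defined locally since the
internal constant is not exported:

    constexpr uint64_t GB = uint64_t{1} << 30;
    v8::Isolate::CreateParams params;
    params.array_buffer_allocator =
        v8::ArrayBuffer::Allocator::NewDefaultAllocator();
    // ConfigureDefaults derives generation sizes from the machine's
    // physical memory and virtual address space budget.
    params.constraints.ConfigureDefaults(/*physical_memory=*/16 * GB,
                                         /*virtual_memory_limit=*/16 * GB);
    v8::Isolate* isolate = v8::Isolate::New(params);
    // ... use the isolate ...
    isolate->Dispose();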
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index 861c48d69d..67e5c0d48e 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -24,12 +24,11 @@ Page* HeapTester::AllocateByteArraysOnPage(
const int kLength = 256 - ByteArray::kHeaderSize;
const int kSize = ByteArray::SizeFor(kLength);
CHECK_EQ(kSize, 256);
- Isolate* isolate = heap->isolate();
PagedSpace* old_space = heap->old_space();
Page* page;
// Fill a page with byte arrays.
{
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
heap::SimulateFullSpace(old_space);
ByteArray byte_array;
CHECK(AllocateByteArrayForTest(heap, kLength, AllocationType::kOld)
@@ -181,7 +180,7 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
Handle<FixedArray> AllocateArrayOnFreshPage(Isolate* isolate,
PagedSpace* old_space, int length) {
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(isolate->heap());
heap::SimulateFullSpace(old_space);
return isolate->factory()->NewFixedArray(length, AllocationType::kOld);
}
@@ -242,7 +241,7 @@ HEAP_TEST(InvalidatedSlotsRightTrimLargeFixedArray) {
AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
Handle<FixedArray> trimmed;
{
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
trimmed = factory->NewFixedArray(
kMaxRegularHeapObjectSize / kTaggedSize + 100, AllocationType::kOld);
DCHECK(MemoryChunk::FromHeapObject(*trimmed)->InLargeObjectSpace());
@@ -319,7 +318,7 @@ HEAP_TEST(InvalidatedSlotsFastToSlow) {
AllocateArrayOnFreshPage(isolate, old_space, 1);
Handle<JSObject> obj;
{
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
Handle<JSFunction> function = factory->NewFunctionForTest(name);
function->shared().set_expected_nof_properties(3);
obj = factory->NewJSObject(function, AllocationType::kOld);
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 56649dd1dd..eb91a5e671 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -350,7 +350,7 @@ TEST(SizeOfInitialHeap) {
// snapshot.
// In PPC the page size is 64K, causing more internal fragmentation
// hence requiring a larger limit.
-#if V8_OS_LINUX && V8_HOST_ARCH_PPC
+#if V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64)
const size_t kMaxInitialSizePerSpace = 3 * MB;
#else
const size_t kMaxInitialSizePerSpace = 2 * MB;
@@ -568,7 +568,7 @@ HEAP_TEST(Regress777177) {
{
// Ensure a new linear allocation area on a fresh page.
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
heap::SimulateFullSpace(old_space);
AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
HeapObject obj = result.ToObjectChecked();
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 6d2e697472..7be26694ad 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -51,8 +51,7 @@ const char* const BytecodeExpectationsPrinter::kIndent = " ";
v8::Local<v8::String> BytecodeExpectationsPrinter::V8StringFromUTF8(
const char* data) const {
- return v8::String::NewFromUtf8(isolate_, data, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ return v8::String::NewFromUtf8(isolate_, data).ToLocalChecked();
}
std::string BytecodeExpectationsPrinter::WrapCodeInFunction(
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index 067979d20d..98d2c6c61f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
/* 50 S> */ B(Return),
]
@@ -29,9 +28,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 36
+bytecode array length: 35
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
@@ -60,9 +58,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(4),
/* 61 S> */ B(Return),
]
@@ -78,9 +75,8 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 66
+bytecode array length: 65
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(4),
@@ -123,9 +119,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
/* 52 S> */ B(CreateArrayFromIterable),
@@ -143,9 +138,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 75
+bytecode array length: 74
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
/* 52 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
@@ -190,9 +184,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 25
+bytecode array length: 24
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
/* 52 S> */ B(CreateArrayFromIterable),
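From here on, the golden-file churn is almost entirely one mechanical change:
every bytecode array shrinks because the explicit stack check emitted at
function entry is gone (it is now implicit in the function prologue), and
loop back-edge checks are folded into JumpLoop, which inherits the loop's
source position. The recurring shape of the diff, in the goldens' own
notation:

    before:   /* 30 E> */ B(StackCheck),            // function entry
              ...
              /* 56 E> */ B(StackCheck),            // loop back edge
              ...
              B(JumpLoop), U8(40), I8(0),

    after:    ...                                    // entry check now implicit
              /* 56 E> */ B(JumpLoop), U8(39), I8(0),  // check rides the back edge

Jump offsets, constant-pool Smi targets, and handler-table ranges shift
accordingly, which explains the pervasive off-by-one and off-by-few edits in
the files that follow.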
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
index 07d2ea75ef..826018e952 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -12,9 +12,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 25
+bytecode array length: 24
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 49 S> */ B(LdaSmi), I8(1),
@@ -42,9 +41,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(55),
B(Star), R(0),
/* 54 S> */ B(LdaSmi), I8(100),
@@ -65,9 +63,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 27
+bytecode array length: 26
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(55),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(100),
@@ -95,9 +92,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 28
+bytecode array length: 27
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(55),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(56),
@@ -125,9 +121,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 36
+bytecode array length: 35
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(55),
B(Star), R(0),
/* 54 S> */ B(LdaSmi), I8(1),
@@ -158,9 +153,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 36
+bytecode array length: 35
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(55),
B(Star), R(0),
/* 54 S> */ B(LdaSmi), I8(1),
@@ -190,9 +184,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 73
+bytecode array length: 72
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 50 S> */ B(LdaSmi), I8(20),
@@ -238,9 +231,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 43
+bytecode array length: 42
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(17),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 8691944b20..8701e50592 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -14,13 +14,12 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 148
+bytecode array length: 144
bytecodes: [
- /* 17 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
+ /* 17 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
@@ -35,9 +34,12 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(53),
- B(Ldar), R(5),
- B(Jump), U8(36),
+ B(Jump), U8(50),
+ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
+ B(Jump), U8(41),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(3),
B(Star), R(4),
@@ -53,10 +55,6 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(15),
- B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -93,8 +91,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [20, 94, 102],
- [23, 56, 60],
+ [19, 98, 98],
+ [22, 64, 64],
]
---
@@ -104,13 +102,12 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 193
+bytecode array length: 189
bytecodes: [
- /* 17 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
+ /* 17 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
@@ -125,7 +122,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(98),
+ B(Jump), U8(95),
/* 22 S> */ B(LdaSmi), I8(42),
B(Star), R(6),
B(LdaFalse),
@@ -142,9 +139,12 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(53),
- B(Ldar), R(5),
- B(Jump), U8(36),
+ B(Jump), U8(50),
+ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
+ B(Jump), U8(41),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(6),
B(Star), R(4),
@@ -160,10 +160,6 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(15),
- B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -203,8 +199,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [20, 139, 147],
- [23, 101, 105],
+ [19, 143, 143],
+ [22, 109, 109],
]
---
@@ -214,13 +210,12 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 364
+bytecode array length: 361
bytecodes: [
- /* 17 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ /* 17 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(0),
B(Mov), R(context), R(6),
B(Mov), R(context), R(7),
@@ -254,13 +249,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
B(LdaNamedProperty), R(14), U8(6), U8(9),
- B(JumpIfToBooleanTrue), U8(67),
+ B(JumpIfToBooleanTrue), U8(66),
B(LdaNamedProperty), R(14), U8(7), U8(11),
B(Star), R(14),
B(LdaFalse),
B(Star), R(10),
B(Mov), R(14), R(1),
- /* 22 E> */ B(StackCheck),
/* 31 S> */ B(Mov), R(1), R(3),
/* 42 S> */ B(LdaFalse),
B(Star), R(17),
@@ -279,7 +273,7 @@ bytecodes: [
B(Mov), R(15), R(12),
B(Jump), U8(20),
B(Ldar), R(15),
- B(JumpLoop), U8(84), I8(0),
+ /* 22 E> */ B(JumpLoop), U8(83), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(12),
B(Star), R(11),
@@ -325,8 +319,12 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(4),
B(Mov), R(12), R(5),
- B(Jump), U8(51),
- B(Jump), U8(36),
+ B(Jump), U8(50),
+ B(LdaUndefined),
+ B(Star), R(5),
+ B(LdaSmi), I8(1),
+ B(Star), R(4),
+ B(Jump), U8(41),
B(Star), R(8),
B(CreateCatchContext), R(8), U8(14),
B(Star), R(7),
@@ -342,10 +340,6 @@ bytecodes: [
B(Star), R(5),
B(LdaSmi), I8(2),
B(Star), R(4),
- B(Jump), U8(15),
- B(LdaSmi), I8(-1),
- B(Star), R(5),
- B(Star), R(4),
B(Jump), U8(7),
B(Star), R(5),
B(LdaZero),
@@ -374,7 +368,7 @@ bytecodes: [
]
constant pool: [
Smi [29],
- Smi [142],
+ Smi [141],
Smi [16],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -388,16 +382,16 @@ constant pool: [
Smi [6],
Smi [9],
SCOPE_INFO_TYPE,
- Smi [269],
+ Smi [267],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [20, 310, 318],
- [23, 274, 276],
- [87, 174, 182],
- [206, 239, 241],
+ [19, 315, 315],
+ [22, 281, 281],
+ [86, 172, 180],
+ [204, 237, 239],
]
---
@@ -408,13 +402,12 @@ snippet: "
"
frame size: 17
parameter count: 1
-bytecode array length: 467
+bytecode array length: 463
bytecodes: [
- /* 44 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(5),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
+ /* 44 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
@@ -477,7 +470,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(10), R(2),
- B(Jump), U8(241),
+ B(Jump), U8(238),
B(LdaNamedProperty), R(7), U8(14), U8(20),
B(JumpIfUndefinedOrNull), U8(11),
B(Star), R(12),
@@ -545,9 +538,12 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(7), R(2),
- B(Jump), U8(53),
- B(Ldar), R(7),
- B(Jump), U8(36),
+ B(Jump), U8(50),
+ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
+ B(Jump), U8(41),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(17),
B(Star), R(4),
@@ -563,10 +559,6 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
- B(Jump), U8(15),
- B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -612,13 +604,13 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
SCOPE_INFO_TYPE,
- Smi [372],
+ Smi [369],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [20, 413, 421],
- [23, 375, 379],
+ [19, 417, 417],
+ [22, 383, 383],
]
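Beyond the entry-StackCheck removal, the async-generator goldens show a
rewrite of the return path through try/finally: instead of jumping to a
shared block that reloads the completion value, the "return" arm now
materializes its state inline, and the now-unreachable fallback arm
(LdaSmi -1 into both state registers) is dropped, shortening the arrays and
shifting every handler-table range by a few bytes. In the goldens' notation:

    before:   B(Jump), U8(53),
              B(Ldar), R(5),
              B(Jump), U8(36),

    after:    B(Jump), U8(50),
              B(LdaUndefined),
              B(Star), R(2),
              B(LdaSmi), I8(1),
              B(Star), R(1),
              B(Jump), U8(41),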
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
index 6d88ba285a..94d285aa15 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
@@ -12,162 +12,148 @@ top level await: yes
snippet: "
await 42;
"
-frame size: 8
-parameter count: 2
-bytecode array length: 142
+frame size: 7
+parameter count: 1
+bytecode array length: 127
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(Mov), R(arg0), R(2),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
- B(PushContext), R(2),
- B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
B(Star), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
- B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
+ B(Ldar), R(2),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 10 S> */ B(Return),
- B(Mov), R(3), R(1),
+ B(Mov), R(2), R(1),
B(Ldar), R(1),
- B(Mov), R(context), R(3),
+ B(Mov), R(context), R(2),
/* 0 S> */ B(LdaSmi), I8(42),
- B(Star), R(5),
- B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(4), U8(2),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(1),
- B(ResumeGenerator), R(0), R(0), U8(4),
B(Star), R(4),
+ B(Mov), R(0), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(3), U8(2),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(5),
+ B(Star), R(4),
B(LdaZero),
- B(TestReferenceEqual), R(5),
+ B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
- B(Ldar), R(4),
+ B(Ldar), R(3),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(5),
+ B(Star), R(4),
B(LdaTrue),
- B(Star), R(6),
- B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ B(Star), R(5),
+ B(Mov), R(0), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 10 S> */ B(Return),
- B(Star), R(4),
- B(CreateCatchContext), R(4), U8(5),
B(Star), R(3),
+ B(CreateCatchContext), R(3), U8(4),
+ B(Star), R(2),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(3),
- B(PushContext), R(4),
+ B(Ldar), R(2),
+ B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaTrue),
- B(Star), R(7),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 10 S> */ B(Return),
]
constant pool: [
- Smi [35],
- Smi [79],
- SCOPE_INFO_TYPE,
+ Smi [21],
+ Smi [65],
Smi [10],
Smi [7],
SCOPE_INFO_TYPE,
]
handlers: [
- [64, 114, 114],
+ [49, 99, 99],
]
---
snippet: "
await import(\"foo\");
"
-frame size: 8
-parameter count: 2
-bytecode array length: 152
+frame size: 7
+parameter count: 1
+bytecode array length: 137
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(Mov), R(arg0), R(2),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
- B(PushContext), R(2),
- B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
B(Star), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
- B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
+ B(Ldar), R(2),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 21 S> */ B(Return),
- B(Mov), R(3), R(1),
+ B(Mov), R(2), R(1),
B(Ldar), R(1),
- B(Mov), R(context), R(3),
- /* 0 S> */ B(LdaConstant), U8(5),
- B(Star), R(5),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kDynamicImportCall), R(4), U8(2),
- B(Star), R(5),
- B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(4), U8(2),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(1),
- B(ResumeGenerator), R(0), R(0), U8(4),
+ B(Mov), R(context), R(2),
+ /* 0 S> */ B(LdaConstant), U8(4),
+ B(Star), R(4),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kDynamicImportCall), R(3), U8(2),
B(Star), R(4),
+ B(Mov), R(0), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(3), U8(2),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(5),
+ B(Star), R(4),
B(LdaZero),
- B(TestReferenceEqual), R(5),
+ B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
- B(Ldar), R(4),
+ B(Ldar), R(3),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(5),
+ B(Star), R(4),
B(LdaTrue),
- B(Star), R(6),
- B(Mov), R(0), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ B(Star), R(5),
+ B(Mov), R(0), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 21 S> */ B(Return),
- B(Star), R(4),
- B(CreateCatchContext), R(4), U8(6),
B(Star), R(3),
+ B(CreateCatchContext), R(3), U8(5),
+ B(Star), R(2),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(3),
- B(PushContext), R(4),
+ B(Ldar), R(2),
+ B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaTrue),
- B(Star), R(7),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 21 S> */ B(Return),
]
constant pool: [
- Smi [35],
- Smi [89],
- SCOPE_INFO_TYPE,
+ Smi [21],
+ Smi [75],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
SCOPE_INFO_TYPE,
]
handlers: [
- [64, 124, 124],
+ [49, 109, 109],
]
---
@@ -178,84 +164,77 @@ snippet: "
}
foo();
"
-frame size: 9
-parameter count: 2
-bytecode array length: 153
+frame size: 8
+parameter count: 1
+bytecode array length: 138
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(Mov), R(arg0), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
- B(PushContext), R(3),
- B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
B(Star), R(0),
- B(CreateClosure), U8(3), U8(0), U8(0),
+ B(CreateClosure), U8(2), U8(0), U8(0),
B(Star), R(1),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
- B(Ldar), R(4),
+ B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 54 S> */ B(Return),
- B(Mov), R(4), R(2),
+ B(Mov), R(3), R(2),
B(Ldar), R(2),
- B(Mov), R(context), R(4),
+ B(Mov), R(context), R(3),
/* 0 S> */ B(LdaSmi), I8(42),
- B(Star), R(6),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(5), U8(2),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
- B(ResumeGenerator), R(0), R(0), U8(5),
B(Star), R(5),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(4), U8(2),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaZero),
- B(TestReferenceEqual), R(6),
+ B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
- B(Ldar), R(5),
+ B(Ldar), R(4),
B(ReThrow),
/* 47 S> */ B(CallUndefinedReceiver0), R(1), U8(0),
B(LdaUndefined),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaTrue),
- B(Star), R(7),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
/* 54 S> */ B(Return),
- B(Star), R(5),
- B(CreateCatchContext), R(5), U8(6),
B(Star), R(4),
+ B(CreateCatchContext), R(4), U8(5),
+ B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(4),
- B(PushContext), R(5),
+ B(Ldar), R(3),
+ B(PushContext), R(4),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star), R(6),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(0), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
+ B(Star), R(7),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [43],
- Smi [87],
- SCOPE_INFO_TYPE,
+ Smi [29],
+ Smi [73],
SHARED_FUNCTION_INFO_TYPE,
Smi [10],
Smi [7],
SCOPE_INFO_TYPE,
]
handlers: [
- [72, 125, 125],
+ [57, 110, 110],
]
---
@@ -263,87 +242,80 @@ snippet: "
import * as foo from \"bar\";
await import(\"goo\");
"
-frame size: 9
-parameter count: 2
-bytecode array length: 164
+frame size: 8
+parameter count: 1
+bytecode array length: 149
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(Mov), R(arg0), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
- B(PushContext), R(3),
- B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
B(Star), R(0),
B(LdaZero),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(4), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(3), U8(1),
B(Star), R(1),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
- B(Ldar), R(4),
+ B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 49 S> */ B(Return),
- B(Mov), R(4), R(2),
+ B(Mov), R(3), R(2),
B(Ldar), R(2),
- B(Mov), R(context), R(4),
- /* 28 S> */ B(LdaConstant), U8(5),
- B(Star), R(6),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kDynamicImportCall), R(5), U8(2),
- B(Star), R(6),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(5), U8(2),
- /* 28 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
- B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Mov), R(context), R(3),
+ /* 28 S> */ B(LdaConstant), U8(4),
+ B(Star), R(5),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kDynamicImportCall), R(4), U8(2),
B(Star), R(5),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(4), U8(2),
+ /* 28 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaZero),
- B(TestReferenceEqual), R(6),
+ B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
- B(Ldar), R(5),
+ B(Ldar), R(4),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaTrue),
- B(Star), R(7),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
/* 49 S> */ B(Return),
- B(Star), R(5),
- B(CreateCatchContext), R(5), U8(6),
B(Star), R(4),
+ B(CreateCatchContext), R(4), U8(5),
+ B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(4),
- B(PushContext), R(5),
+ B(Ldar), R(3),
+ B(PushContext), R(4),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star), R(6),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(0), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
+ B(Star), R(7),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [47],
- Smi [101],
- SCOPE_INFO_TYPE,
+ Smi [33],
+ Smi [87],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
SCOPE_INFO_TYPE,
]
handlers: [
- [76, 136, 136],
+ [61, 121, 121],
]
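The async-module goldens change shape more substantially: the generated
module function's parameter count drops from 2 to 1 and the preamble that
pushed the module context by hand disappears, so the body now begins directly
with the async-function setup and every register index shifts down by one.
In the goldens' notation, the removed prologue is:

    B(LdaConstant), U8(2),
    B(Star), R(3),
    B(Mov), R(arg0), R(2),
    B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
    B(PushContext), R(2),

which suggests the module context is now entered before this bytecode runs
rather than inside it.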
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
index 8677bcd99b..61e3ff9e4f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(8),
@@ -36,9 +35,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(11),
@@ -61,9 +59,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(8),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index a5f97d46c4..4d53c3afa5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -13,9 +13,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 97 S> */ B(Return),
@@ -35,9 +34,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 86 S> */ B(Return),
@@ -61,17 +59,15 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 54
+bytecode array length: 52
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(1),
B(Star), R(1),
/* 65 S> */ B(LdaSmi), I8(10),
/* 65 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(38),
- /* 56 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(37),
/* 75 S> */ B(Ldar), R(1),
/* 81 E> */ B(MulSmi), I8(12), U8(1),
B(Star), R(1),
@@ -86,7 +82,7 @@ bytecodes: [
/* 132 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 138 S> */ B(Jump), U8(5),
- B(JumpLoop), U8(40), I8(0),
+ /* 56 E> */ B(JumpLoop), U8(39), I8(0),
/* 147 S> */ B(Ldar), R(1),
/* 156 S> */ B(Return),
]
@@ -110,12 +106,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 62
+bytecode array length: 60
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 45 E> */ B(StackCheck),
/* 62 S> */ B(LdaZero),
/* 68 E> */ B(TestLessThan), R(0), U8(0),
B(JumpIfFalse), U8(4),
@@ -139,7 +133,7 @@ bytecodes: [
/* 173 S> */ B(Ldar), R(0),
/* 179 E> */ B(AddSmi), I8(1), U8(5),
B(Star), R(0),
- B(JumpLoop), U8(52), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(51), I8(0),
/* 186 S> */ B(Ldar), R(0),
/* 195 S> */ B(Return),
]
@@ -163,28 +157,26 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 44
+bytecode array length: 43
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 45 E> */ B(StackCheck),
/* 71 S> */ B(LdaSmi), I8(3),
/* 71 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(22),
- /* 62 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(20),
/* 82 S> */ B(LdaSmi), I8(2),
/* 88 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 94 S> */ B(Jump), U8(12),
+ /* 94 S> */ B(Jump), U8(11),
/* 105 S> */ B(Ldar), R(0),
/* 111 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(0),
- B(JumpLoop), U8(24), I8(1),
+ B(Jump), U8(11),
/* 122 S> */ B(Ldar), R(0),
/* 128 E> */ B(AddSmi), I8(1), U8(3),
B(Star), R(0),
- /* 135 S> */ B(Jump), U8(2),
+ /* 135 S> */ B(Jump), U8(5),
+ /* 45 E> */ B(JumpLoop), U8(34), I8(0),
/* 144 S> */ B(Ldar), R(0),
/* 153 S> */ B(Return),
]
@@ -205,23 +197,21 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 34
+bytecode array length: 32
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 54 S> */ B(LdaSmi), I8(1),
B(Star), R(1),
/* 64 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanFalse), U8(20),
- /* 57 E> */ B(StackCheck),
+ B(JumpIfToBooleanFalse), U8(19),
/* 71 S> */ B(Ldar), R(1),
/* 77 E> */ B(MulSmi), I8(12), U8(0),
B(Star), R(1),
/* 85 S> */ B(Ldar), R(0),
/* 91 E> */ B(SubSmi), I8(1), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(19), I8(0),
+ /* 57 E> */ B(JumpLoop), U8(18), I8(0),
/* 98 S> */ B(Ldar), R(1),
/* 107 S> */ B(Return),
]
@@ -243,14 +233,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 54
+bytecode array length: 52
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(1),
B(Star), R(1),
- /* 56 E> */ B(StackCheck),
/* 63 S> */ B(Ldar), R(1),
/* 69 E> */ B(MulSmi), I8(10), U8(0),
B(Star), R(1),
@@ -268,7 +256,7 @@ bytecodes: [
/* 144 S> */ B(LdaSmi), I8(10),
/* 144 E> */ B(TestLessThan), R(0), U8(4),
B(JumpIfFalse), U8(5),
- B(JumpLoop), U8(40), I8(0),
+ /* 56 E> */ B(JumpLoop), U8(39), I8(0),
/* 151 S> */ B(Ldar), R(1),
/* 160 S> */ B(Return),
]
@@ -289,14 +277,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 32
+bytecode array length: 30
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 54 S> */ B(LdaSmi), I8(1),
B(Star), R(1),
- /* 57 E> */ B(StackCheck),
/* 64 S> */ B(Ldar), R(1),
/* 70 E> */ B(MulSmi), I8(12), U8(0),
B(Star), R(1),
@@ -304,7 +290,7 @@ bytecodes: [
/* 84 E> */ B(SubSmi), I8(1), U8(1),
B(Star), R(0),
/* 98 S> */ B(JumpIfToBooleanFalse), U8(5),
- B(JumpLoop), U8(17), I8(0),
+ /* 57 E> */ B(JumpLoop), U8(16), I8(0),
/* 102 S> */ B(Ldar), R(1),
/* 111 S> */ B(Return),
]
@@ -326,14 +312,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 42
+bytecode array length: 40
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(1),
B(Star), R(1),
- /* 56 E> */ B(StackCheck),
/* 69 S> */ B(MulSmi), I8(10), U8(0),
B(Star), R(1),
/* 77 S> */ B(LdaSmi), I8(5),
@@ -368,14 +352,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 47
+bytecode array length: 45
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(1),
B(Star), R(1),
- /* 56 E> */ B(StackCheck),
/* 63 S> */ B(Ldar), R(1),
/* 69 E> */ B(MulSmi), I8(10), U8(0),
B(Star), R(1),
@@ -390,7 +372,7 @@ bytecodes: [
/* 117 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
/* 123 S> */ B(Jump), U8(2),
- B(JumpLoop), U8(33), I8(0),
+ /* 56 E> */ B(JumpLoop), U8(32), I8(0),
/* 149 S> */ B(Ldar), R(1),
/* 158 S> */ B(Return),
]
@@ -410,12 +392,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 35
+bytecode array length: 33
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 45 E> */ B(StackCheck),
/* 58 S> */ B(LdaSmi), I8(1),
/* 64 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
@@ -427,7 +407,7 @@ bytecodes: [
/* 103 S> */ B(Ldar), R(0),
/* 109 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(0),
- B(JumpLoop), U8(26), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(25), I8(0),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
@@ -446,12 +426,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 35
+bytecode array length: 33
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 47 S> */ B(LdaZero),
B(Star), R(0),
- /* 34 E> */ B(StackCheck),
/* 56 S> */ B(LdaSmi), I8(1),
/* 62 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
@@ -463,7 +441,7 @@ bytecodes: [
/* 101 S> */ B(Ldar), R(0),
/* 107 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(0),
- B(JumpLoop), U8(26), I8(0),
+ /* 34 E> */ B(JumpLoop), U8(25), I8(0),
B(LdaUndefined),
/* 114 S> */ B(Return),
]
@@ -482,12 +460,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 35
+bytecode array length: 33
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 45 E> */ B(StackCheck),
/* 68 S> */ B(LdaSmi), I8(1),
/* 74 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
@@ -499,7 +475,7 @@ bytecodes: [
/* 55 S> */ B(Ldar), R(0),
/* 59 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(0),
- B(JumpLoop), U8(26), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(25), I8(0),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
@@ -517,12 +493,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 35
+bytecode array length: 33
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 47 S> */ B(LdaZero),
B(Star), R(0),
- /* 34 E> */ B(StackCheck),
/* 66 S> */ B(LdaSmi), I8(1),
/* 72 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
@@ -534,7 +508,7 @@ bytecodes: [
/* 53 S> */ B(Ldar), R(0),
/* 57 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(0),
- B(JumpLoop), U8(26), I8(0),
+ /* 34 E> */ B(JumpLoop), U8(25), I8(0),
B(LdaUndefined),
/* 111 S> */ B(Return),
]
@@ -553,17 +527,15 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 36
+bytecode array length: 34
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 58 S> */ B(LdaZero),
B(Star), R(1),
/* 63 S> */ B(LdaSmi), I8(100),
/* 63 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(22),
- /* 45 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(21),
/* 85 S> */ B(Ldar), R(0),
/* 91 E> */ B(AddSmi), I8(1), U8(1),
B(Star), R(0),
@@ -571,7 +543,7 @@ bytecodes: [
/* 72 S> */ B(Ldar), R(1),
/* 76 E> */ B(AddSmi), I8(1), U8(2),
B(Star), R(1),
- B(JumpLoop), U8(24), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(23), I8(0),
B(LdaUndefined),
/* 110 S> */ B(Return),
]
@@ -590,23 +562,21 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 33
+bytecode array length: 31
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 58 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
/* 62 S> */ B(Ldar), R(1),
- B(JumpIfToBooleanFalse), U8(19),
- /* 45 E> */ B(StackCheck),
+ B(JumpIfToBooleanFalse), U8(18),
/* 74 S> */ B(Ldar), R(0),
/* 80 E> */ B(MulSmi), I8(12), U8(0),
B(Star), R(0),
/* 67 S> */ B(Ldar), R(1),
B(Dec), U8(1),
B(Star), R(1),
- B(JumpLoop), U8(18), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(17), I8(0),
/* 88 S> */ B(Ldar), R(0),
/* 97 S> */ B(Return),
]
@@ -625,9 +595,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 58 S> */ B(LdaZero),
@@ -651,14 +620,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 36
+bytecode array length: 34
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 58 S> */ B(LdaZero),
B(Star), R(1),
- /* 45 E> */ B(StackCheck),
/* 76 S> */ B(Ldar), R(0),
/* 82 E> */ B(AddSmi), I8(1), U8(0),
B(Star), R(0),
@@ -669,7 +636,7 @@ bytecodes: [
/* 69 S> */ B(Ldar), R(1),
B(Inc), U8(2),
B(Star), R(1),
- B(JumpLoop), U8(23), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(22), I8(0),
/* 112 S> */ B(Ldar), R(0),
/* 121 S> */ B(Return),
]
@@ -692,14 +659,12 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 50
+bytecode array length: 48
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 52 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanFalse), U8(42),
- /* 45 E> */ B(StackCheck),
+ B(JumpIfToBooleanFalse), U8(41),
B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
@@ -717,7 +682,7 @@ bytecodes: [
B(Inc), U8(0),
/* 127 E> */ B(StaCurrentContextSlot), U8(2),
B(PopContext), R(3),
- B(JumpLoop), U8(41), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(40), I8(0),
B(LdaUndefined),
/* 137 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index 18814169b9..e37a3ca0cf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -17,9 +17,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 62 S> */ B(AddSmi), I8(1), U8(0),
@@ -48,23 +47,20 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 69
+bytecode array length: 66
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaZero),
B(Star), R(0),
/* 71 S> */ B(LdaZero),
B(Star), R(1),
/* 76 S> */ B(LdaSmi), I8(10),
/* 76 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(54),
- /* 58 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(52),
/* 106 S> */ B(LdaZero),
B(Star), R(2),
/* 111 S> */ B(LdaSmi), I8(3),
/* 111 E> */ B(TestLessThan), R(2), U8(1),
- B(JumpIfFalse), U8(34),
- /* 93 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(33),
/* 129 S> */ B(Ldar), R(0),
B(Inc), U8(2),
B(Star), R(0),
@@ -78,11 +74,11 @@ bytecodes: [
/* 118 S> */ B(Ldar), R(2),
B(Inc), U8(5),
B(Star), R(2),
- B(JumpLoop), U8(36), I8(1),
+ /* 93 E> */ B(JumpLoop), U8(35), I8(1),
/* 84 S> */ B(Ldar), R(1),
B(Inc), U8(6),
B(Star), R(1),
- B(JumpLoop), U8(56), I8(0),
+ /* 58 E> */ B(JumpLoop), U8(54), I8(0),
/* 188 S> */ B(Ldar), R(0),
/* 199 S> */ B(Return),
]
@@ -101,10 +97,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 29
+bytecode array length: 28
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -141,10 +136,9 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 53
+bytecode array length: 52
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index 4d3d1eb7d6..293ad72957 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 25
+bytecode array length: 24
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(1),
/* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
@@ -38,9 +37,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 27
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(1),
/* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
@@ -67,9 +65,8 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 101
+bytecode array length: 100
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaNamedProperty), R(0), U8(1), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
index 5c5c0ac00c..2dfa6dc4c0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
@@ -14,9 +14,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
/* 39 E> */ B(CallUndefinedReceiver0), R(0), U8(2),
@@ -36,9 +35,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
- /* 34 E> */ B(StackCheck),
/* 39 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
index 4b48871fd2..1e5e0a2c8e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
@@ -11,10 +11,9 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 75
+bytecode array length: 74
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(4),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
index 4e7e6d3190..8674673d3d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
@@ -14,9 +14,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 45 E> */ B(StackCheck),
/* 50 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
/* 57 E> */ B(Construct), R(0), R(0), U8(0), U8(2),
@@ -36,9 +35,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
- /* 58 E> */ B(StackCheck),
/* 63 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaSmi), I8(3),
@@ -66,9 +64,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 26
+bytecode array length: 25
bytecodes: [
- /* 100 E> */ B(StackCheck),
/* 105 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaSmi), I8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
index 761436decb..32a9a902ae 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
@@ -13,9 +13,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
B(LdaUndefined),
/* 26 S> */ B(Return),
@@ -32,9 +31,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 16 S> */ B(CallRuntime), U16(Runtime::kIsArray), R(arg0), U8(1),
/* 34 S> */ B(Return),
]
@@ -50,9 +48,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
B(LdaSmi), I8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 7021630123..b4425f57fe 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -22,9 +22,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 34
+bytecode array length: 33
bytecodes: [
- /* 99 E> */ B(StackCheck),
B(Mov), R(closure), R(0),
/* 104 S> */ B(LdaConstant), U8(0),
/* 111 E> */ B(LdaKeyedProperty), R(closure), U8(1),
@@ -62,9 +61,8 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 47
+bytecode array length: 46
bytecodes: [
- /* 125 E> */ B(StackCheck),
B(Mov), R(closure), R(0),
/* 130 S> */ B(LdaConstant), U8(0),
/* 130 E> */ B(LdaKeyedProperty), R(closure), U8(0),
@@ -106,9 +104,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 40
+bytecode array length: 39
bytecodes: [
- /* 113 E> */ B(StackCheck),
B(Mov), R(closure), R(1),
/* 118 S> */ B(Ldar), R(1),
B(GetSuperConstructor), R(3),
@@ -149,9 +146,8 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 36
+bytecode array length: 35
bytecodes: [
- /* 112 E> */ B(StackCheck),
B(Mov), R(closure), R(1),
/* 117 S> */ B(Ldar), R(1),
B(GetSuperConstructor), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index d785701c0d..b16056a344 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -14,10 +14,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 41
+bytecode array length: 40
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
@@ -53,10 +52,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 41
+bytecode array length: 40
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
@@ -94,10 +92,9 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 84
+bytecode array length: 83
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(2),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(2),
B(PushContext), R(1),
/* 43 S> */ B(LdaConstant), U8(1),
/* 43 E> */ B(StaCurrentContextSlot), U8(2),
@@ -153,10 +150,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 49
+bytecode array length: 48
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
/* 46 S> */ B(LdaZero),
/* 46 E> */ B(StaCurrentContextSlot), U8(2),
@@ -193,9 +189,8 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 70
+bytecode array length: 69
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
index d9413a1866..848c420967 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
@@ -12,9 +12,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(TestNull),
@@ -32,9 +31,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaUndefined),
B(Star), R(0),
/* 53 S> */ B(TestUndefined),
@@ -52,9 +50,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaUndefined),
B(Star), R(0),
/* 53 S> */ B(TestUndefined),
@@ -73,9 +70,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(2),
B(Star), R(0),
/* 45 S> */ B(TestUndetectable),
@@ -94,9 +90,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaUndefined),
B(Star), R(0),
/* 53 S> */ B(TestUndetectable),
@@ -114,9 +109,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaUndefined),
B(Star), R(0),
/* 53 S> */ B(JumpIfNotUndefined), U8(6),
@@ -137,9 +131,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(TestUndetectable),
@@ -161,9 +154,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(JumpIfUndefined), U8(6),
@@ -184,9 +176,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(JumpIfNotNull), U8(6),
@@ -211,9 +202,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(JumpIfNotNull), U8(5),
@@ -236,9 +226,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(TestUndetectable),
@@ -263,20 +252,18 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaUndefined),
B(Star), R(0),
/* 61 S> */ B(LdaZero),
B(Star), R(1),
/* 73 S> */ B(Ldar), R(0),
- B(JumpIfUndefined), U8(12),
- /* 64 E> */ B(StackCheck),
+ B(JumpIfUndefined), U8(11),
/* 92 S> */ B(Ldar), R(1),
B(Inc), U8(0),
B(Star), R(1),
- B(JumpLoop), U8(11), I8(0),
+ /* 64 E> */ B(JumpLoop), U8(10), I8(0),
B(LdaUndefined),
/* 99 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareTypeOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareTypeOf.golden
index 41108b12c8..9349f993dc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareTypeOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareTypeOf.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(1),
B(TestTypeOf), U8(0),
/* 64 S> */ B(Return),
@@ -29,9 +28,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaConstant), U8(0),
B(TestTypeOf), U8(1),
/* 68 S> */ B(Return),
@@ -48,9 +46,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaTrue),
B(TestTypeOf), U8(3),
/* 67 S> */ B(Return),
@@ -66,9 +63,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaUndefined),
B(TestTypeOf), U8(1),
/* 72 S> */ B(Return),
@@ -84,9 +80,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaFalse),
/* 73 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index b8e9ebdb04..a040decf23 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(AddSmi), I8(2), U8(0),
@@ -32,9 +31,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(DivSmi), I8(2), U8(0),
@@ -53,9 +51,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 20
+bytecode array length: 19
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
@@ -77,9 +74,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 52 S> */ B(LdaSmi), I8(1),
@@ -102,10 +98,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
index 2f82f7dda1..cd8f5de491 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(2),
/* 51 S> */ B(Return),
]
@@ -28,9 +27,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(3),
/* 59 S> */ B(Return),
]
@@ -45,9 +43,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaZero),
B(Star), R(0),
B(LdaSmi), I8(1),
@@ -70,9 +67,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
index 0393f7407e..d5240602e1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
B(LdaUndefined),
@@ -30,9 +29,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 57 S> */ B(Return),
@@ -48,9 +46,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 20
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(LdaTheHole),
B(Star), R(0),
/* 44 S> */ B(LdaSmi), I8(20),
@@ -74,9 +71,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 48 S> */ B(LdaSmi), I8(20),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
index 3f07cbfd5f..8250d98b0f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
@@ -11,10 +11,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -35,10 +34,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 16
+bytecode array length: 15
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -59,10 +57,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 26
+bytecode array length: 25
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -88,10 +85,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
index c4fa209b33..d6a5199e52 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
@@ -13,10 +13,9 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
@@ -37,10 +36,9 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 19
+bytecode array length: 18
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
@@ -63,10 +61,9 @@ snippet: "
"
frame size: 1
parameter count: 5
-bytecode array length: 19
+bytecode array length: 18
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(2),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(2),
B(PushContext), R(0),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(3),
@@ -89,10 +86,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
/* 26 S> */ B(Ldar), R(this),
/* 26 E> */ B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
index 88be31af43..73d710b2fa 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -11,10 +11,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
/* 41 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
/* 70 S> */ B(Return),
@@ -32,10 +31,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(2),
@@ -55,10 +53,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 19
+bytecode array length: 18
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(2),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(2),
B(PushContext), R(0),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(2),
@@ -80,10 +77,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
/* 41 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
B(Star), R(1),
@@ -106,10 +102,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 29
+bytecode array length: 28
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -393,10 +388,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 797
+bytecode array length: 796
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(Wide), B(CreateFunctionContext), U16(0), U16(256),
+ /* 30 E> */ B(Wide), B(CreateFunctionContext), U16(0), U16(256),
B(PushContext), R(1),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index 7a397e95af..0edc219769 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(Inc), U8(0),
@@ -31,9 +30,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 15
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(ToNumeric), U8(0),
@@ -54,9 +52,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(Dec), U8(0),
@@ -74,9 +71,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 15
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(ToNumeric), U8(0),
@@ -97,9 +93,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 26
+bytecode array length: 25
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
@@ -124,9 +119,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
@@ -149,9 +143,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 31
+bytecode array length: 30
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
@@ -179,9 +172,8 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 27
+bytecode array length: 26
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
@@ -207,10 +199,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(2),
@@ -234,10 +225,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 29
+bytecode array length: 28
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(2),
@@ -264,9 +254,8 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 55 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
index cc4909beff..3bb8f8ac2c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
@@ -13,10 +13,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 10 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 32 S> */ B(Return),
]
@@ -32,10 +31,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 10 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 15 S> */ B(LdaZero),
/* 31 E> */ B(LdaKeyedProperty), R(0), U8(0),
@@ -53,10 +51,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateUnmappedArguments),
+ /* 10 E> */ B(CreateUnmappedArguments),
B(Star), R(0),
/* 46 S> */ B(Return),
]
@@ -72,10 +69,9 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
@@ -98,10 +94,9 @@ snippet: "
"
frame size: 2
parameter count: 4
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(3),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(3),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
@@ -126,10 +121,9 @@ snippet: "
"
frame size: 1
parameter count: 4
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateUnmappedArguments),
+ /* 10 E> */ B(CreateUnmappedArguments),
B(Star), R(0),
/* 53 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index 14ce4e20a4..a0063ba5c4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -13,10 +13,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateRestParameter),
+ /* 10 E> */ B(CreateRestParameter),
B(Star), R(1),
B(Star), R(0),
/* 42 S> */ B(Return),
@@ -33,10 +32,9 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateRestParameter),
+ /* 10 E> */ B(CreateRestParameter),
B(Star), R(2),
B(Mov), R(arg0), R(0),
B(Mov), R(2), R(1),
@@ -55,10 +53,9 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateRestParameter),
+ /* 10 E> */ B(CreateRestParameter),
B(Star), R(2),
B(Mov), R(arg0), R(0),
B(Mov), R(2), R(1),
@@ -78,10 +75,9 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 27
+bytecode array length: 26
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateUnmappedArguments),
+ /* 10 E> */ B(CreateUnmappedArguments),
B(Star), R(3),
B(CreateRestParameter),
B(Star), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
index e164e29ab9..3d95c9c4f4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaUndefined),
/* 41 S> */ B(Return),
]
@@ -28,9 +27,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 66 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
B(LdaUndefined),
@@ -47,9 +45,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(LdaSmi), I8(1),
/* 55 S> */ B(Return),
]
@@ -64,9 +61,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
index fb36d01f98..27be2b47bd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -12,13 +12,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 20
+bytecode array length: 19
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(LdaConstant), U8(0),
B(Star), R(1),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
/* 8 S> */ B(LdaSmi), I8(1),
/* 8 E> */ B(StaGlobal), U8(1), U8(0),
B(LdaUndefined),
@@ -37,13 +36,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(LdaConstant), U8(0),
B(Star), R(0),
B(Mov), R(closure), R(1),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(0), U8(2),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(0), U8(2),
B(LdaUndefined),
/* 16 S> */ B(Return),
]
@@ -60,13 +58,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 26
+bytecode array length: 25
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(LdaConstant), U8(0),
B(Star), R(1),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
/* 8 S> */ B(LdaSmi), I8(1),
/* 8 E> */ B(StaGlobal), U8(1), U8(0),
/* 11 S> */ B(LdaSmi), I8(2),
@@ -88,13 +85,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(LdaConstant), U8(0),
B(Star), R(1),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
/* 16 S> */ B(LdaGlobal), U8(1), U8(0),
B(Star), R(1),
/* 16 E> */ B(CallUndefinedReceiver0), R(1), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
index 2d1ad15fe2..3af61115bd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 56 S> */ B(LdaConstant), U8(1),
@@ -33,9 +32,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 56 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 70 S> */ B(LdaConstant), U8(1),
@@ -55,9 +53,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 56 S> */ B(LdaSmi), I8(2),
@@ -76,9 +73,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 46 S> */ B(LdaFalse),
@@ -98,10 +94,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 25
+bytecode array length: 24
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
/* 56 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
/* 56 E> */ B(StaCurrentContextSlot), U8(2),
@@ -126,9 +121,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaTrue),
/* 55 S> */ B(Return),
]
@@ -143,9 +137,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaTrue),
/* 53 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
index 587ed9b2ba..bff4c7a5da 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
@@ -19,9 +19,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
@@ -47,9 +46,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaFalse),
/* 31 S> */ B(Return),
]
@@ -71,9 +69,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index 8f23cff964..4bb89c6179 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -12,9 +12,8 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 167
+bytecode array length: 166
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(1),
/* 60 S> */ B(GetIterator), R(1), U8(1), U8(3),
@@ -99,8 +98,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [35, 77, 85],
- [109, 142, 144],
+ [34, 76, 84],
+ [108, 141, 143],
]
---
@@ -110,9 +109,8 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 253
+bytecode array length: 252
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(2),
/* 69 S> */ B(GetIterator), R(2), U8(1), U8(3),
@@ -231,8 +229,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [35, 163, 171],
- [195, 228, 230],
+ [34, 162, 170],
+ [194, 227, 229],
]
---
@@ -242,9 +240,8 @@ snippet: "
"
frame size: 16
parameter count: 1
-bytecode array length: 218
+bytecode array length: 217
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 40 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
/* 51 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
@@ -351,8 +348,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [38, 128, 136],
- [160, 193, 195],
+ [37, 127, 135],
+ [159, 192, 194],
]
---
@@ -362,9 +359,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(1),
/* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
@@ -386,9 +382,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 20
+bytecode array length: 19
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 40 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
/* 48 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
@@ -413,9 +408,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 33
+bytecode array length: 32
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(1),
/* 64 S> */ B(LdaConstant), U8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
index 60e585f974..5bf3618bed 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(Debugger),
B(LdaUndefined),
/* 44 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
index a8609261a7..bb695e0c35 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
@@ -11,10 +11,9 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 59
+bytecode array length: 58
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(4),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index df9074524b..866694aa03 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -16,13 +16,12 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 321
+bytecode array length: 319
bytecodes: [
- /* 16 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
B(Star), R(0),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
@@ -65,16 +64,15 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(6), U8(13),
- B(JumpIfToBooleanTrue), U8(23),
+ B(JumpIfToBooleanTrue), U8(22),
B(LdaNamedProperty), R(11), U8(7), U8(15),
B(Star), R(11),
B(LdaFalse),
B(Star), R(7),
B(Mov), R(11), R(1),
- /* 23 E> */ B(StackCheck),
/* 38 S> */ B(Mov), R(1), R(3),
B(Ldar), R(11),
- B(JumpLoop), U8(77), I8(0),
+ /* 23 E> */ B(JumpLoop), U8(76), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(9),
B(Star), R(8),
@@ -83,7 +81,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(8),
B(LdaTheHole),
- /* 38 E> */ B(SetPendingMessage),
+ B(SetPendingMessage),
B(Star), R(10),
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(94),
@@ -155,7 +153,7 @@ bytecodes: [
]
constant pool: [
Smi [95],
- Smi [224],
+ Smi [223],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -167,9 +165,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 293, 293],
- [75, 155, 163],
- [187, 256, 258],
+ [19, 291, 291],
+ [74, 153, 161],
+ [185, 254, 256],
]
---
@@ -181,13 +179,12 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 342
+bytecode array length: 340
bytecodes: [
- /* 16 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
B(Star), R(0),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
@@ -230,13 +227,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(6), U8(13),
- B(JumpIfToBooleanTrue), U8(27),
+ B(JumpIfToBooleanTrue), U8(26),
B(LdaNamedProperty), R(11), U8(7), U8(15),
B(Star), R(11),
B(LdaFalse),
B(Star), R(7),
B(Mov), R(11), R(1),
- /* 23 E> */ B(StackCheck),
/* 38 S> */ B(Mov), R(1), R(3),
/* 56 S> */ B(LdaSmi), I8(1),
B(Mov), R(11), R(9),
@@ -328,7 +324,7 @@ bytecodes: [
]
constant pool: [
Smi [95],
- Smi [228],
+ Smi [227],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -342,9 +338,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 314, 314],
- [75, 159, 167],
- [191, 260, 262],
+ [19, 312, 312],
+ [74, 157, 165],
+ [189, 258, 260],
]
---
@@ -359,13 +355,12 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 337
+bytecode array length: 335
bytecodes: [
- /* 16 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
B(Star), R(0),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
@@ -408,13 +403,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(6), U8(13),
- B(JumpIfToBooleanTrue), U8(39),
+ B(JumpIfToBooleanTrue), U8(38),
B(LdaNamedProperty), R(11), U8(7), U8(15),
B(Star), R(11),
B(LdaFalse),
B(Star), R(7),
B(Mov), R(11), R(1),
- /* 23 E> */ B(StackCheck),
/* 38 S> */ B(Mov), R(1), R(3),
/* 63 S> */ B(LdaSmi), I8(10),
/* 69 E> */ B(TestEqual), R(3), U8(17),
@@ -424,7 +418,7 @@ bytecodes: [
/* 96 E> */ B(TestEqual), R(3), U8(18),
B(JumpIfFalse), U8(4),
/* 103 S> */ B(Jump), U8(5),
- B(JumpLoop), U8(93), I8(0),
+ /* 23 E> */ B(JumpLoop), U8(92), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(9),
B(Star), R(8),
@@ -505,7 +499,7 @@ bytecodes: [
]
constant pool: [
Smi [95],
- Smi [240],
+ Smi [239],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -517,9 +511,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 309, 309],
- [75, 171, 179],
- [203, 272, 274],
+ [19, 307, 307],
+ [74, 169, 177],
+ [201, 270, 272],
]
---
@@ -532,12 +526,11 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 253
+bytecode array length: 251
bytecodes: [
- /* 16 E> */ B(StackCheck),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
B(Star), R(0),
B(Mov), R(context), R(2),
/* 31 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
@@ -560,14 +553,13 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
B(LdaNamedProperty), R(9), U8(3), U8(10),
- B(JumpIfToBooleanTrue), U8(33),
+ B(JumpIfToBooleanTrue), U8(32),
B(LdaNamedProperty), R(9), U8(4), U8(12),
B(Star), R(9),
B(LdaFalse),
B(Star), R(5),
B(Ldar), R(9),
/* 58 E> */ B(StaNamedProperty), R(1), U8(5), U8(14),
- /* 53 E> */ B(StackCheck),
/* 87 S> */ B(LdaNamedProperty), R(1), U8(5), U8(16),
B(Star), R(7),
B(LdaSmi), I8(1),
@@ -658,8 +650,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [16, 225, 225],
- [53, 106, 114],
- [138, 171, 173],
+ [15, 223, 223],
+ [52, 104, 112],
+ [136, 169, 171],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index fe1defeefb..680e5ee5cf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(LdaUndefined),
/* 57 S> */ B(Return),
]
@@ -28,9 +27,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(LdaUndefined),
/* 62 S> */ B(Return),
]
@@ -45,9 +43,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(LdaUndefined),
/* 62 S> */ B(Return),
]
@@ -63,28 +60,26 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 44
+bytecode array length: 42
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 68 S> */ B(JumpIfUndefinedOrNull), U8(37),
+ /* 68 S> */ B(JumpIfUndefinedOrNull), U8(36),
B(ToObject), R(3),
B(ForInEnumerate), R(3),
B(ForInPrepare), R(4), U8(0),
B(LdaZero),
B(Star), R(7),
/* 63 S> */ B(ForInContinue), R(7), R(6),
- B(JumpIfFalse), U8(22),
+ B(JumpIfFalse), U8(21),
B(ForInNext), R(3), R(7), R(4), U8(0),
- B(JumpIfUndefined), U8(8),
+ B(JumpIfUndefined), U8(7),
B(Star), R(2),
- /* 54 E> */ B(StackCheck),
/* 63 S> */ B(Star), R(1),
/* 82 S> */ B(Return),
B(ForInStep), R(7),
B(Star), R(7),
- B(JumpLoop), U8(22), I8(0),
+ /* 54 E> */ B(JumpLoop), U8(21), I8(0),
B(LdaUndefined),
/* 85 S> */ B(Return),
]
@@ -101,24 +96,22 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 56
+bytecode array length: 54
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 59 S> */ B(CreateArrayLiteral), U8(0), U8(1), U8(37),
- B(JumpIfUndefinedOrNull), U8(46),
+ B(JumpIfUndefinedOrNull), U8(45),
B(ToObject), R(3),
B(ForInEnumerate), R(3),
B(ForInPrepare), R(4), U8(0),
B(LdaZero),
B(Star), R(7),
/* 54 S> */ B(ForInContinue), R(7), R(6),
- B(JumpIfFalse), U8(31),
+ B(JumpIfFalse), U8(30),
B(ForInNext), R(3), R(7), R(4), U8(0),
- B(JumpIfUndefined), U8(17),
+ B(JumpIfUndefined), U8(16),
B(Star), R(2),
- /* 45 E> */ B(StackCheck),
/* 54 S> */ B(Star), R(1),
/* 70 S> */ B(Ldar), R(2),
/* 75 E> */ B(Add), R(0), U8(2),
@@ -126,7 +119,7 @@ bytecodes: [
B(Star), R(0),
/* 72 E> */ B(ForInStep), R(7),
B(Star), R(7),
- B(JumpLoop), U8(31), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(30), I8(0),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
@@ -146,26 +139,24 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 83
+bytecode array length: 81
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
- B(JumpIfUndefinedOrNull), U8(70),
+ B(JumpIfUndefinedOrNull), U8(69),
B(ToObject), R(1),
B(ForInEnumerate), R(1),
B(ForInPrepare), R(2), U8(1),
B(LdaZero),
B(Star), R(5),
/* 68 S> */ B(ForInContinue), R(5), R(4),
- B(JumpIfFalse), U8(55),
+ B(JumpIfFalse), U8(54),
B(ForInNext), R(1), R(5), R(2), U8(1),
- B(JumpIfUndefined), U8(41),
+ B(JumpIfUndefined), U8(40),
B(Star), R(6),
B(Ldar), R(6),
/* 68 E> */ B(StaNamedProperty), R(0), U8(2), U8(3),
- /* 62 E> */ B(StackCheck),
/* 100 S> */ B(LdaNamedProperty), R(0), U8(2), U8(5),
B(Star), R(6),
B(LdaSmi), I8(10),
@@ -180,7 +171,7 @@ bytecodes: [
/* 143 S> */ B(Jump), U8(9),
B(ForInStep), R(5),
B(Star), R(5),
- B(JumpLoop), U8(55), I8(0),
+ /* 62 E> */ B(JumpLoop), U8(54), I8(0),
B(LdaUndefined),
/* 152 S> */ B(Return),
]
@@ -199,34 +190,32 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 62
+bytecode array length: 60
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(0),
/* 72 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
- B(JumpIfUndefinedOrNull), U8(49),
+ B(JumpIfUndefinedOrNull), U8(48),
B(ToObject), R(1),
B(ForInEnumerate), R(1),
B(ForInPrepare), R(2), U8(1),
B(LdaZero),
B(Star), R(5),
/* 65 S> */ B(ForInContinue), R(5), R(4),
- B(JumpIfFalse), U8(34),
+ B(JumpIfFalse), U8(33),
B(ForInNext), R(1), R(5), R(2), U8(1),
- B(JumpIfUndefined), U8(20),
+ B(JumpIfUndefined), U8(19),
B(Star), R(6),
B(LdaZero),
B(Star), R(8),
B(Ldar), R(6),
/* 65 E> */ B(StaKeyedProperty), R(0), R(8), U8(3),
- /* 59 E> */ B(StackCheck),
/* 83 S> */ B(LdaSmi), I8(3),
/* 91 E> */ B(LdaKeyedProperty), R(0), U8(5),
/* 95 S> */ B(Return),
B(ForInStep), R(5),
B(Star), R(5),
- B(JumpLoop), U8(34), I8(0),
+ /* 59 E> */ B(JumpLoop), U8(33), I8(0),
B(LdaUndefined),
/* 98 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 7be4c0bdea..681b4bc9f5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 165
+bytecode array length: 163
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(4),
B(GetIterator), R(4), U8(1), U8(3),
@@ -32,16 +31,15 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
B(LdaNamedProperty), R(8), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(23),
+ B(JumpIfToBooleanTrue), U8(22),
B(LdaNamedProperty), R(8), U8(3), U8(11),
B(Star), R(8),
B(LdaFalse),
B(Star), R(4),
B(Mov), R(8), R(1),
- /* 34 E> */ B(StackCheck),
/* 43 S> */ B(Mov), R(1), R(0),
B(Ldar), R(8),
- B(JumpLoop), U8(40), I8(0),
+ /* 34 E> */ B(JumpLoop), U8(39), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(6),
B(Star), R(5),
@@ -50,7 +48,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(5),
B(LdaTheHole),
- /* 43 E> */ B(SetPendingMessage),
+ B(SetPendingMessage),
B(Star), R(7),
B(Ldar), R(4),
B(JumpIfToBooleanTrue), U8(58),
@@ -96,8 +94,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [32, 75, 83],
- [107, 140, 142],
+ [31, 73, 81],
+ [105, 138, 140],
]
---
@@ -107,9 +105,8 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 173
+bytecode array length: 171
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 68 S> */ B(GetIterator), R(0), U8(0), U8(2),
@@ -128,13 +125,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
B(LdaNamedProperty), R(9), U8(2), U8(8),
- B(JumpIfToBooleanTrue), U8(27),
+ B(JumpIfToBooleanTrue), U8(26),
B(LdaNamedProperty), R(9), U8(3), U8(10),
B(Star), R(9),
B(LdaFalse),
B(Star), R(5),
B(Mov), R(9), R(2),
- /* 54 E> */ B(StackCheck),
/* 63 S> */ B(Mov), R(2), R(1),
/* 73 S> */ B(LdaSmi), I8(1),
B(Mov), R(9), R(7),
@@ -198,8 +194,8 @@ constant pool: [
Smi [9],
]
handlers: [
- [30, 77, 85],
- [109, 142, 144],
+ [29, 75, 83],
+ [107, 140, 142],
]
---
@@ -211,9 +207,8 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 181
+bytecode array length: 179
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(4),
B(GetIterator), R(4), U8(1), U8(3),
@@ -232,13 +227,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
B(LdaNamedProperty), R(8), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(39),
+ B(JumpIfToBooleanTrue), U8(38),
B(LdaNamedProperty), R(8), U8(3), U8(11),
B(Star), R(8),
B(LdaFalse),
B(Star), R(4),
B(Mov), R(8), R(1),
- /* 34 E> */ B(StackCheck),
/* 43 S> */ B(Mov), R(1), R(0),
/* 66 S> */ B(LdaSmi), I8(10),
/* 72 E> */ B(TestEqual), R(0), U8(13),
@@ -248,7 +242,7 @@ bytecodes: [
/* 97 E> */ B(TestEqual), R(0), U8(14),
B(JumpIfFalse), U8(4),
/* 104 S> */ B(Jump), U8(5),
- B(JumpLoop), U8(56), I8(0),
+ /* 34 E> */ B(JumpLoop), U8(55), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(6),
B(Star), R(5),
@@ -303,8 +297,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [32, 91, 99],
- [123, 156, 158],
+ [31, 89, 97],
+ [121, 154, 156],
]
---
@@ -314,9 +308,8 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 187
+bytecode array length: 185
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
@@ -337,14 +330,13 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
B(LdaNamedProperty), R(7), U8(3), U8(10),
- B(JumpIfToBooleanTrue), U8(33),
+ B(JumpIfToBooleanTrue), U8(32),
B(LdaNamedProperty), R(7), U8(4), U8(12),
B(Star), R(7),
B(LdaFalse),
B(Star), R(3),
B(Ldar), R(7),
/* 67 E> */ B(StaNamedProperty), R(0), U8(5), U8(14),
- /* 62 E> */ B(StackCheck),
/* 96 S> */ B(LdaNamedProperty), R(0), U8(5), U8(16),
B(Star), R(5),
B(LdaSmi), I8(1),
@@ -411,7 +403,7 @@ constant pool: [
Smi [9],
]
handlers: [
- [38, 91, 99],
- [123, 156, 158],
+ [37, 89, 97],
+ [121, 154, 156],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 1f1cf6a332..a38d3d78dd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -15,9 +15,8 @@ snippet: "
"
frame size: 15
parameter count: 2
-bytecode array length: 162
+bytecode array length: 160
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 34 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
@@ -34,17 +33,16 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(1), U8(8),
- B(JumpIfToBooleanTrue), U8(26),
+ B(JumpIfToBooleanTrue), U8(25),
B(LdaNamedProperty), R(10), U8(2), U8(10),
B(Star), R(10),
B(LdaFalse),
B(Star), R(6),
B(Mov), R(10), R(0),
- /* 20 E> */ B(StackCheck),
/* 29 S> */ B(Mov), R(0), R(2),
/* 49 S> */ B(Mov), R(2), R(3),
B(Ldar), R(10),
- B(JumpLoop), U8(43), I8(0),
+ /* 20 E> */ B(JumpLoop), U8(42), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(8),
B(Star), R(7),
@@ -53,7 +51,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(7),
B(LdaTheHole),
- /* 49 E> */ B(SetPendingMessage),
+ B(SetPendingMessage),
B(Star), R(9),
B(Ldar), R(6),
B(JumpIfToBooleanTrue), U8(58),
@@ -98,8 +96,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [26, 72, 80],
- [104, 137, 139],
+ [25, 70, 78],
+ [102, 135, 137],
]
---
@@ -111,10 +109,9 @@ snippet: "
"
frame size: 20
parameter count: 2
-bytecode array length: 246
+bytecode array length: 244
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(5),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(5),
B(PushContext), R(2),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(4),
@@ -146,13 +143,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(3), U8(8),
- B(JumpIfToBooleanTrue), U8(75),
+ B(JumpIfToBooleanTrue), U8(74),
B(LdaNamedProperty), R(10), U8(4), U8(10),
B(Star), R(10),
B(LdaFalse),
B(Star), R(6),
B(Mov), R(10), R(0),
- /* 20 E> */ B(StackCheck),
B(CreateBlockContext), U8(5),
B(PushContext), R(11),
B(LdaTheHole),
@@ -177,7 +173,7 @@ bytecodes: [
/* 41 E> */ B(CallUndefinedReceiver1), R(12), R(13), U8(14),
B(PopContext), R(11),
B(Mov), R(0), R(10),
- B(JumpLoop), U8(92), I8(0),
+ /* 20 E> */ B(JumpLoop), U8(91), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(8),
B(Star), R(7),
@@ -237,8 +233,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [59, 154, 162],
- [186, 219, 221],
+ [58, 152, 160],
+ [184, 217, 219],
]
---
@@ -250,9 +246,8 @@ snippet: "
"
frame size: 14
parameter count: 2
-bytecode array length: 179
+bytecode array length: 177
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 34 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
@@ -269,13 +264,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
B(LdaNamedProperty), R(8), U8(1), U8(8),
- B(JumpIfToBooleanTrue), U8(43),
+ B(JumpIfToBooleanTrue), U8(42),
B(LdaNamedProperty), R(8), U8(2), U8(10),
B(Star), R(8),
B(LdaFalse),
B(Star), R(4),
B(Mov), R(8), R(0),
- /* 20 E> */ B(StackCheck),
B(CreateBlockContext), U8(3),
B(PushContext), R(9),
B(LdaTheHole),
@@ -287,7 +281,7 @@ bytecodes: [
/* 67 E> */ B(CallUndefinedReceiver0), R(10), U8(12),
B(PopContext), R(9),
B(Mov), R(0), R(8),
- B(JumpLoop), U8(60), I8(0),
+ /* 20 E> */ B(JumpLoop), U8(59), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(6),
B(Star), R(5),
@@ -343,8 +337,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [26, 89, 97],
- [121, 154, 156],
+ [25, 87, 95],
+ [119, 152, 154],
]
---
@@ -356,9 +350,8 @@ snippet: "
"
frame size: 17
parameter count: 2
-bytecode array length: 173
+bytecode array length: 171
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 41 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
@@ -375,13 +368,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
B(LdaNamedProperty), R(12), U8(1), U8(8),
- B(JumpIfToBooleanTrue), U8(37),
+ B(JumpIfToBooleanTrue), U8(36),
B(LdaNamedProperty), R(12), U8(2), U8(10),
B(Star), R(12),
B(LdaFalse),
B(Star), R(8),
B(Mov), R(12), R(0),
- /* 20 E> */ B(StackCheck),
/* 31 S> */ B(LdaNamedProperty), R(0), U8(3), U8(12),
B(Star), R(3),
/* 34 S> */ B(LdaNamedProperty), R(0), U8(4), U8(14),
@@ -389,7 +381,7 @@ bytecodes: [
/* 56 S> */ B(Ldar), R(4),
/* 58 E> */ B(Add), R(3), U8(16),
B(Star), R(5),
- B(JumpLoop), U8(54), I8(0),
+ /* 20 E> */ B(JumpLoop), U8(53), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(10),
B(Star), R(9),
@@ -398,7 +390,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(9),
B(LdaTheHole),
- /* 56 E> */ B(SetPendingMessage),
+ B(SetPendingMessage),
B(Star), R(11),
B(Ldar), R(8),
B(JumpIfToBooleanTrue), U8(58),
@@ -445,8 +437,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [26, 83, 91],
- [115, 148, 150],
+ [25, 81, 89],
+ [113, 146, 148],
]
---
@@ -458,13 +450,12 @@ snippet: "
"
frame size: 16
parameter count: 2
-bytecode array length: 203
+bytecode array length: 201
bytecodes: [
- /* 11 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
+ /* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
B(Star), R(0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
B(ResumeGenerator), R(0), R(0), U8(5),
@@ -491,17 +482,16 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(4), U8(8),
- B(JumpIfToBooleanTrue), U8(26),
+ B(JumpIfToBooleanTrue), U8(25),
B(LdaNamedProperty), R(11), U8(5), U8(10),
B(Star), R(11),
B(LdaFalse),
B(Star), R(7),
B(Mov), R(11), R(1),
- /* 21 E> */ B(StackCheck),
/* 30 S> */ B(Mov), R(1), R(3),
/* 50 S> */ B(Mov), R(3), R(4),
B(Ldar), R(11),
- B(JumpLoop), U8(43), I8(0),
+ /* 21 E> */ B(JumpLoop), U8(42), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(9),
B(Star), R(8),
@@ -510,7 +500,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(8),
B(LdaTheHole),
- /* 50 E> */ B(SetPendingMessage),
+ B(SetPendingMessage),
B(Star), R(10),
B(Ldar), R(7),
B(JumpIfToBooleanTrue), U8(58),
@@ -558,8 +548,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [67, 113, 121],
- [145, 178, 180],
+ [66, 111, 119],
+ [143, 176, 178],
]
---
@@ -571,13 +561,12 @@ snippet: "
"
frame size: 15
parameter count: 2
-bytecode array length: 247
+bytecode array length: 245
bytecodes: [
- /* 11 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ /* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
B(ResumeGenerator), R(0), R(0), U8(4),
@@ -604,13 +593,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(5), U8(8),
- B(JumpIfToBooleanTrue), U8(64),
+ B(JumpIfToBooleanTrue), U8(63),
B(LdaNamedProperty), R(10), U8(6), U8(10),
B(Star), R(10),
B(LdaFalse),
B(Star), R(6),
B(Mov), R(10), R(1),
- /* 21 E> */ B(StackCheck),
/* 30 S> */ B(Mov), R(1), R(3),
/* 40 S> */ B(LdaFalse),
B(Star), R(12),
@@ -628,7 +616,7 @@ bytecodes: [
B(Mov), R(11), R(8),
B(Jump), U8(20),
B(Ldar), R(11),
- B(JumpLoop), U8(81), I8(0),
+ /* 21 E> */ B(JumpLoop), U8(80), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(8),
B(Star), R(7),
@@ -678,7 +666,7 @@ bytecodes: [
]
constant pool: [
Smi [21],
- Smi [119],
+ Smi [118],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
@@ -692,8 +680,8 @@ constant pool: [
Smi [9],
]
handlers: [
- [67, 151, 159],
- [183, 216, 218],
+ [66, 149, 157],
+ [181, 214, 216],
]
---
@@ -705,12 +693,11 @@ snippet: "
"
frame size: 17
parameter count: 2
-bytecode array length: 217
+bytecode array length: 215
bytecodes: [
- /* 16 E> */ B(StackCheck),
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(5), U8(2),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(5), U8(2),
B(Star), R(0),
B(Mov), R(context), R(5),
/* 40 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
@@ -729,17 +716,16 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
B(LdaNamedProperty), R(12), U8(1), U8(8),
- B(JumpIfToBooleanTrue), U8(26),
+ B(JumpIfToBooleanTrue), U8(25),
B(LdaNamedProperty), R(12), U8(2), U8(10),
B(Star), R(12),
B(LdaFalse),
B(Star), R(8),
B(Mov), R(12), R(1),
- /* 26 E> */ B(StackCheck),
/* 35 S> */ B(Mov), R(1), R(3),
/* 55 S> */ B(Mov), R(3), R(4),
B(Ldar), R(12),
- B(JumpLoop), U8(43), I8(0),
+ /* 26 E> */ B(JumpLoop), U8(42), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(10),
B(Star), R(9),
@@ -748,7 +734,7 @@ bytecodes: [
B(LdaZero),
B(Star), R(9),
B(LdaTheHole),
- /* 55 E> */ B(SetPendingMessage),
+ B(SetPendingMessage),
B(Star), R(11),
B(Ldar), R(8),
B(JumpIfToBooleanTrue), U8(58),
@@ -813,9 +799,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [16, 189, 189],
- [41, 87, 95],
- [119, 152, 154],
+ [15, 187, 187],
+ [40, 85, 93],
+ [117, 150, 152],
]
---
@@ -827,13 +813,12 @@ snippet: "
"
frame size: 16
parameter count: 2
-bytecode array length: 253
+bytecode array length: 251
bytecodes: [
- /* 16 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
B(Star), R(0),
B(Mov), R(context), R(4),
/* 40 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
@@ -852,13 +837,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(2), U8(8),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(57),
B(LdaNamedProperty), R(11), U8(3), U8(10),
B(Star), R(11),
B(LdaFalse),
B(Star), R(7),
B(Mov), R(11), R(1),
- /* 26 E> */ B(StackCheck),
/* 35 S> */ B(Mov), R(1), R(3),
/* 45 S> */ B(Mov), R(0), R(12),
B(Mov), R(3), R(13),
@@ -874,7 +858,7 @@ bytecodes: [
B(Ldar), R(12),
B(ReThrow),
B(Ldar), R(12),
- B(JumpLoop), U8(75), I8(0),
+ /* 26 E> */ B(JumpLoop), U8(74), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(9),
B(Star), R(8),
@@ -940,7 +924,7 @@ bytecodes: [
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [97],
+ Smi [96],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -949,8 +933,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 225, 225],
- [45, 123, 131],
- [155, 188, 190],
+ [19, 223, 223],
+ [44, 121, 129],
+ [153, 186, 188],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
index 0416d1815a..d73ca7d69c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
/* 54 S> */ B(Return),
]
@@ -29,9 +28,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(0),
/* 56 E> */ B(CallUndefinedReceiver0), R(0), U8(0),
@@ -49,9 +47,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 15
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(0),
B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
index 02f8f49ece..071b6ae8ff 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
@@ -14,9 +14,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -44,9 +43,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -74,9 +72,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -104,9 +101,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -134,9 +130,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -163,9 +158,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -192,9 +186,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -221,9 +214,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index c84e6ec0eb..c3f8b980cf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -13,13 +13,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 44
+bytecode array length: 43
bytecodes: [
- /* 11 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
+ /* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
B(ResumeGenerator), R(0), R(0), U8(1),
@@ -48,13 +47,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 80
+bytecode array length: 79
bytecodes: [
- /* 11 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
+ /* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
B(ResumeGenerator), R(0), R(0), U8(1),
@@ -100,13 +98,12 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 253
+bytecode array length: 251
bytecodes: [
- /* 11 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ /* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
B(ResumeGenerator), R(0), R(0), U8(4),
@@ -135,13 +132,12 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(6), U8(9),
- B(JumpIfToBooleanTrue), U8(64),
+ B(JumpIfToBooleanTrue), U8(63),
B(LdaNamedProperty), R(10), U8(7), U8(11),
B(Star), R(10),
B(LdaFalse),
B(Star), R(6),
B(Mov), R(10), R(1),
- /* 16 E> */ B(StackCheck),
/* 25 S> */ B(Mov), R(1), R(3),
/* 36 S> */ B(LdaFalse),
B(Star), R(12),
@@ -159,7 +155,7 @@ bytecodes: [
B(Mov), R(11), R(8),
B(Jump), U8(20),
B(Ldar), R(11),
- B(JumpLoop), U8(81), I8(0),
+ /* 16 E> */ B(JumpLoop), U8(80), I8(0),
B(LdaSmi), I8(-1),
B(Star), R(8),
B(Star), R(7),
@@ -209,7 +205,7 @@ bytecodes: [
]
constant pool: [
Smi [21],
- Smi [125],
+ Smi [124],
Smi [10],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -224,8 +220,8 @@ constant pool: [
Smi [9],
]
handlers: [
- [73, 157, 165],
- [189, 222, 224],
+ [72, 155, 163],
+ [187, 220, 222],
]
---
@@ -236,13 +232,12 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 205
+bytecode array length: 204
bytecodes: [
- /* 38 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
+ /* 38 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 38 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
B(ResumeGenerator), R(0), R(0), U8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
index b24e5d0aa1..808f608f25 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
@@ -14,9 +14,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(BitwiseAndSmi), I8(1), U8(2),
/* 45 E> */ B(StaGlobal), U8(0), U8(3),
@@ -36,9 +35,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(AddSmi), I8(1), U8(2),
/* 51 E> */ B(StaGlobal), U8(0), U8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index f0479d594d..dd6f8ea9e7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -14,9 +14,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(Inc), U8(2),
/* 40 E> */ B(StaGlobal), U8(0), U8(3),
@@ -36,9 +35,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 16
+bytecode array length: 15
bytecodes: [
- /* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(ToNumeric), U8(2),
B(Star), R(0),
@@ -61,9 +59,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 27 E> */ B(StackCheck),
/* 46 S> */ B(LdaGlobal), U8(0), U8(0),
B(Dec), U8(2),
/* 55 E> */ B(StaGlobal), U8(0), U8(3),
@@ -83,9 +80,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 16
+bytecode array length: 15
bytecodes: [
- /* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(ToNumeric), U8(2),
B(Star), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
index e1994bcdd2..4167138f1f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
@@ -16,9 +16,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 32 E> */ B(StackCheck),
/* 39 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -43,9 +42,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 28 E> */ B(StackCheck),
/* 51 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(0),
B(LdaSmi), I8(1),
@@ -68,9 +66,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 32 E> */ B(StackCheck),
/* 39 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
@@ -92,9 +89,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 18 E> */ B(StackCheck),
/* 25 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
index d6b6bb6c60..49a80d13c9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaConstant), U8(0),
/* 45 S> */ B(Return),
]
@@ -29,9 +28,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 47 S> */ B(LdaConstant), U8(1),
@@ -50,9 +48,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 48 S> */ B(LdaConstant), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
index 56b6e0afac..8f45fff2cb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
@@ -21,10 +21,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 40
+bytecode array length: 39
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
@@ -65,10 +64,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 69
+bytecode array length: 67
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
@@ -78,8 +76,7 @@ bytecodes: [
B(Star), R(1),
B(LdaSmi), I8(5),
/* 59 E> */ B(TestLessThan), R(1), U8(6),
- B(JumpIfFalse), U8(43),
- /* 45 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(42),
/* 81 S> */ B(LdaGlobal), U8(0), U8(7),
B(Star), R(1),
B(LdaSmi), I8(2),
@@ -93,7 +90,7 @@ bytecodes: [
/* 66 S> */ B(LdaGlobal), U8(1), U8(4),
B(Inc), U8(15),
/* 66 E> */ B(StaGlobal), U8(1), U8(2),
- B(JumpLoop), U8(50), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(49), I8(0),
/* 149 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 156 S> */ B(Return),
]
@@ -124,10 +121,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 70
+bytecode array length: 68
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
@@ -137,8 +133,7 @@ bytecodes: [
B(Star), R(1),
B(LdaSmi), I8(4),
/* 68 E> */ B(TestGreaterThan), R(1), U8(6),
- B(JumpIfFalse), U8(43),
- /* 60 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(42),
/* 85 S> */ B(LdaGlobal), U8(0), U8(7),
B(Star), R(1),
B(LdaSmi), I8(2),
@@ -152,7 +147,7 @@ bytecodes: [
/* 128 S> */ B(LdaGlobal), U8(1), U8(4),
B(Dec), U8(15),
/* 129 E> */ B(StaGlobal), U8(1), U8(2),
- B(JumpLoop), U8(50), I8(0),
+ /* 60 E> */ B(JumpLoop), U8(49), I8(0),
/* 168 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 175 S> */ B(Return),
]
@@ -183,16 +178,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 70
+bytecode array length: 68
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
/* 45 S> */ B(LdaSmi), I8(4),
/* 47 E> */ B(StaGlobal), U8(1), U8(2),
- /* 60 E> */ B(StackCheck),
/* 75 S> */ B(LdaGlobal), U8(0), U8(4),
B(Star), R(1),
B(LdaSmi), I8(2),
@@ -211,7 +204,7 @@ bytecodes: [
B(LdaSmi), I8(4),
/* 141 E> */ B(TestGreaterThan), R(1), U8(15),
B(JumpIfFalse), U8(5),
- B(JumpLoop), U8(50), I8(0),
+ /* 60 E> */ B(JumpLoop), U8(49), I8(0),
/* 171 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 178 S> */ B(Return),
]
@@ -244,10 +237,9 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 71
+bytecode array length: 70
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(LdaConstant), U8(0),
B(Star), R(2),
@@ -297,10 +289,9 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(LdaConstant), U8(0),
B(Star), R(3),
@@ -330,10 +321,9 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(LdaConstant), U8(0),
B(Star), R(3),
@@ -374,10 +364,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 137
+bytecode array length: 136
bytecodes: [
- /* 237 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 237 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 255 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(0),
B(Star), R(1),
@@ -469,10 +458,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 140
+bytecode array length: 139
bytecodes: [
- /* 189 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 189 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 202 S> */ B(LdaUndefined),
B(Star), R(2),
@@ -567,10 +555,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 43
+bytecode array length: 42
bytecodes: [
- /* 79 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 79 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 93 S> */ B(CreateEmptyObjectLiteral),
/* 95 E> */ B(StaGlobal), U8(0), U8(0),
@@ -616,10 +603,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 43
+bytecode array length: 42
bytecodes: [
- /* 76 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 76 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 92 S> */ B(CreateEmptyObjectLiteral),
/* 94 E> */ B(StaGlobal), U8(0), U8(0),
@@ -657,9 +643,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 25 E> */ B(StackCheck),
/* 32 S> */ B(LdaSmi), I8(3),
/* 36 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 41 S> */ B(Ldar), R(arg0),
@@ -680,9 +665,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 26 E> */ B(StackCheck),
/* 33 S> */ B(LdaSmi), I8(3),
/* 37 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 42 S> */ B(Ldar), R(arg0),
@@ -703,9 +687,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 37 S> */ B(LdaSmi), I8(3),
/* 41 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 46 S> */ B(Ldar), R(arg0),
@@ -727,9 +710,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 29 E> */ B(StackCheck),
/* 36 S> */ B(LdaSmi), I8(3),
/* 40 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 45 S> */ B(Ldar), R(arg0),
@@ -751,10 +733,9 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 27
+bytecode array length: 26
bytecodes: [
- /* 46 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 46 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
@@ -784,10 +765,9 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 27
+bytecode array length: 26
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
index c3b8dcc22f..c911947481 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
@@ -20,10 +20,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 42
+bytecode array length: 41
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
@@ -68,10 +67,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 65
+bytecode array length: 64
bytecodes: [
- /* 16 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 16 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 29 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 31 E> */ B(StaGlobal), U8(1), U8(1),
@@ -128,10 +126,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 144
+bytecode array length: 143
bytecodes: [
- /* 237 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 237 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 255 S> */ B(LdaNamedProperty), R(this), U8(0), U8(0),
B(Star), R(1),
@@ -221,10 +218,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 126
+bytecode array length: 125
bytecodes: [
- /* 189 E> */ B(StackCheck),
- B(CreateMappedArguments),
+ /* 189 E> */ B(CreateMappedArguments),
B(Star), R(0),
/* 202 S> */ B(LdaGlobal), U8(0), U8(0),
B(Star), R(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
index 46d972af0c..de6b5e0844 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
@@ -19,9 +19,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 55 S> */ B(LdaSmi), I8(-1),
/* 65 S> */ B(Return),
]
@@ -43,9 +42,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 36 S> */ B(LdaSmi), I8(1),
/* 45 S> */ B(Return),
]
@@ -67,9 +65,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 59 S> */ B(LdaSmi), I8(-1),
/* 69 S> */ B(Return),
]
@@ -89,9 +86,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 17 S> */ B(LdaUndefined),
/* 48 S> */ B(Return),
]
@@ -114,9 +110,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 21
+bytecode array length: 20
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 30 S> */ B(JumpIfToBooleanFalse), U8(11),
@@ -147,9 +142,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 18 S> */ B(LdaZero),
/* 24 E> */ B(TestLessThanOrEqual), R(arg0), U8(0),
B(JumpIfFalse), U8(7),
@@ -169,9 +163,8 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 19 S> */ B(Ldar), R(arg1),
/* 25 E> */ B(TestIn), R(arg0), U8(0),
B(JumpIfFalse), U8(7),
@@ -256,9 +249,8 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 27
+bytecode array length: 26
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 24 S> */ B(LdaZero),
B(Star), R(0),
/* 35 S> */ B(LdaZero),
@@ -353,9 +345,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaZero),
B(Star), R(0),
/* 36 S> */ B(LdaZero),
@@ -390,9 +381,8 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 83
+bytecode array length: 82
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
/* 27 E> */ B(TestEqual), R(arg0), U8(0),
B(JumpIfFalse), U8(5),
@@ -455,9 +445,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaZero),
B(Star), R(0),
/* 30 S> */ B(JumpIfToBooleanFalse), U8(5),
@@ -486,9 +475,8 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 34
+bytecode array length: 33
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
/* 27 E> */ B(TestEqual), R(arg0), U8(0),
B(JumpIfTrue), U8(8),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
index ea0f55d478..951b3543a8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(ExtraWide), B(LdaSmi), I32(12345678),
/* 50 S> */ B(Return),
]
@@ -28,9 +27,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(Wide), B(LdaSmi), I16(1234),
B(Star), R(0),
/* 48 S> */ B(Wide), B(LdaSmi), I16(5678),
@@ -47,9 +45,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(Wide), B(LdaSmi), I16(1234),
B(Star), R(0),
/* 48 S> */ B(Wide), B(LdaSmi), I16(1234),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
index a7900cf8eb..0ee86f4cea 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
B(LdaUndefined),
@@ -30,9 +29,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 55 S> */ B(Return),
@@ -48,9 +46,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(LdaTheHole),
B(Star), R(0),
/* 42 S> */ B(LdaSmi), I8(20),
@@ -73,9 +70,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(20),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
index 61f898d5d7..fe82887e3c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
@@ -11,10 +11,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -35,10 +34,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 16
+bytecode array length: 15
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -59,10 +57,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 25
+bytecode array length: 24
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -89,10 +86,9 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 19
+bytecode array length: 18
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
index c34d6bd739..3199c09d94 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
@@ -14,9 +14,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 21 E> */ B(StackCheck),
/* 26 S> */ B(LdaGlobal), U8(0), U8(0),
/* 35 S> */ B(Return),
]
@@ -34,9 +33,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
/* 41 S> */ B(Return),
]
@@ -54,9 +52,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 17 E> */ B(StackCheck),
/* 22 S> */ B(LdaGlobal), U8(0), U8(0),
/* 31 S> */ B(Return),
]
@@ -205,9 +202,8 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 523
+bytecode array length: 522
bytecodes: [
- /* 17 E> */ B(StackCheck),
/* 33 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
/* 41 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
index 9f51448614..ddcf588467 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(4),
@@ -31,9 +30,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(1),
@@ -53,9 +51,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(4),
@@ -73,9 +70,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaZero),
@@ -95,9 +91,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(4),
@@ -115,9 +110,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(2),
B(Star), R(0),
/* 49 S> */ B(LdaSmi), I8(3),
@@ -174,9 +168,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 276
+bytecode array length: 275
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -360,9 +353,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 275
+bytecode array length: 274
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -546,9 +538,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 279
+bytecode array length: 278
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -733,9 +724,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 278
+bytecode array length: 277
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -888,9 +878,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaZero),
/* 48 S> */ B(Return),
]
@@ -905,9 +894,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(1),
/* 48 S> */ B(Return),
]
@@ -922,9 +910,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
index f7da45c224..f7a4289e61 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
@@ -12,10 +12,9 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 63
+bytecode array length: 62
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(4),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(3),
@@ -57,10 +56,9 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 64
+bytecode array length: 63
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(4),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(3),
@@ -103,10 +101,9 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 64
+bytecode array length: 63
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(4),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(3),
@@ -154,10 +151,9 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 63
+bytecode array length: 62
bytecodes: [
- /* 38 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(4),
+ /* 38 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(3),
@@ -204,10 +200,9 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 63
+bytecode array length: 62
bytecodes: [
- /* 34 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(4),
+ /* 34 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
index 0ace6d1dc5..f8b66ce3d2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
@@ -17,9 +17,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaLookupGlobalSlot), U8(0), U8(0), U8(1),
/* 24 S> */ B(Return),
]
@@ -40,9 +39,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaSmi), I8(10),
/* 17 E> */ B(StaLookupSlot), U8(0), U8(0),
B(LdaUndefined),
@@ -65,9 +63,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 29 S> */ B(LdaSmi), I8(10),
/* 31 E> */ B(StaLookupSlot), U8(0), U8(1),
B(LdaUndefined),
@@ -90,9 +87,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(0), U8(0), U8(1),
B(TypeOf),
/* 31 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index b21dea1dcb..854a78c921 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -11,37 +11,30 @@ top level: yes
snippet: "
import \"bar\";
"
-frame size: 5
-parameter count: 2
-bytecode array length: 62
+frame size: 4
+parameter count: 1
+bytecode array length: 47
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(arg0), R(2),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
- B(PushContext), R(2),
- B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
B(Star), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
+ B(Ldar), R(2),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 14 S> */ B(Return),
- B(Mov), R(3), R(1),
+ B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 14 S> */ B(Return),
]
constant pool: [
- Smi [35],
- SCOPE_INFO_TYPE,
+ Smi [21],
Smi [10],
Smi [7],
]
@@ -52,37 +45,30 @@ handlers: [
snippet: "
import {foo} from \"bar\";
"
-frame size: 5
-parameter count: 2
-bytecode array length: 62
+frame size: 4
+parameter count: 1
+bytecode array length: 47
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(arg0), R(2),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
- B(PushContext), R(2),
- B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
B(Star), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
+ B(Ldar), R(2),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 25 S> */ B(Return),
- B(Mov), R(3), R(1),
+ B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 25 S> */ B(Return),
]
constant pool: [
- Smi [35],
- SCOPE_INFO_TYPE,
+ Smi [21],
Smi [10],
Smi [7],
]
@@ -95,50 +81,43 @@ snippet: "
goo(42);
{ let x; { goo(42) } };
"
-frame size: 6
-parameter count: 2
-bytecode array length: 92
+frame size: 5
+parameter count: 1
+bytecode array length: 77
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
- B(PushContext), R(3),
- B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 65 S> */ B(Return),
/* 32 S> */ B(LdaModuleVariable), I8(-1), U8(0),
- B(ThrowReferenceErrorIfHole), U8(4),
- B(Star), R(4),
+ B(ThrowReferenceErrorIfHole), U8(3),
+ B(Star), R(3),
B(LdaSmi), I8(42),
- B(Star), R(5),
- /* 32 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(0),
+ B(Star), R(4),
+ /* 32 E> */ B(CallUndefinedReceiver1), R(3), R(4), U8(0),
/* 47 S> */ B(LdaUndefined),
B(Star), R(2),
/* 52 S> */ B(LdaModuleVariable), I8(-1), U8(0),
- B(ThrowReferenceErrorIfHole), U8(4),
- B(Star), R(4),
+ B(ThrowReferenceErrorIfHole), U8(3),
+ B(Star), R(3),
B(LdaSmi), I8(42),
- B(Star), R(5),
- /* 52 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(2),
+ B(Star), R(4),
+ /* 52 E> */ B(CallUndefinedReceiver1), R(3), R(4), U8(2),
B(Star), R(1),
/* 65 S> */ B(Return),
]
constant pool: [
- Smi [35],
- SCOPE_INFO_TYPE,
+ Smi [21],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
@@ -152,29 +131,23 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 6
-parameter count: 2
-bytecode array length: 90
+frame size: 5
+parameter count: 1
+bytecode array length: 75
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
- B(PushContext), R(3),
- B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 50 S> */ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
@@ -185,16 +158,15 @@ bytecodes: [
B(Star), R(2),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(4),
+ B(Star), R(3),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(Ldar), R(1),
/* 50 S> */ B(Return),
]
constant pool: [
- Smi [35],
- SCOPE_INFO_TYPE,
+ Smi [21],
Smi [10],
Smi [7],
]
@@ -207,32 +179,28 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 6
-parameter count: 2
-bytecode array length: 96
+frame size: 5
+parameter count: 1
+bytecode array length: 89
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(Star), R(0),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
- B(PushContext), R(3),
+ B(Star), R(3),
B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(0),
- B(LdaTheHole),
- B(StaModuleVariable), I8(1), U8(0),
+ B(CallRuntime), U16(Runtime::kDeclareModuleExports), R(3), U8(2),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 50 S> */ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
@@ -243,16 +211,16 @@ bytecodes: [
B(Star), R(2),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(4),
+ B(Star), R(3),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(Ldar), R(1),
/* 50 S> */ B(Return),
]
constant pool: [
- Smi [41],
- SCOPE_INFO_TYPE,
+ Smi [35],
+ FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
]
@@ -265,32 +233,28 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 6
-parameter count: 2
-bytecode array length: 100
+frame size: 5
+parameter count: 1
+bytecode array length: 93
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(Star), R(0),
B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
- B(PushContext), R(3),
+ B(Star), R(3),
B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(0),
- B(LdaTheHole),
- B(StaModuleVariable), I8(1), U8(0),
+ B(CallRuntime), U16(Runtime::kDeclareModuleExports), R(3), U8(2),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 52 S> */ B(Return),
/* 19 S> */ B(LdaSmi), I8(42),
/* 19 E> */ B(StaModuleVariable), I8(1), U8(0),
@@ -301,16 +265,16 @@ bytecodes: [
B(Star), R(2),
/* 41 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(4),
+ B(Star), R(3),
B(Inc), U8(1),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(Ldar), R(1),
/* 52 S> */ B(Return),
]
constant pool: [
- Smi [41],
- SCOPE_INFO_TYPE,
+ Smi [35],
+ FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
]
@@ -321,42 +285,38 @@ handlers: [
snippet: "
export default (function () {});
"
-frame size: 5
-parameter count: 2
-bytecode array length: 75
+frame size: 4
+parameter count: 1
+bytecode array length: 68
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ B(Star), R(0),
B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(arg0), R(2),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
- B(PushContext), R(2),
+ B(Star), R(2),
B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(0),
- B(LdaTheHole),
- B(StaModuleVariable), I8(1), U8(0),
+ B(CallRuntime), U16(Runtime::kDeclareModuleExports), R(2), U8(2),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 33 S> */ B(Return),
- B(Mov), R(3), R(1),
+ B(Mov), R(2), R(1),
B(CreateClosure), U8(4), U8(0), U8(0),
B(StaModuleVariable), I8(1), U8(0),
B(Ldar), R(1),
/* 33 S> */ B(Return),
]
constant pool: [
- Smi [41],
- SCOPE_INFO_TYPE,
+ Smi [35],
+ FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
SHARED_FUNCTION_INFO_TYPE,
@@ -368,51 +328,47 @@ handlers: [
snippet: "
export default (class {});
"
-frame size: 7
-parameter count: 2
-bytecode array length: 96
+frame size: 6
+parameter count: 1
+bytecode array length: 89
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ B(Star), R(0),
B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(arg0), R(2),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
- B(PushContext), R(2),
+ B(Star), R(2),
B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(0),
- B(LdaTheHole),
- B(StaModuleVariable), I8(1), U8(0),
+ B(CallRuntime), U16(Runtime::kDeclareModuleExports), R(2), U8(2),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 27 S> */ B(Return),
- B(Mov), R(3), R(1),
+ B(Mov), R(2), R(1),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(5),
B(CreateClosure), U8(5), U8(0), U8(0),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaConstant), U8(4),
- B(Star), R(4),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(Ldar), R(5),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
+ B(Star), R(3),
+ B(Ldar), R(4),
B(StaModuleVariable), I8(1), U8(0),
B(Ldar), R(1),
/* 27 S> */ B(Return),
]
constant pool: [
- Smi [41],
- SCOPE_INFO_TYPE,
+ Smi [35],
+ FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
FIXED_ARRAY_TYPE,
@@ -425,37 +381,30 @@ handlers: [
snippet: "
export {foo as goo} from \"bar\"
"
-frame size: 5
-parameter count: 2
-bytecode array length: 62
+frame size: 4
+parameter count: 1
+bytecode array length: 47
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(arg0), R(2),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
- B(PushContext), R(2),
- B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
B(Star), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
+ B(Ldar), R(2),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 31 S> */ B(Return),
- B(Mov), R(3), R(1),
+ B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 31 S> */ B(Return),
]
constant pool: [
- Smi [35],
- SCOPE_INFO_TYPE,
+ Smi [21],
Smi [10],
Smi [7],
]
@@ -466,37 +415,30 @@ handlers: [
snippet: "
export * from \"bar\"
"
-frame size: 5
-parameter count: 2
-bytecode array length: 62
+frame size: 4
+parameter count: 1
+bytecode array length: 47
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(arg0), R(2),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(2),
- B(PushContext), R(2),
- B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
B(Star), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
+ B(Ldar), R(2),
/* 0 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 20 S> */ B(Return),
- B(Mov), R(3), R(1),
+ B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 20 S> */ B(Return),
]
constant pool: [
- Smi [35],
- SCOPE_INFO_TYPE,
+ Smi [21],
Smi [10],
Smi [7],
]
@@ -508,46 +450,39 @@ snippet: "
import * as foo from \"bar\"
foo.f(foo, foo.x);
"
-frame size: 8
-parameter count: 2
-bytecode array length: 89
+frame size: 7
+parameter count: 1
+bytecode array length: 74
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(Mov), R(arg0), R(3),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(2),
- B(PushContext), R(3),
- B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ /* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
B(LdaZero),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(4), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(3), U8(1),
B(Star), R(1),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 46 S> */ B(Return),
- /* 31 S> */ B(LdaNamedProperty), R(1), U8(4), U8(0),
- B(Star), R(4),
- /* 42 E> */ B(LdaNamedProperty), R(1), U8(5), U8(2),
- B(Star), R(7),
- /* 31 E> */ B(CallProperty2), R(4), R(1), R(1), R(7), U8(4),
+ /* 31 S> */ B(LdaNamedProperty), R(1), U8(3), U8(0),
+ B(Star), R(3),
+ /* 42 E> */ B(LdaNamedProperty), R(1), U8(4), U8(2),
+ B(Star), R(6),
+ /* 31 E> */ B(CallProperty2), R(3), R(1), R(1), R(6), U8(4),
B(Star), R(2),
/* 46 S> */ B(Return),
]
constant pool: [
- Smi [47],
- SCOPE_INFO_TYPE,
+ Smi [33],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["f"],
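
Note on the Modules.golden rebaselines above: each module function now takes no module-context argument (parameter count drops from 2 to 1, and the Runtime::kPushModuleContext / PushContext prologue disappears), and the per-export LdaTheHole / StaModuleVariable pairs collapse into a single Runtime::kDeclareModuleExports call. An illustrative way to reproduce such a listing locally, assuming a d8 build (--print-bytecode and --module are existing d8 flags; the file name is made up):

    // sketch.mjs — any module with a hole-initialized export shows the change:
    // the export is now declared by one kDeclareModuleExports call instead of
    // an explicit LdaTheHole / StaModuleVariable pair per variable.
    export let answer;
    answer = 42;

    // d8 --module --print-bytecode sketch.mjs
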
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index 2d994ac100..d0ab8d1281 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -12,10 +12,9 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 48
+bytecode array length: 47
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
@@ -51,10 +50,9 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 51
+bytecode array length: 50
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
@@ -92,10 +90,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 122
+bytecode array length: 121
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
index 53ef573033..a7681926c9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(Ldar), R(0),
/* 52 S> */ B(Return),
]
@@ -28,9 +27,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaUndefined),
/* 46 S> */ B(Return),
]
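
The NewAndSpread and NewTarget rebaselines above, and most of the hunks below, are one mechanical change: the explicit StackCheck bytecode that opened every function is gone, so each bytecode array shrinks by one byte and the /* .. E> */ expression position migrates to the first remaining bytecode. An illustrative sketch of the before/after shape, mirroring the hunks rather than quoting any one golden file:

    function f() {}
    // old: /* E> */ StackCheck; LdaUndefined; Return   (length 3)
    // new: LdaUndefined; Return                        (length 2)
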
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index 82c47ff88d..a5256793a0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateEmptyObjectLiteral),
/* 45 S> */ B(Return),
]
@@ -28,9 +27,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 70 S> */ B(Return),
]
@@ -46,9 +44,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 20
+bytecode array length: 19
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
@@ -71,9 +68,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
@@ -97,9 +93,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 49 E> */ B(CreateClosure), U8(1), U8(0), U8(2),
@@ -121,9 +116,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
/* 43 E> */ B(CreateClosure), U8(1), U8(0), U8(2),
@@ -145,9 +139,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 34
+bytecode array length: 33
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -177,9 +170,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 37
+bytecode array length: 36
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -210,9 +202,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 34
+bytecode array length: 33
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
B(LdaConstant), U8(1),
@@ -242,9 +233,8 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 29
+bytecode array length: 28
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
@@ -269,9 +259,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(57),
/* 61 S> */ B(Return),
]
@@ -287,9 +276,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 25
+bytecode array length: 24
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
@@ -314,9 +302,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 31
+bytecode array length: 30
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
@@ -344,9 +331,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 36
+bytecode array length: 35
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
@@ -375,9 +361,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 64
+bytecode array length: 63
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
index 146715e03a..bb4aac932e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
@@ -20,9 +20,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 97 E> */ B(StackCheck),
/* 102 S> */ B(LdaImmutableContextSlot), R(context), U8(2), U8(1),
B(Star), R(0),
B(LdaImmutableCurrentContextSlot), U8(2),
@@ -48,9 +47,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 97 E> */ B(StackCheck),
/* 102 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
/* 111 E> */ B(StaContextSlot), R(context), U8(2), U8(1),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
index 81d6408e14..d52e3016b4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
@@ -13,9 +13,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 15 S> */ B(Ldar), R(this),
/* 27 S> */ B(Return),
]
@@ -31,9 +30,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 19 S> */ B(Ldar), R(arg0),
/* 31 S> */ B(Return),
]
@@ -49,9 +47,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 19 S> */ B(Ldar), R(this),
/* 31 S> */ B(Return),
]
@@ -67,9 +64,8 @@ snippet: "
"
frame size: 0
parameter count: 8
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 55 S> */ B(Ldar), R(arg3),
/* 67 S> */ B(Return),
]
@@ -85,9 +81,8 @@ snippet: "
"
frame size: 0
parameter count: 8
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 55 S> */ B(Ldar), R(this),
/* 67 S> */ B(Return),
]
@@ -103,9 +98,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 19 S> */ B(LdaSmi), I8(1),
B(Star), R(arg0),
B(LdaUndefined),
@@ -123,9 +117,8 @@ snippet: "
"
frame size: 0
parameter count: 5
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 37 S> */ B(LdaSmi), I8(1),
B(Star), R(arg1),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
index 4b0d05f8b0..daf96cdd44 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 54 S> */ B(Return),
@@ -29,9 +28,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 54 S> */ B(AddSmi), I8(3), U8(0),
@@ -48,9 +46,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(3),
@@ -70,9 +67,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 54 S> */ B(SubSmi), I8(3), U8(0),
@@ -89,9 +85,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(3),
@@ -111,9 +106,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 54 S> */ B(MulSmi), I8(3), U8(0),
@@ -130,9 +124,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 54 S> */ B(MulSmi), I8(3), U8(0),
@@ -149,9 +142,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 54 S> */ B(DivSmi), I8(3), U8(0),
@@ -168,9 +160,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(3),
@@ -190,9 +181,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 54 S> */ B(ModSmi), I8(3), U8(0),
@@ -209,9 +199,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(3),
@@ -231,9 +220,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 54 S> */ B(BitwiseOrSmi), I8(2), U8(0),
@@ -250,9 +238,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 54 S> */ B(BitwiseOrSmi), I8(2), U8(0),
@@ -269,9 +256,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 54 S> */ B(BitwiseXorSmi), I8(2), U8(0),
@@ -288,9 +274,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 54 S> */ B(BitwiseXorSmi), I8(2), U8(0),
@@ -307,9 +292,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 54 S> */ B(BitwiseAndSmi), I8(2), U8(0),
@@ -326,9 +310,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 54 S> */ B(BitwiseAndSmi), I8(2), U8(0),
@@ -345,9 +328,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 55 S> */ B(ShiftLeftSmi), I8(3), U8(0),
@@ -364,9 +346,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(3),
@@ -386,9 +367,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 55 S> */ B(ShiftRightSmi), I8(3), U8(0),
@@ -405,9 +385,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(3),
@@ -427,9 +406,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 55 S> */ B(ShiftRightLogicalSmi), I8(3), U8(0),
@@ -446,9 +424,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(3),
@@ -468,9 +445,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
index e31ab85708..77f55145cc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
@@ -10,9 +10,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
@@ -27,9 +26,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaUndefined),
/* 41 S> */ B(Return),
]
@@ -44,9 +42,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaNull),
/* 46 S> */ B(Return),
]
@@ -61,9 +58,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaTrue),
/* 46 S> */ B(Return),
]
@@ -78,9 +74,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaFalse),
/* 47 S> */ B(Return),
]
@@ -95,9 +90,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaZero),
/* 43 S> */ B(Return),
]
@@ -112,9 +106,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(1),
/* 44 S> */ B(Return),
]
@@ -129,9 +122,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(-1),
/* 44 S> */ B(Return),
]
@@ -146,9 +138,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(127),
/* 46 S> */ B(Return),
]
@@ -163,9 +154,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(-128),
/* 46 S> */ B(Return),
]
@@ -180,9 +170,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(2),
/* 45 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index ff4c255949..7ac886945c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -24,14 +24,13 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 98
+bytecode array length: 97
bytecodes: [
- /* 67 E> */ B(StackCheck),
B(LdaCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
- B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
+ /* 67 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
/* 76 S> */ B(LdaCurrentContextSlot), U8(2),
B(Star), R(4),
B(LdaCurrentContextSlot), U8(3),
@@ -78,15 +77,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- /* 48 E> */ B(StackCheck),
B(LdaCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
- B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(265),
+ /* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(266),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -110,15 +108,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- /* 41 E> */ B(StackCheck),
B(LdaCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
- B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(264),
+ /* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(265),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -142,15 +139,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- /* 48 E> */ B(StackCheck),
B(LdaCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
- B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(265),
+ /* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(266),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -174,15 +170,14 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- /* 41 E> */ B(StackCheck),
B(LdaCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
- B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(264),
+ /* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(265),
B(Star), R(4),
B(LdaConstant), U8(0),
B(Star), R(5),
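
The I16 immediates bumped in PrivateAccessorAccess (264 and 265 becoming 265 and 266) look like message-template indices rather than a codegen change: the Wide LdaSmi feeds the runtime call that constructs the TypeError for an invalid private-member access, so inserting a new template earlier in the enum shifts every later index by one. Illustrative source that produces such an eager-throw sequence (assumption: the immediates are MessageTemplate values):

    class C {
      set #p(v) {}
      get() { return this.#p; }   // read of a setter-only private accessor:
    }                             // compiled as Wide LdaSmi <template>; runtime throw
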
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
index cf12be82ba..085e38d7db 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
@@ -17,10 +17,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 65
+bytecode array length: 64
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
B(Star), R(3),
@@ -67,10 +66,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 62
+bytecode array length: 61
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
B(Star), R(3),
@@ -116,10 +114,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 62
+bytecode array length: 61
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
B(Star), R(3),
@@ -171,10 +168,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 127
+bytecode array length: 126
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaConstant), U8(2),
B(Star), R(4),
@@ -251,10 +247,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 113
+bytecode array length: 112
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
B(Star), R(6),
@@ -325,10 +320,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 113
+bytecode array length: 112
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
B(Star), R(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
index 90ae0a6688..5e00600435 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -24,10 +24,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 131
+bytecode array length: 130
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaConstant), U8(2),
B(Star), R(4),
@@ -130,10 +129,9 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 268
+bytecode array length: 267
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaConstant), U8(2),
B(Star), R(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index d413b43af8..cd5dd6f5a6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -19,14 +19,13 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 31
+bytecode array length: 30
bytecodes: [
- /* 44 E> */ B(StackCheck),
B(LdaCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
- B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
+ /* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
/* 49 S> */ B(LdaCurrentContextSlot), U8(3),
/* 61 E> */ B(LdaKeyedProperty), R(this), U8(0),
B(LdaCurrentContextSlot), U8(2),
@@ -51,15 +50,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- /* 44 E> */ B(StackCheck),
B(LdaCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
- B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(263),
+ /* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(264),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -84,15 +82,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- /* 44 E> */ B(StackCheck),
B(LdaCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
- B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(263),
+ /* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(264),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -117,10 +114,9 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 58
+bytecode array length: 57
bytecodes: [
- /* 44 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(1),
+ /* 44 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
index ef7c245d46..9643de4f23 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
@@ -16,10 +16,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 52
+bytecode array length: 51
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
B(Star), R(3),
@@ -64,10 +63,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 101
+bytecode array length: 100
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaConstant), U8(2),
B(Star), R(4),
@@ -133,10 +131,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 98
+bytecode array length: 97
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
B(Star), R(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
index 9d55cd6cb8..2e067b6f53 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -13,9 +13,8 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(0),
/* 25 E> */ B(CallProperty0), R(0), R(arg0), U8(2),
@@ -34,9 +33,8 @@ snippet: "
"
frame size: 1
parameter count: 4
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 31 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(0),
/* 31 E> */ B(CallProperty2), R(0), R(arg0), R(arg1), R(arg2), U8(2),
@@ -55,9 +53,8 @@ snippet: "
"
frame size: 3
parameter count: 3
-bytecode array length: 21
+bytecode array length: 20
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 28 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(0),
B(Ldar), R(arg1),
@@ -210,9 +207,8 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 543
+bytecode array length: 542
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 26 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
/* 34 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
@@ -490,9 +486,8 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 51
+bytecode array length: 50
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(2),
B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
index e6eacf6fd6..6fd6116f0c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
@@ -22,9 +22,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 77
+bytecode array length: 76
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
B(Star), R(2),
B(LdaSmi), I8(41),
@@ -80,9 +79,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 120
+bytecode array length: 118
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
B(Star), R(2),
B(LdaSmi), I8(41),
@@ -97,8 +95,7 @@ bytecodes: [
B(Star), R(1),
B(LdaSmi), I8(5),
/* 77 E> */ B(TestLessThan), R(1), U8(6),
- B(JumpIfFalse), U8(83),
- /* 63 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(82),
/* 97 S> */ B(LdaGlobal), U8(1), U8(7),
B(Star), R(1),
/* 106 E> */ B(LdaGlobal), U8(1), U8(7),
@@ -126,7 +123,7 @@ bytecodes: [
/* 84 S> */ B(LdaGlobal), U8(2), U8(4),
B(Inc), U8(19),
/* 84 E> */ B(StaGlobal), U8(2), U8(2),
- B(JumpLoop), U8(90), I8(0),
+ /* 63 E> */ B(JumpLoop), U8(89), I8(0),
B(Ldar), R(0),
/* 171 S> */ B(Return),
]
@@ -155,9 +152,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 109
+bytecode array length: 107
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
B(Star), R(2),
B(LdaSmi), I8(41),
@@ -170,8 +166,7 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
/* 72 E> */ B(TestGreaterThan), R(1), U8(4),
- B(JumpIfFalse), U8(77),
- /* 63 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(76),
/* 87 S> */ B(LdaGlobal), U8(1), U8(5),
B(Star), R(1),
/* 97 E> */ B(LdaGlobal), U8(1), U8(5),
@@ -197,7 +192,7 @@ bytecodes: [
/* 130 E> */ B(StaNamedProperty), R(1), U8(4), U8(15),
B(Mov), R(2), R(0),
B(Ldar), R(2),
- B(JumpLoop), U8(83), I8(0),
+ /* 63 E> */ B(JumpLoop), U8(82), I8(0),
B(Ldar), R(0),
/* 163 S> */ B(Return),
]
@@ -226,9 +221,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 81
+bytecode array length: 79
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
B(Star), R(2),
B(LdaSmi), I8(41),
@@ -239,7 +233,6 @@ bytecodes: [
/* 65 E> */ B(StaGlobal), U8(2), U8(2),
B(LdaUndefined),
B(Star), R(0),
- /* 77 E> */ B(StackCheck),
/* 90 S> */ B(LdaGlobal), U8(1), U8(4),
B(Star), R(1),
/* 99 E> */ B(LdaGlobal), U8(1), U8(4),
@@ -258,7 +251,7 @@ bytecodes: [
B(LdaSmi), I8(10),
/* 133 E> */ B(TestLessThan), R(1), U8(15),
B(JumpIfFalse), U8(5),
- B(JumpLoop), U8(50), I8(0),
+ /* 77 E> */ B(JumpLoop), U8(49), I8(0),
B(Ldar), R(0),
/* 146 S> */ B(Return),
]
@@ -288,9 +281,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 75
+bytecode array length: 74
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
B(Star), R(2),
B(LdaSmi), I8(41),
@@ -339,9 +331,8 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 20
+bytecode array length: 19
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
B(Star), R(3),
B(LdaSmi), I8(4),
@@ -366,9 +357,8 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 20
+bytecode array length: 19
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(LdaConstant), U8(0),
B(Star), R(3),
B(LdaSmi), I8(37),
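
PropertyLoadStoreOneShot above adds the loop half of the StackCheck removal: the per-iteration StackCheck after JumpIfFalse is dropped and the back edge's JumpLoop inherits both the check and the loop's /* E> */ position, which is why the JumpIfFalse and JumpLoop operands shrink by one or two bytes. Illustrative shape:

    let i = 0;
    while (i < 5) {   // old: TestLessThan; JumpIfFalse; StackCheck; body; JumpLoop
      i++;            // new: TestLessThan; JumpIfFalse; body; /* E> */ JumpLoop
    }
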
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
index fe75e8a344..ef630cfa6f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
@@ -21,9 +21,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 71
+bytecode array length: 70
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 9 E> */ B(StaGlobal), U8(1), U8(1),
/* 66 S> */ B(LdaGlobal), U8(1), U8(4),
@@ -76,9 +75,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 67
+bytecode array length: 66
bytecodes: [
- /* 0 E> */ B(StackCheck),
/* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 9 E> */ B(StaGlobal), U8(1), U8(1),
/* 65 S> */ B(LdaGlobal), U8(1), U8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
index 4b1a1c03a9..e28ded8006 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
@@ -13,9 +13,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
/* 30 S> */ B(Return),
]
@@ -32,9 +31,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 24 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
/* 32 S> */ B(Return),
]
@@ -51,9 +49,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaSmi), I8(100),
/* 24 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
/* 30 S> */ B(Return),
@@ -70,9 +67,8 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 19 S> */ B(Ldar), R(arg1),
/* 27 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
/* 31 S> */ B(Return),
@@ -89,9 +85,8 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 26 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
B(Star), R(0),
/* 32 S> */ B(LdaSmi), I8(-124),
@@ -242,9 +237,8 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 525
+bytecode array length: 524
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 26 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
/* 34 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
@@ -650,9 +644,8 @@ snippet: "
"
frame size: 1
parameter count: 3
-bytecode array length: 906
+bytecode array length: 905
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 30 S> */ B(Ldar), R(arg1),
/* 35 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
B(Star), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
index 2bb062b707..32e4e31848 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
@@ -13,9 +13,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaConstant), U8(0),
/* 23 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
@@ -35,9 +34,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaConstant), U8(0),
/* 25 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
@@ -57,9 +55,8 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaSmi), I8(100),
B(Star), R(1),
B(LdaConstant), U8(0),
@@ -80,9 +77,8 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 19 S> */ B(LdaConstant), U8(0),
/* 24 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
B(LdaUndefined),
@@ -101,9 +97,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaSmi), I8(-124),
/* 26 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
/* 23 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(2),
@@ -123,9 +118,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 30 S> */ B(LdaConstant), U8(0),
/* 37 E> */ B(StaNamedProperty), R(arg0), U8(1), U8(0),
B(LdaUndefined),
@@ -145,9 +139,8 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 33 S> */ B(LdaConstant), U8(0),
/* 38 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
B(LdaUndefined),
@@ -298,9 +291,8 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 534
+bytecode array length: 533
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 18 S> */ B(LdaSmi), I8(1),
/* 25 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 40 S> */ B(CreateEmptyObjectLiteral),
@@ -712,9 +704,8 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 534
+bytecode array length: 533
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 33 S> */ B(LdaSmi), I8(1),
/* 40 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 55 S> */ B(CreateEmptyObjectLiteral),
@@ -1123,9 +1114,8 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 781
+bytecode array length: 780
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 21 S> */ B(LdaSmi), I8(1),
/* 26 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
/* 33 S> */ B(LdaSmi), I8(1),
@@ -1530,9 +1520,8 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 781
+bytecode array length: 780
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 37 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaKeyedProperty), R(arg0), R(arg1), U8(0),
/* 49 S> */ B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
index ab5fcf5c4c..a841e05375 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -23,10 +23,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 119
+bytecode array length: 118
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -122,10 +121,9 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 229
+bytecode array length: 228
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
index 69f63eb8b4..8f43e97280 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
/* 48 S> */ B(Return),
]
@@ -29,9 +28,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(2),
/* 57 S> */ B(Return),
]
@@ -47,9 +45,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
B(Star), R(1),
/* 48 E> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
index 7896ffc3ce..22831a3ff3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
@@ -16,12 +16,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 28
+bytecode array length: 26
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 48 E> */ B(StackCheck),
/* 64 S> */ B(Ldar), R(0),
/* 76 E> */ B(Add), R(0), U8(0),
B(Star), R(0),
@@ -29,7 +27,7 @@ bytecodes: [
/* 95 E> */ B(TestGreaterThan), R(0), U8(1),
B(JumpIfFalse), U8(4),
/* 101 S> */ B(Jump), U8(5),
- B(JumpLoop), U8(17), I8(0),
+ /* 48 E> */ B(JumpLoop), U8(16), I8(0),
/* 110 S> */ B(Ldar), R(0),
/* 122 S> */ B(Return),
]
@@ -49,12 +47,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 23
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 48 E> */ B(StackCheck),
/* 67 S> */ B(Add), R(0), U8(0),
B(Star), R(0),
/* 77 S> */ B(LdaSmi), I8(10),
@@ -77,9 +73,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 62 S> */ B(Add), R(0), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 82a43ae36c..7d7a8d39b3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -15,20 +15,18 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 26
+bytecode array length: 24
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 30 S> */ B(LdaZero),
B(Star), R(0),
/* 35 S> */ B(LdaSmi), I8(10),
/* 35 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(15),
- /* 17 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(14),
/* 56 S> */ B(Mov), R(0), R(1),
/* 43 S> */ B(Ldar), R(1),
B(Inc), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(17), I8(0),
+ /* 17 E> */ B(JumpLoop), U8(16), I8(0),
B(LdaUndefined),
/* 61 S> */ B(Return),
]
@@ -46,10 +44,9 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 165
+bytecode array length: 162
bytecodes: [
- /* 10 E> */ B(StackCheck),
- B(CreateFunctionContext), U8(0), U8(4),
+ /* 10 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(4),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(3),
@@ -67,8 +64,7 @@ bytecodes: [
B(Star), R(0),
B(LdaSmi), I8(1),
B(Star), R(1),
- /* 59 E> */ B(StackCheck),
- B(CreateBlockContext), U8(2),
+ /* 59 E> */ B(CreateBlockContext), U8(2),
B(PushContext), R(6),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -92,11 +88,10 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(6),
B(PopContext), R(6),
- B(Jump), U8(77),
+ B(Jump), U8(76),
B(LdaSmi), I8(1),
B(TestEqual), R(2), U8(3),
- B(JumpIfFalse), U8(54),
- /* 17 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(53),
/* 48 S> */ B(LdaLookupGlobalSlot), U8(3), U8(4), U8(3),
B(Star), R(7),
B(LdaConstant), U8(4),
@@ -117,14 +112,14 @@ bytecodes: [
B(Star), R(2),
B(LdaCurrentContextSlot), U8(2),
B(Star), R(0),
- B(JumpLoop), U8(56), I8(1),
+ /* 17 E> */ B(JumpLoop), U8(55), I8(1),
B(LdaSmi), I8(1),
- /* 59 E> */ B(TestEqual), R(2), U8(8),
+ B(TestEqual), R(2), U8(8),
B(JumpIfFalse), U8(6),
B(PopContext), R(6),
B(Jump), U8(7),
B(PopContext), R(6),
- B(JumpLoop), U8(123), I8(0),
+ B(JumpLoop), U8(121), I8(0),
B(PopContext), R(5),
B(LdaUndefined),
/* 61 S> */ B(Return),
@@ -148,16 +143,14 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 106
+bytecode array length: 103
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 30 S> */ B(LdaZero),
B(Star), R(3),
B(Star), R(0),
B(LdaSmi), I8(1),
B(Star), R(1),
- /* 78 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 78 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(4),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -181,11 +174,10 @@ bytecodes: [
B(JumpIfFalse), U8(4),
B(Jump), U8(6),
B(PopContext), R(4),
- B(Jump), U8(45),
+ B(Jump), U8(44),
B(LdaSmi), I8(1),
B(TestEqual), R(2), U8(3),
- B(JumpIfFalse), U8(22),
- /* 17 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(21),
/* 48 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
B(Star), R(5),
/* 74 E> */ B(CallUndefinedReceiver0), R(5), U8(4),
@@ -193,14 +185,14 @@ bytecodes: [
B(Star), R(2),
B(LdaCurrentContextSlot), U8(2),
B(Star), R(0),
- B(JumpLoop), U8(24), I8(1),
+ /* 17 E> */ B(JumpLoop), U8(23), I8(1),
B(LdaSmi), I8(1),
- /* 78 E> */ B(TestEqual), R(2), U8(6),
+ B(TestEqual), R(2), U8(6),
B(JumpIfFalse), U8(6),
B(PopContext), R(4),
B(Jump), U8(7),
B(PopContext), R(4),
- B(JumpLoop), U8(91), I8(0),
+ B(JumpLoop), U8(89), I8(0),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
@@ -220,9 +212,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 44
+bytecode array length: 42
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 37 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(3),
/* 28 S> */ B(LdaNamedProperty), R(3), U8(1), U8(1),
@@ -231,15 +222,14 @@ bytecodes: [
B(Star), R(1),
/* 55 S> */ B(LdaZero),
/* 55 E> */ B(TestGreaterThan), R(1), U8(5),
- B(JumpIfFalse), U8(19),
- /* 17 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(18),
/* 75 S> */ B(Ldar), R(1),
/* 77 E> */ B(Add), R(0), U8(6),
B(Star), R(2),
/* 62 S> */ B(Ldar), R(1),
B(Dec), U8(7),
B(Star), R(1),
- B(JumpLoop), U8(20), I8(0),
+ /* 17 E> */ B(JumpLoop), U8(19), I8(0),
B(LdaUndefined),
/* 84 S> */ B(Return),
]
@@ -260,13 +250,12 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 67
+bytecode array length: 65
bytecodes: [
- /* 11 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ /* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
@@ -281,13 +270,12 @@ bytecodes: [
B(Star), R(1),
/* 36 S> */ B(LdaSmi), I8(10),
/* 36 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(15),
- /* 18 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(14),
/* 57 S> */ B(Mov), R(1), R(2),
/* 44 S> */ B(Ldar), R(2),
B(Inc), U8(1),
B(Star), R(1),
- B(JumpLoop), U8(17), I8(0),
+ /* 18 E> */ B(JumpLoop), U8(16), I8(0),
B(LdaUndefined),
/* 62 S> */ B(Return),
]
@@ -308,13 +296,12 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 99
+bytecode array length: 97
bytecodes: [
- /* 11 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ /* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
B(Star), R(0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
@@ -329,8 +316,7 @@ bytecodes: [
B(Star), R(1),
/* 36 S> */ B(LdaSmi), I8(10),
/* 36 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(47),
- /* 18 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(46),
/* 47 S> */ B(LdaFalse),
B(Star), R(3),
B(Mov), R(1), R(2),
@@ -347,13 +333,13 @@ bytecodes: [
/* 44 S> */ B(Ldar), R(1),
B(Inc), U8(1),
B(Star), R(1),
- B(JumpLoop), U8(49), I8(0),
+ /* 18 E> */ B(JumpLoop), U8(48), I8(0),
B(LdaUndefined),
/* 56 S> */ B(Return),
]
constant pool: [
Smi [21],
- Smi [67],
+ Smi [66],
Smi [10],
Smi [7],
Smi [10],
@@ -371,31 +357,29 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 81
+bytecode array length: 79
bytecodes: [
- /* 16 E> */ B(StackCheck),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
B(Star), R(0),
B(Mov), R(context), R(3),
/* 36 S> */ B(LdaZero),
B(Star), R(1),
/* 41 S> */ B(LdaSmi), I8(10),
/* 41 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(15),
- /* 23 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(14),
/* 62 S> */ B(Mov), R(1), R(2),
/* 49 S> */ B(Ldar), R(2),
B(Inc), U8(1),
B(Star), R(1),
- B(JumpLoop), U8(17), I8(0),
+ /* 23 E> */ B(JumpLoop), U8(16), I8(0),
B(LdaUndefined),
B(Star), R(5),
B(LdaFalse),
B(Star), R(6),
B(Mov), R(0), R(4),
- /* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
/* 67 S> */ B(Return),
B(Star), R(4),
B(CreateCatchContext), R(4), U8(0),
@@ -416,7 +400,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [16, 53, 53],
+ [15, 51, 51],
]
---
@@ -428,21 +412,19 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 117
+bytecode array length: 115
bytecodes: [
- /* 16 E> */ B(StackCheck),
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
B(Star), R(0),
B(Mov), R(context), R(2),
/* 36 S> */ B(LdaZero),
B(Star), R(1),
/* 41 S> */ B(LdaSmi), I8(10),
/* 41 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(47),
- /* 23 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(46),
/* 52 S> */ B(Mov), R(0), R(3),
B(Mov), R(1), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(3), U8(2),
@@ -459,13 +441,13 @@ bytecodes: [
/* 49 S> */ B(Ldar), R(1),
B(Inc), U8(1),
B(Star), R(1),
- B(JumpLoop), U8(49), I8(0),
+ /* 23 E> */ B(JumpLoop), U8(48), I8(0),
B(LdaUndefined),
B(Star), R(4),
B(LdaTrue),
B(Star), R(5),
B(Mov), R(0), R(3),
- /* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 61 S> */ B(Return),
B(Star), R(3),
B(CreateCatchContext), R(3), U8(1),
@@ -483,10 +465,10 @@ bytecodes: [
/* 61 S> */ B(Return),
]
constant pool: [
- Smi [45],
+ Smi [44],
SCOPE_INFO_TYPE,
]
handlers: [
- [20, 89, 89],
+ [19, 87, 87],
]
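
The bytecode-expectations hunks above and below all follow a single pattern: the dedicated StackCheck bytecode is removed, and its source-position marker migrates to the bytecode that now performs the check implicitly, namely JumpLoop for loop back-edges, and the first real bytecode at function entry. Each removed check shrinks the bytecode array by one byte, so jump operands, the Smi jump targets in the constant pool, and handler-table ranges all shift down by the number of checks removed before them. Schematically (offsets illustrative):

    - /* 17 E> */ B(StackCheck),
      ... loop body ...
    - B(JumpLoop), U8(20), I8(0),
    + /* 17 E> */ B(JumpLoop), U8(19), I8(0),
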
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
index e456906b2b..2b7639a885 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -27,10 +27,9 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 185
+bytecode array length: 184
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
@@ -162,10 +161,9 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 334
+bytecode array length: 333
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index 70628162df..352ec83961 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -19,14 +19,13 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 36
+bytecode array length: 35
bytecodes: [
- /* 51 E> */ B(StackCheck),
/* 56 S> */ B(LdaCurrentContextSlot), U8(3),
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(261),
+ B(Wide), B(LdaSmi), I16(262),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
@@ -55,10 +54,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 51 E> */ B(StackCheck),
- /* 56 S> */ B(Wide), B(LdaSmi), I16(263),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(264),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -83,10 +81,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 51 E> */ B(StackCheck),
- /* 56 S> */ B(Wide), B(LdaSmi), I16(263),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(264),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -117,16 +114,15 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 143
+bytecode array length: 142
bytecodes: [
- /* 81 E> */ B(StackCheck),
/* 90 S> */ B(LdaCurrentContextSlot), U8(2),
B(Star), R(1),
B(LdaCurrentContextSlot), U8(3),
/* 94 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(261),
+ B(Wide), B(LdaSmi), I16(262),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
@@ -148,7 +144,7 @@ bytecodes: [
/* 109 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(262),
+ B(Wide), B(LdaSmi), I16(263),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -163,7 +159,7 @@ bytecodes: [
/* 133 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(261),
+ B(Wide), B(LdaSmi), I16(262),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
@@ -191,10 +187,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 55 E> */ B(StackCheck),
- /* 60 S> */ B(Wide), B(LdaSmi), I16(265),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(266),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -218,10 +213,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 48 E> */ B(StackCheck),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(264),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(265),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -245,10 +239,9 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 55 E> */ B(StackCheck),
- /* 60 S> */ B(Wide), B(LdaSmi), I16(265),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(266),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -272,10 +265,9 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 41 E> */ B(StackCheck),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(264),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(265),
B(Star), R(1),
B(LdaConstant), U8(0),
B(Star), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden
index b074e54243..1af4690aa5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden
@@ -16,10 +16,9 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 41
+bytecode array length: 40
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
@@ -56,10 +55,9 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 51
+bytecode array length: 50
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
@@ -100,10 +98,9 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 51
+bytecode array length: 50
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
@@ -145,10 +142,9 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 54
+bytecode array length: 53
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
B(Star), R(5),
@@ -191,10 +187,9 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 58
+bytecode array length: 57
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateBlockContext), U8(0),
+ /* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
B(Star), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
index eed4f0467c..0a2c2995e6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
@@ -14,9 +14,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 21 E> */ B(StackCheck),
/* 26 S> */ B(LdaSmi), I8(2),
/* 28 E> */ B(StaGlobal), U8(0), U8(0),
B(LdaUndefined),
@@ -35,9 +34,8 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 26 E> */ B(StackCheck),
/* 32 S> */ B(Ldar), R(arg0),
/* 34 E> */ B(StaGlobal), U8(0), U8(0),
B(LdaUndefined),
@@ -57,9 +55,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 35 E> */ B(StackCheck),
/* 40 S> */ B(LdaSmi), I8(2),
/* 42 E> */ B(StaGlobal), U8(0), U8(0),
B(LdaUndefined),
@@ -79,9 +76,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 17 E> */ B(StackCheck),
/* 22 S> */ B(LdaSmi), I8(2),
/* 24 E> */ B(StaGlobal), U8(0), U8(0),
B(LdaUndefined),
@@ -232,9 +228,8 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 526
+bytecode array length: 525
bytecodes: [
- /* 17 E> */ B(StackCheck),
/* 33 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
/* 41 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
@@ -644,9 +639,8 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 526
+bytecode array length: 525
bytecodes: [
- /* 17 E> */ B(StackCheck),
/* 49 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
/* 57 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden
index 3fa52040a8..9e94fe593d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden
@@ -13,9 +13,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -41,9 +40,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 26
+bytecode array length: 25
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -71,9 +69,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -99,9 +96,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 43
+bytecode array length: 42
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -138,9 +134,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 29
+bytecode array length: 28
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -170,10 +165,9 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 42
+bytecode array length: 41
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateClosure), U8(0), U8(0), U8(2),
+ /* 30 E> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(2),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
index 0f4b595c8e..385c3089c1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaConstant), U8(0),
/* 60 S> */ B(Return),
]
@@ -29,9 +28,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 58 S> */ B(LdaConstant), U8(1),
@@ -50,9 +48,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 57 S> */ B(LdaConstant), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index 2fd27fcce1..4124d91274 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -19,10 +19,9 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 19
+bytecode array length: 18
bytecodes: [
- /* 93 E> */ B(StackCheck),
- B(CreateRestParameter),
+ /* 93 E> */ B(CreateRestParameter),
B(Star), R(2),
B(Mov), R(closure), R(1),
/* 93 S> */ B(Ldar), R(1),
@@ -51,10 +50,9 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 40
+bytecode array length: 39
bytecodes: [
- /* 128 E> */ B(StackCheck),
- B(CreateRestParameter),
+ /* 128 E> */ B(CreateRestParameter),
B(Star), R(3),
B(Mov), R(closure), R(1),
B(Mov), R(3), R(2),
@@ -93,10 +91,9 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 119
+bytecode array length: 118
bytecodes: [
- /* 128 E> */ B(StackCheck),
- B(CreateRestParameter),
+ /* 128 E> */ B(CreateRestParameter),
B(Star), R(3),
B(Mov), R(closure), R(1),
B(Mov), R(3), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
index ca596e7a5e..5b12e3fc79 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
@@ -15,9 +15,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(1),
@@ -50,9 +49,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 38
+bytecode array length: 37
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(1),
@@ -87,9 +85,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 36
+bytecode array length: 35
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(1),
@@ -124,9 +121,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 36
+bytecode array length: 35
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(2),
@@ -161,9 +157,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 44
+bytecode array length: 43
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(TypeOf),
@@ -202,9 +197,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 30
+bytecode array length: 29
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(TypeOf),
@@ -303,9 +297,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 290
+bytecode array length: 289
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(1),
@@ -472,9 +465,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 58
+bytecode array length: 57
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden
index c85f155947..b7821fa66c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden
@@ -13,9 +13,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 28
+bytecode array length: 27
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -45,9 +44,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 28
+bytecode array length: 27
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -77,9 +75,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 28
+bytecode array length: 27
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -109,9 +106,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 50
+bytecode array length: 49
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -153,9 +149,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 35
+bytecode array length: 34
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(2),
@@ -189,10 +184,9 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 45
+bytecode array length: 44
bytecodes: [
- /* 30 E> */ B(StackCheck),
- B(CreateClosure), U8(0), U8(0), U8(2),
+ /* 30 E> */ B(CreateClosure), U8(0), U8(0), U8(2),
B(Star), R(2),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
index 6c65fda872..557d0b4487 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
@@ -14,9 +14,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 3
+bytecode array length: 2
bytecodes: [
- /* 21 E> */ B(StackCheck),
B(LdaUndefined),
/* 25 S> */ B(Return),
]
@@ -33,9 +32,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 21 E> */ B(StackCheck),
B(Mov), R(closure), R(0),
/* 26 S> */ B(Ldar), R(0),
/* 35 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
index 73e203a60b..35926e7711 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaSmi), I8(1),
/* 34 E> */ B(Throw),
]
@@ -28,9 +27,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaConstant), U8(0),
/* 34 E> */ B(Throw),
]
@@ -46,9 +44,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index c3e04111aa..f080925637 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -12,13 +12,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 34
+bytecode array length: 33
bytecodes: [
- /* 0 E> */ B(StackCheck),
B(LdaConstant), U8(0),
B(Star), R(1),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
/* 8 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
B(Star), R(1),
/* 16 E> */ B(CreateClosure), U8(2), U8(0), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
index ec70270dae..5a299760ab 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(Mov), R(context), R(0),
/* 40 S> */ B(LdaSmi), I8(1),
/* 49 S> */ B(Return),
@@ -31,7 +30,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [4, 7, 7],
+ [3, 6, 6],
]
---
@@ -42,9 +41,8 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 55
+bytecode array length: 54
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(Mov), R(context), R(1),
/* 47 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
@@ -79,7 +77,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [4, 8, 10],
- [28, 32, 34],
+ [3, 7, 9],
+ [27, 31, 33],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
index 22d4e0c154..d06342c1b8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
@@ -12,9 +12,8 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 46
+bytecode array length: 45
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
B(Mov), R(context), R(3),
@@ -45,7 +44,7 @@ bytecodes: [
constant pool: [
]
handlers: [
- [8, 12, 20],
+ [7, 11, 19],
]
---
@@ -55,9 +54,8 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 70
+bytecode array length: 69
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
B(Mov), R(context), R(3),
@@ -101,8 +99,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [8, 36, 44],
- [11, 15, 17],
+ [7, 35, 43],
+ [10, 14, 16],
]
---
@@ -113,9 +111,8 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 90
+bytecode array length: 89
bytecodes: [
- /* 30 E> */ B(StackCheck),
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
B(Mov), R(context), R(5),
@@ -170,8 +167,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [4, 56, 64],
- [7, 35, 37],
- [10, 14, 16],
+ [3, 55, 63],
+ [6, 34, 36],
+ [9, 13, 15],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
index bd73b4acdb..ea335954e0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
@@ -16,9 +16,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- /* 10 E> */ B(StackCheck),
/* 24 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
/* 29 S> */ B(TypeOf),
@@ -39,9 +38,8 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- /* 22 E> */ B(StackCheck),
/* 28 S> */ B(LdaGlobalInsideTypeof), U8(0), U8(0),
B(TypeOf),
/* 45 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
index b8b4c4bde8..b564000c3b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -15,19 +15,17 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 25
+bytecode array length: 23
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 54 S> */ B(LdaSmi), I8(10),
/* 54 E> */ B(TestEqual), R(0), U8(0),
- B(JumpIfTrue), U8(13),
- /* 45 E> */ B(StackCheck),
+ B(JumpIfTrue), U8(12),
/* 65 S> */ B(Ldar), R(0),
/* 71 E> */ B(AddSmi), I8(10), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(15), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(14), I8(0),
/* 79 S> */ B(Ldar), R(0),
/* 88 S> */ B(Return),
]
@@ -46,19 +44,17 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 20
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaFalse),
B(Star), R(0),
- /* 49 E> */ B(StackCheck),
/* 56 S> */ B(Ldar), R(0),
B(ToBooleanLogicalNot),
B(Star), R(0),
/* 74 S> */ B(LdaFalse),
/* 74 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(5),
- B(JumpLoop), U8(12), I8(0),
+ /* 49 E> */ B(JumpLoop), U8(11), I8(0),
/* 85 S> */ B(Ldar), R(0),
/* 94 S> */ B(Return),
]
@@ -74,9 +70,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(101),
B(Star), R(0),
/* 61 S> */ B(MulSmi), I8(3), U8(0),
@@ -96,9 +91,8 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(Wide), B(LdaSmi), I16(1234),
B(Star), R(0),
/* 64 S> */ B(Mul), R(0), U8(1),
@@ -119,9 +113,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
/* 53 S> */ B(BitwiseNot), U8(0),
@@ -139,9 +132,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
/* 53 S> */ B(ToNumber), U8(0),
@@ -159,9 +151,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
/* 53 S> */ B(Negate), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index 066d6e9f03..afbe8e89a4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -169,9 +169,8 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 547
+bytecode array length: 546
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaZero),
B(Star), R(0),
/* 55 S> */ B(LdaZero),
@@ -659,9 +658,8 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 549
+bytecode array length: 548
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaZero),
B(Star), R(0),
/* 55 S> */ B(LdaZero),
@@ -1149,9 +1147,8 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 555
+bytecode array length: 554
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaZero),
B(Star), R(0),
/* 55 S> */ B(LdaZero),
@@ -1645,9 +1642,8 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 576
+bytecode array length: 575
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaZero),
B(Star), R(0),
/* 55 S> */ B(LdaZero),
@@ -2147,9 +2143,8 @@ snippet: "
"
frame size: 158
parameter count: 1
-bytecode array length: 593
+bytecode array length: 591
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaZero),
B(Star), R(0),
/* 55 S> */ B(LdaZero),
@@ -2472,8 +2467,7 @@ bytecodes: [
B(Wide), B(Star), R16(128),
/* 2166 S> */ B(LdaSmi), I8(64),
/* 2166 E> */ B(Wide), B(TestLessThan), R16(128), U16(0),
- B(JumpIfFalse), U8(31),
- /* 2146 E> */ B(StackCheck),
+ B(JumpIfFalse), U8(30),
/* 2183 S> */ B(Wide), B(Ldar), R16(128),
/* 2189 E> */ B(Add), R(1), U8(1),
B(Wide), B(Mov), R16(1), R16(157),
@@ -2481,7 +2475,7 @@ bytecodes: [
/* 2176 S> */ B(Wide), B(Ldar), R16(128),
B(Inc), U8(2),
B(Wide), B(Star), R16(128),
- B(JumpLoop), U8(36), I8(0),
+ /* 2146 E> */ B(JumpLoop), U8(35), I8(0),
/* 2195 S> */ B(Wide), B(Ldar), R16(128),
/* 2207 S> */ B(Return),
]
@@ -2655,9 +2649,8 @@ snippet: "
"
frame size: 163
parameter count: 1
-bytecode array length: 624
+bytecode array length: 622
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaZero),
B(Star), R(0),
/* 55 S> */ B(LdaZero),
@@ -2977,25 +2970,24 @@ bytecodes: [
/* 2146 S> */ B(LdaZero),
B(Star), R(1),
/* 2162 S> */ B(Ldar), R(0),
- B(JumpIfUndefinedOrNull), U8(72),
+ B(JumpIfUndefinedOrNull), U8(71),
B(Wide), B(ToObject), R16(157),
B(Wide), B(ForInEnumerate), R16(157),
B(Wide), B(ForInPrepare), R16(158), U16(0),
B(LdaZero),
B(Wide), B(Star), R16(161),
/* 2154 S> */ B(Wide), B(ForInContinue), R16(161), R16(160),
- B(JumpIfFalse), U8(45),
+ B(JumpIfFalse), U8(44),
B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(0),
- B(JumpIfUndefined), U8(22),
+ B(JumpIfUndefined), U8(21),
B(Wide), B(Star), R16(128),
- /* 2149 E> */ B(StackCheck),
/* 2169 S> */ B(Wide), B(Ldar), R16(128),
/* 2175 E> */ B(Add), R(1), U8(1),
B(Wide), B(Mov), R16(1), R16(162),
B(Star), R(1),
/* 2172 E> */ B(Wide), B(ForInStep), R16(161),
B(Wide), B(Star), R16(161),
- B(JumpLoop), U8(48), I8(0),
+ /* 2149 E> */ B(JumpLoop), U8(47), I8(0),
/* 2181 S> */ B(Ldar), R(1),
/* 2191 S> */ B(Return),
]
@@ -3170,9 +3162,8 @@ snippet: "
"
frame size: 159
parameter count: 1
-bytecode array length: 590
+bytecode array length: 589
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaZero),
B(Star), R(0),
/* 55 S> */ B(LdaZero),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
index 0b44c07300..0ffcdab0da 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
@@ -11,9 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- /* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(ToObject), R(0),
B(CreateWithContext), R(0), U8(1),
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index fbc2666c10..64c9a90ca4 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -20,34 +20,54 @@ namespace v8 {
namespace internal {
namespace interpreter {
-MaybeHandle<Object> CallInterpreter(Isolate* isolate,
- Handle<JSFunction> function);
template <class... A>
static MaybeHandle<Object> CallInterpreter(Isolate* isolate,
Handle<JSFunction> function,
- A... args) {
- Handle<Object> argv[] = {args...};
- return Execution::Call(isolate, function,
- isolate->factory()->undefined_value(), sizeof...(args),
- argv);
+ Handle<Object> receiver, A... args) {
+  // Pad the array with an empty handle so that the size of argv is at
+  // least 1; this avoids MSVC error C2466 (zero-sized array).
+ Handle<Object> argv[] = {args..., Handle<Object>()};
+ return Execution::Call(isolate, function, receiver, sizeof...(args), argv);
}
template <class... A>
class InterpreterCallable {
public:
+ virtual ~InterpreterCallable() = default;
+
+ FeedbackVector vector() const { return function_->feedback_vector(); }
+
+ protected:
InterpreterCallable(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate), function_(function) {}
- virtual ~InterpreterCallable() = default;
+
+ Isolate* isolate_;
+ Handle<JSFunction> function_;
+};
+
+template <class... A>
+class InterpreterCallableUndefinedReceiver : public InterpreterCallable<A...> {
+ public:
+ InterpreterCallableUndefinedReceiver(Isolate* isolate,
+ Handle<JSFunction> function)
+ : InterpreterCallable<A...>(isolate, function) {}
MaybeHandle<Object> operator()(A... args) {
- return CallInterpreter(isolate_, function_, args...);
+ return CallInterpreter(this->isolate_, this->function_,
+ this->isolate_->factory()->undefined_value(),
+ args...);
}
+};
- FeedbackVector vector() const { return function_->feedback_vector(); }
+template <class... A>
+class InterpreterCallableWithReceiver : public InterpreterCallable<A...> {
+ public:
+ InterpreterCallableWithReceiver(Isolate* isolate, Handle<JSFunction> function)
+ : InterpreterCallable<A...>(isolate, function) {}
- private:
- Isolate* isolate_;
- Handle<JSFunction> function_;
+ MaybeHandle<Object> operator()(Handle<Object> receiver, A... args) {
+ return CallInterpreter(this->isolate_, this->function_, receiver, args...);
+ }
};
class InterpreterTester {
@@ -68,8 +88,15 @@ class InterpreterTester {
virtual ~InterpreterTester();
template <class... A>
- InterpreterCallable<A...> GetCallable() {
- return InterpreterCallable<A...>(isolate_, GetBytecodeFunction<A...>());
+ InterpreterCallableUndefinedReceiver<A...> GetCallable() {
+ return InterpreterCallableUndefinedReceiver<A...>(
+ isolate_, GetBytecodeFunction<A...>());
+ }
+
+ template <class... A>
+ InterpreterCallableWithReceiver<A...> GetCallableWithReceiver() {
+ return InterpreterCallableWithReceiver<A...>(isolate_,
+ GetBytecodeFunction<A...>());
}
Local<Message> CheckThrowsReturnMessage();
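
For illustration, a minimal sketch of how the two callable flavours split out above would be used; it assumes `isolate` and a `bytecode_array` as in the tests in test-interpreter.cc below, and the receiver object and argument value are illustrative, not taken from this change:

    InterpreterTester tester(isolate, bytecode_array);

    // Old-style callable: the receiver is implicitly
    // isolate->factory()->undefined_value().
    auto plain = tester.GetCallable<Handle<Object>>();
    Handle<Smi> arg = Handle<Smi>(Smi::FromInt(2), isolate);
    plain(arg);

    // Receiver-aware callable: the receiver is the leading argument.
    auto with_receiver = tester.GetCallableWithReceiver<Handle<Object>>();
    Handle<Object> receiver = InterpreterTester::NewObject("({ val : 1 })");
    with_receiver(receiver, arg);
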
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index 3e1c006f20..6889747e17 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -28,9 +28,23 @@ class InvokeIntrinsicHelper {
template <class... A>
Handle<Object> Invoke(A... args) {
CHECK(IntrinsicsHelper::IsSupported(function_id_));
- BytecodeArrayBuilder builder(zone_, sizeof...(args), 0, nullptr);
+ int parameter_count = sizeof...(args);
+#ifdef V8_REVERSE_JSARGS
+  // Move the parameters to locals, since the order of the
+  // arguments on the stack is reversed.
+ BytecodeArrayBuilder builder(zone_, parameter_count + 1, parameter_count,
+ nullptr);
+ for (int i = 0; i < parameter_count; i++) {
+ builder.MoveRegister(builder.Parameter(i), builder.Local(i));
+ }
+ RegisterList reg_list =
+ InterpreterTester::NewRegisterList(0, parameter_count);
+#else
+  // Account for the receiver in the parameter count.
+ BytecodeArrayBuilder builder(zone_, parameter_count + 1, 0, nullptr);
RegisterList reg_list = InterpreterTester::NewRegisterList(
- builder.Receiver().index(), sizeof...(args));
+ builder.Parameter(0).index(), parameter_count);
+#endif
builder.CallRuntime(function_id_, reg_list).Return();
InterpreterTester tester(isolate_, builder.ToBytecodeArray(isolate_));
auto callable = tester.GetCallable<A...>();
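
The V8_REVERSE_JSARGS branch above first copies every parameter into a local before forming the register list. A standalone toy model of why (not V8 internals; the frame layout and indices are invented for illustration): with a reversed calling convention the parameters sit in descending order, so a contiguous ascending register list can only be formed after copying them into locals.

    #include <cstdio>
    #include <vector>

    int main() {
      const int kParams = 3;
      std::vector<int> frame(2 * kParams);  // slots 0..2: params, 3..5: locals
      const int args[kParams] = {10, 20, 30};

      // Reversed convention: argument i lands in slot kParams - 1 - i, so
      // walking slots 0..kParams-1 visits the arguments back to front.
      for (int i = 0; i < kParams; i++) frame[kParams - 1 - i] = args[i];

      // Copy each parameter into local i to restore ascending order.
      for (int i = 0; i < kParams; i++)
        frame[kParams + i] = frame[kParams - 1 - i];

      for (int i = 0; i < kParams; i++)
        printf("local[%d] = %d\n", i, frame[kParams + i]);  // 10, 20, 30
      return 0;
    }
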
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 8f687fdc8b..33daf581c7 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -170,7 +170,7 @@ TEST(InterpreterLoadLiteral) {
builder.LoadLiteral(-2.1e19).Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array);
@@ -189,14 +189,13 @@ TEST(InterpreterLoadLiteral) {
const AstRawString* raw_string = ast_factory.GetOneByteString("String");
builder.LoadLiteral(raw_string).Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- CHECK(i::String::cast(*return_val)
- .Equals(*raw_string->string().get<Factory>()));
+ CHECK(i::String::cast(*return_val).Equals(*raw_string->string()));
}
}
@@ -535,7 +534,7 @@ TEST(InterpreterStringAdd) {
builder.LoadLiteral(test_cases[i].lhs).StoreAccumulatorInRegister(reg);
LoadLiteralForTest(&builder, test_cases[i].rhs);
builder.BinaryOperation(Token::Value::ADD, reg, GetIndex(slot)).Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
@@ -551,7 +550,7 @@ TEST(InterpreterStringAdd) {
}
}
-TEST(InterpreterParameter1) {
+TEST(InterpreterReceiverParameter) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -560,6 +559,24 @@ TEST(InterpreterParameter1) {
builder.LoadAccumulatorWithRegister(builder.Receiver()).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ Handle<Object> object = InterpreterTester::NewObject("({ val : 123 })");
+
+ InterpreterTester tester(isolate, bytecode_array);
+ auto callable = tester.GetCallableWithReceiver<>();
+ Handle<Object> return_val = callable(object).ToHandleChecked();
+
+ CHECK(return_val.is_identical_to(object));
+}
+
+TEST(InterpreterParameter0) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Zone* zone = handles.main_zone();
+ BytecodeArrayBuilder builder(zone, 2, 0);
+
+ builder.LoadAccumulatorWithRegister(builder.Parameter(0)).Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+
InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<Handle<Object>>();
@@ -603,12 +620,12 @@ TEST(InterpreterParameter8) {
.BinaryOperation(Token::Value::ADD, builder.Parameter(5), GetIndex(slot5))
.BinaryOperation(Token::Value::ADD, builder.Parameter(6), GetIndex(slot6))
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
using H = Handle<Object>;
- auto callable = tester.GetCallable<H, H, H, H, H, H, H, H>();
+ auto callable = tester.GetCallableWithReceiver<H, H, H, H, H, H, H>();
Handle<Smi> arg1 = Handle<Smi>(Smi::FromInt(1), handles.main_isolate());
Handle<Smi> arg2 = Handle<Smi>(Smi::FromInt(2), handles.main_isolate());
@@ -743,7 +760,7 @@ TEST(InterpreterBinaryOpTypeFeedback) {
LoadLiteralForTest(&builder, test_case.arg2);
builder.BinaryOperation(test_case.op, reg, GetIndex(slot0)).Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
@@ -849,7 +866,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
.BinaryOperation(test_case.op, reg, GetIndex(slot0))
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
@@ -891,7 +908,7 @@ TEST(InterpreterUnaryOpFeedback) {
{Token::Value::DEC, smi_one, smi_min, number, bigint, str}};
for (TestCase const& test_case : kTestCases) {
i::FeedbackVectorSpec feedback_spec(zone);
- BytecodeArrayBuilder builder(zone, 5, 0, &feedback_spec);
+ BytecodeArrayBuilder builder(zone, 6, 0, &feedback_spec);
i::FeedbackSlot slot0 = feedback_spec.AddBinaryOpICSlot();
i::FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
@@ -902,15 +919,15 @@ TEST(InterpreterUnaryOpFeedback) {
Handle<i::FeedbackMetadata> metadata =
i::NewFeedbackMetadata(isolate, &feedback_spec);
- builder.LoadAccumulatorWithRegister(builder.Receiver())
+ builder.LoadAccumulatorWithRegister(builder.Parameter(0))
.UnaryOperation(test_case.op, GetIndex(slot0))
- .LoadAccumulatorWithRegister(builder.Parameter(0))
- .UnaryOperation(test_case.op, GetIndex(slot1))
.LoadAccumulatorWithRegister(builder.Parameter(1))
- .UnaryOperation(test_case.op, GetIndex(slot2))
+ .UnaryOperation(test_case.op, GetIndex(slot1))
.LoadAccumulatorWithRegister(builder.Parameter(2))
- .UnaryOperation(test_case.op, GetIndex(slot3))
+ .UnaryOperation(test_case.op, GetIndex(slot2))
.LoadAccumulatorWithRegister(builder.Parameter(3))
+ .UnaryOperation(test_case.op, GetIndex(slot3))
+ .LoadAccumulatorWithRegister(builder.Parameter(4))
.UnaryOperation(test_case.op, GetIndex(slot4))
.Return();
@@ -959,7 +976,7 @@ TEST(InterpreterBitwiseTypeFeedback) {
for (Token::Value op : kBitwiseBinaryOperators) {
i::FeedbackVectorSpec feedback_spec(zone);
- BytecodeArrayBuilder builder(zone, 4, 0, &feedback_spec);
+ BytecodeArrayBuilder builder(zone, 5, 0, &feedback_spec);
i::FeedbackSlot slot0 = feedback_spec.AddBinaryOpICSlot();
i::FeedbackSlot slot1 = feedback_spec.AddBinaryOpICSlot();
@@ -968,10 +985,10 @@ TEST(InterpreterBitwiseTypeFeedback) {
Handle<i::FeedbackMetadata> metadata =
i::NewFeedbackMetadata(isolate, &feedback_spec);
- builder.LoadAccumulatorWithRegister(builder.Receiver())
- .BinaryOperation(op, builder.Parameter(0), GetIndex(slot0))
- .BinaryOperation(op, builder.Parameter(1), GetIndex(slot1))
- .BinaryOperation(op, builder.Parameter(2), GetIndex(slot2))
+ builder.LoadAccumulatorWithRegister(builder.Parameter(0))
+ .BinaryOperation(op, builder.Parameter(1), GetIndex(slot0))
+ .BinaryOperation(op, builder.Parameter(2), GetIndex(slot1))
+ .BinaryOperation(op, builder.Parameter(3), GetIndex(slot2))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1015,7 +1032,7 @@ TEST(InterpreterParameter1Assign) {
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> return_val =
callable(Handle<Smi>(Smi::FromInt(3), handles.main_isolate()))
@@ -1146,11 +1163,11 @@ TEST(InterpreterLoadNamedProperty) {
BytecodeArrayBuilder builder(zone, 1, 0, &feedback_spec);
builder.LoadNamedProperty(builder.Receiver(), name, GetIndex(slot)).Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> object = InterpreterTester::NewObject("({ val : 123 })");
// Test IC miss.
@@ -1200,11 +1217,11 @@ TEST(InterpreterLoadKeyedProperty) {
builder.LoadLiteral(key)
.LoadKeyedProperty(builder.Receiver(), GetIndex(slot))
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> object = InterpreterTester::NewObject("({ key : 123 })");
// Test IC miss.
@@ -1243,11 +1260,11 @@ TEST(InterpreterStoreNamedProperty) {
.StoreNamedProperty(builder.Receiver(), name, GetIndex(slot),
LanguageMode::kStrict)
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> object = InterpreterTester::NewObject("({ val : 123 })");
// Test IC miss.
Handle<Object> result;
@@ -1308,11 +1325,11 @@ TEST(InterpreterStoreKeyedProperty) {
.StoreKeyedProperty(builder.Receiver(), Register(0), GetIndex(slot),
i::LanguageMode::kSloppy)
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> object = InterpreterTester::NewObject("({ val : 123 })");
// Test IC miss.
Handle<Object> result;
@@ -1368,11 +1385,11 @@ TEST(InterpreterCall) {
builder.CallProperty(reg, args, call_slot_index);
builder.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> object = InterpreterTester::NewObject(
"new (function Obj() { this.func = function() { return 0x265; }})()");
@@ -1390,11 +1407,11 @@ TEST(InterpreterCall) {
.MoveRegister(builder.Receiver(), args[0]);
builder.CallProperty(reg, args, call_slot_index);
builder.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> object = InterpreterTester::NewObject(
"new (function Obj() {"
@@ -1424,11 +1441,11 @@ TEST(InterpreterCall) {
builder.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> object = InterpreterTester::NewObject(
"new (function Obj() { "
@@ -1473,11 +1490,11 @@ TEST(InterpreterCall) {
builder.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- auto callable = tester.GetCallable<Handle<Object>>();
+ auto callable = tester.GetCallableWithReceiver<>();
Handle<Object> object = InterpreterTester::NewObject(
"new (function Obj() { "
@@ -1538,7 +1555,7 @@ TEST(InterpreterJumps) {
IncrementRegister(&builder, reg, 1, scratch, GetIndex(slot)).Jump(&label[1]);
SetRegister(&builder, reg, 2048, scratch).Bind(&label[0]);
IncrementRegister(&builder, reg, 2, scratch, GetIndex(slot1))
- .JumpLoop(&loop_header, 0);
+ .JumpLoop(&loop_header, 0, 0);
SetRegister(&builder, reg, 4096, scratch).Bind(&label[1]);
IncrementRegister(&builder, reg, 4, scratch, GetIndex(slot2))
.LoadAccumulatorWithRegister(reg)
@@ -1691,7 +1708,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
builder.LoadAccumulatorWithRegister(reg);
builder.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
BytecodeArrayIterator iterator(bytecode_array);
@@ -1735,7 +1752,7 @@ TEST(InterpreterJumpWith32BitOperand) {
builder.Bind(&done);
builder.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
BytecodeArrayIterator iterator(bytecode_array);
@@ -1873,7 +1890,7 @@ TEST(InterpreterHeapNumberComparisons) {
.CompareOperation(comparison, r0, GetIndex(slot))
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
auto callable = tester.GetCallable<>();
@@ -1920,7 +1937,7 @@ TEST(InterpreterBigIntComparisons) {
.CompareOperation(comparison, r0, GetIndex(slot))
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
auto callable = tester.GetCallable<>();
@@ -1968,7 +1985,7 @@ TEST(InterpreterStringComparisons) {
.CompareOperation(comparison, r0, GetIndex(slot))
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
auto callable = tester.GetCallable<>();
@@ -2080,7 +2097,7 @@ TEST(InterpreterMixedComparisons) {
builder.CompareOperation(comparison, lhs_reg, GetIndex(slot))
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array =
builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
@@ -2208,8 +2225,8 @@ TEST(InterpreterCompareTypeOf) {
LiteralFlag literal_flag = kLiterals[l];
if (literal_flag == LiteralFlag::kOther) continue;
- BytecodeArrayBuilder builder(zone, 1, 0);
- builder.LoadAccumulatorWithRegister(builder.Receiver())
+ BytecodeArrayBuilder builder(zone, 2, 0);
+ builder.LoadAccumulatorWithRegister(builder.Parameter(0))
.CompareTypeOf(kLiterals[l])
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -2295,7 +2312,7 @@ TEST(InterpreterTestIn) {
.CompareOperation(Token::Value::IN, r0, GetIndex(slot))
.Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
auto callable = tester.GetCallable<>();
@@ -2353,7 +2370,7 @@ TEST(InterpreterUnaryNotNonBoolean) {
Register r0(0);
LoadLiteralForTest(&builder, object_type_tuples[i].first);
builder.LogicalNot(ToBooleanMode::kConvertToBoolean).Return();
- ast_factory.Internalize(isolate->factory());
+ ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
@@ -5012,7 +5029,7 @@ TEST(InterpreterGenerators) {
}
}
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
TEST(InterpreterWithNativeStack) {
i::FLAG_interpreted_frames_native_stack = true;
@@ -5034,7 +5051,7 @@ TEST(InterpreterWithNativeStack) {
CHECK(code.is_interpreter_trampoline_builtin());
CHECK_NE(code.address(), interpreter_entry_trampoline->address());
}
-#endif // V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_S390X
TEST(InterpreterGetBytecodeHandler) {
HandleAndZoneScope handles;
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index 28687cef5b..35b7048bb0 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -331,8 +331,8 @@ TEST(Utf8AdvanceUntilOverChunkBoundaries) {
for (size_t i = 1; i < len; i++) {
// Copy source string into buffer, splitting it at i.
// Then add three chunks, 0..i-1, i..strlen-1, empty.
- memcpy(buffer, unicode_utf8, i);
- memcpy(buffer + i + 1, unicode_utf8 + i, len - i);
+ strncpy(buffer, unicode_utf8, i);
+ strncpy(buffer + i + 1, unicode_utf8 + i, len - i);
buffer[i] = '\0';
buffer[len + 1] = '\n';
buffer[len + 2] = '\0';
@@ -360,8 +360,8 @@ TEST(Utf8ChunkBoundaries) {
for (size_t i = 1; i < len; i++) {
// Copy source string into buffer, splitting it at i.
// Then add three chunks, 0..i-1, i..strlen-1, empty.
- memcpy(buffer, unicode_utf8, i);
- memcpy(buffer + i + 1, unicode_utf8 + i, len - i);
+ strncpy(buffer, unicode_utf8, i);
+ strncpy(buffer + i + 1, unicode_utf8 + i, len - i);
buffer[i] = '\0';
buffer[len + 1] = '\0';
buffer[len + 2] = '\0';
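
The copy-and-split logic in these tests can be shown standalone. A sketch with an invented input string (not from the test): the source is copied into one buffer, split at byte i, and each half NUL-terminated; with these exact counts strncpy behaves like memcpy here, since neither copy reaches the source's terminator.

    #include <cstdio>
    #include <cstring>

    int main() {
      const char src[] = "abcdef";
      const size_t len = sizeof(src) - 1;
      char buffer[sizeof(src) + 2] = {0};
      for (size_t i = 1; i < len; i++) {
        // First chunk: bytes 0..i-1, NUL-terminated at buffer[i].
        strncpy(buffer, src, i);
        buffer[i] = '\0';
        // Second chunk: bytes i..len-1, NUL-terminated at buffer[len + 1].
        strncpy(buffer + i + 1, src + i, len - i);
        buffer[len + 1] = '\0';
        printf("split at %zu: \"%s\" + \"%s\"\n", i, buffer, buffer + i + 1);
      }
      return 0;
    }
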
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index e0c93501c2..ec14e8d0c2 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -298,13 +298,10 @@ static void HandleAllocatingGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
for (int i = 0; i < C; i++) {
- v8::String::NewFromUtf8(info.GetIsolate(), "foo",
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ USE(v8::String::NewFromUtf8Literal(info.GetIsolate(), "foo"));
}
- info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "foo",
- v8::NewStringType::kNormal)
- .ToLocalChecked());
+ info.GetReturnValue().Set(
+ v8::String::NewFromUtf8Literal(info.GetIsolate(), "foo"));
}
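
The change above swaps NewFromUtf8 for NewFromUtf8Literal. A minimal sketch of the difference (a fragment, assuming an initialized v8::Isolate* named isolate in scope):

    // NewFromUtf8 can fail (e.g. for over-long input) and therefore
    // returns a MaybeLocal that must be unwrapped:
    v8::Local<v8::String> a =
        v8::String::NewFromUtf8(isolate, "foo").ToLocalChecked();

    // NewFromUtf8Literal accepts only a string literal, checks the length
    // at compile time, and cannot fail, so it returns a Local directly:
    v8::Local<v8::String> b = v8::String::NewFromUtf8Literal(isolate, "foo");
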
diff --git a/deps/v8/test/cctest/test-api-array-buffer.cc b/deps/v8/test/cctest/test-api-array-buffer.cc
index 9ce26323d8..b15fe80151 100644
--- a/deps/v8/test/cctest/test-api-array-buffer.cc
+++ b/deps/v8/test/cctest/test-api-array-buffer.cc
@@ -636,6 +636,43 @@ TEST(SharedArrayBuffer_NewBackingStore_CustomDeleter) {
CHECK(backing_store_custom_called);
}
+TEST(ArrayBuffer_NewBackingStore_EmptyDeleter) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ char static_buffer[100];
+ std::unique_ptr<v8::BackingStore> backing_store =
+ v8::ArrayBuffer::NewBackingStore(static_buffer, sizeof(static_buffer),
+ v8::BackingStore::EmptyDeleter, nullptr);
+ uint64_t external_memory_before =
+ isolate->AdjustAmountOfExternalAllocatedMemory(0);
+ v8::ArrayBuffer::New(isolate, std::move(backing_store));
+ uint64_t external_memory_after =
+ isolate->AdjustAmountOfExternalAllocatedMemory(0);
+  // The ArrayBuffer constructor does not increase the external memory counter.
+  // The counter may, however, decrease if the allocation triggers GC.
+ CHECK_GE(external_memory_before, external_memory_after);
+}
+
+TEST(SharedArrayBuffer_NewBackingStore_EmptyDeleter) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ char static_buffer[100];
+ std::unique_ptr<v8::BackingStore> backing_store =
+ v8::SharedArrayBuffer::NewBackingStore(
+ static_buffer, sizeof(static_buffer), v8::BackingStore::EmptyDeleter,
+ nullptr);
+ uint64_t external_memory_before =
+ isolate->AdjustAmountOfExternalAllocatedMemory(0);
+ v8::SharedArrayBuffer::New(isolate, std::move(backing_store));
+ uint64_t external_memory_after =
+ isolate->AdjustAmountOfExternalAllocatedMemory(0);
+  // The SharedArrayBuffer constructor does not increase the external memory
+  // counter. The counter may, however, decrease if the allocation triggers GC.
+ CHECK_GE(external_memory_before, external_memory_after);
+}
+
THREADED_TEST(BackingStore_NotShared) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -729,46 +766,6 @@ TEST(BackingStore_HoldAllocatorAlive_UntilIsolateShutdown) {
CHECK(allocator_weak.expired());
}
-class NullptrAllocator final : public v8::ArrayBuffer::Allocator {
- public:
- void* Allocate(size_t length) override {
- CHECK_EQ(length, 0);
- return nullptr;
- }
- void* AllocateUninitialized(size_t length) override {
- CHECK_EQ(length, 0);
- return nullptr;
- }
- void Free(void* data, size_t length) override { CHECK_EQ(data, nullptr); }
-};
-
-TEST(BackingStore_ReleaseAllocator_NullptrBackingStore) {
- std::shared_ptr<NullptrAllocator> allocator =
- std::make_shared<NullptrAllocator>();
- std::weak_ptr<NullptrAllocator> allocator_weak(allocator);
-
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator_shared = allocator;
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- isolate->Enter();
-
- allocator.reset();
- create_params.array_buffer_allocator_shared.reset();
- CHECK(!allocator_weak.expired());
-
- {
- std::shared_ptr<v8::BackingStore> backing_store =
- v8::ArrayBuffer::NewBackingStore(isolate, 0);
- // This should release a reference to the allocator, even though the
- // buffer is empty/nullptr.
- backing_store.reset();
- }
-
- isolate->Exit();
- isolate->Dispose();
- CHECK(allocator_weak.expired());
-}
-
TEST(BackingStore_HoldAllocatorAlive_AfterIsolateShutdown) {
std::shared_ptr<DummyAllocator> allocator =
std::make_shared<DummyAllocator>();
@@ -801,3 +798,74 @@ TEST(BackingStore_HoldAllocatorAlive_AfterIsolateShutdown) {
backing_store.reset();
CHECK(allocator_weak.expired());
}
+
+TEST(BackingStore_ReallocateExpand) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ std::unique_ptr<v8::BackingStore> backing_store =
+ v8::ArrayBuffer::NewBackingStore(isolate, 10);
+ {
+ uint8_t* data = reinterpret_cast<uint8_t*>(backing_store->Data());
+ for (uint8_t i = 0; i < 10; i++) {
+ data[i] = i;
+ }
+ }
+ std::unique_ptr<v8::BackingStore> new_backing_store =
+ v8::BackingStore::Reallocate(isolate, std::move(backing_store), 20);
+ CHECK_EQ(new_backing_store->ByteLength(), 20);
+ CHECK(!new_backing_store->IsShared());
+ {
+ uint8_t* data = reinterpret_cast<uint8_t*>(new_backing_store->Data());
+ for (uint8_t i = 0; i < 10; i++) {
+ CHECK_EQ(data[i], i);
+ }
+ for (uint8_t i = 10; i < 20; i++) {
+ CHECK_EQ(data[i], 0);
+ }
+ }
+}
+
+TEST(BackingStore_ReallocateShrink) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ std::unique_ptr<v8::BackingStore> backing_store =
+ v8::ArrayBuffer::NewBackingStore(isolate, 20);
+ {
+ uint8_t* data = reinterpret_cast<uint8_t*>(backing_store->Data());
+ for (uint8_t i = 0; i < 20; i++) {
+ data[i] = i;
+ }
+ }
+ std::unique_ptr<v8::BackingStore> new_backing_store =
+ v8::BackingStore::Reallocate(isolate, std::move(backing_store), 10);
+ CHECK_EQ(new_backing_store->ByteLength(), 10);
+ CHECK(!new_backing_store->IsShared());
+ {
+ uint8_t* data = reinterpret_cast<uint8_t*>(new_backing_store->Data());
+ for (uint8_t i = 0; i < 10; i++) {
+ CHECK_EQ(data[i], i);
+ }
+ }
+}
+
+TEST(BackingStore_ReallocateNotShared) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ std::unique_ptr<v8::BackingStore> backing_store =
+ v8::ArrayBuffer::NewBackingStore(isolate, 20);
+ std::unique_ptr<v8::BackingStore> new_backing_store =
+ v8::BackingStore::Reallocate(isolate, std::move(backing_store), 10);
+ CHECK(!new_backing_store->IsShared());
+}
+
+TEST(BackingStore_ReallocateShared) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ std::unique_ptr<v8::BackingStore> backing_store =
+ v8::SharedArrayBuffer::NewBackingStore(isolate, 20);
+ std::unique_ptr<v8::BackingStore> new_backing_store =
+ v8::BackingStore::Reallocate(isolate, std::move(backing_store), 10);
+ CHECK(new_backing_store->IsShared());
+}
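
The EmptyDeleter and Reallocate tests above exercise new BackingStore entry points: BackingStore::EmptyDeleter wraps caller-owned memory that V8 must never free, and BackingStore::Reallocate resizes a store without manual copy code. A minimal embedder-side sketch of both, assuming an entered isolate and an open handle scope (names are illustrative):

#include "include/v8.h"

void BackingStoreSketch(v8::Isolate* isolate) {
  // Non-owning view: EmptyDeleter means `scratch` must outlive every
  // ArrayBuffer built on top of it.
  static char scratch[64];
  std::unique_ptr<v8::BackingStore> wrapped =
      v8::ArrayBuffer::NewBackingStore(scratch, sizeof(scratch),
                                       v8::BackingStore::EmptyDeleter, nullptr);
  v8::Local<v8::ArrayBuffer> ab =
      v8::ArrayBuffer::New(isolate, std::move(wrapped));
  (void)ab;

  // Reallocate keeps the old contents and zero-fills any growth, as
  // BackingStore_ReallocateExpand checks above.
  std::unique_ptr<v8::BackingStore> owned =
      v8::ArrayBuffer::NewBackingStore(isolate, 10);
  owned = v8::BackingStore::Reallocate(isolate, std::move(owned), 20);
}
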
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
index bceba18c4a..cc6795ba51 100644
--- a/deps/v8/test/cctest/test-api-stack-traces.cc
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -339,7 +339,7 @@ TEST(CaptureStackTrace) {
CHECK(detailed_result->IsObject());
// Test using function.name and function.displayName in stack trace
- const char* function_name_source =
+ const char function_name_source[] =
"function bar(function_name, display_name, testGroup) {\n"
" var f = new Function(`AnalyzeStackInNativeCode(${testGroup});`);\n"
" if (function_name) {\n"
@@ -356,9 +356,7 @@ TEST(CaptureStackTrace) {
"bar('function.name', 239, 6);\n"
"bar(239, undefined, 7);\n";
v8::Local<v8::String> function_name_src =
- v8::String::NewFromUtf8(isolate, function_name_source,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(isolate, function_name_source);
v8::ScriptCompiler::Source script_source3(function_name_src,
v8::ScriptOrigin(origin));
v8::Local<Value> function_name_result(
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 5f84bfffcd..7f4b300203 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -37,6 +37,7 @@
#include <unistd.h> // NOLINT
#endif
+#include "include/v8-fast-api-calls.h"
#include "include/v8-util.h"
#include "src/api/api-inl.h"
#include "src/base/overflowing-math.h"
@@ -561,8 +562,7 @@ THREADED_TEST(ScriptMakingExternalString) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<String> source =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_source,
- v8::NewStringType::kNormal)
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_source)
.ToLocalChecked();
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
@@ -624,15 +624,13 @@ TEST(MakingExternalStringConditions) {
uint16_t* two_byte_string = AsciiToTwoByteString("s1");
Local<String> tiny_local_string =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
- v8::NewStringType::kNormal)
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_string)
.ToLocalChecked();
i::DeleteArray(two_byte_string);
two_byte_string = AsciiToTwoByteString("s1234");
Local<String> local_string =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
- v8::NewStringType::kNormal)
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_string)
.ToLocalChecked();
i::DeleteArray(two_byte_string);
@@ -942,8 +940,7 @@ THREADED_TEST(StringConcat) {
uint16_t* two_byte_source = AsciiToTwoByteString(two_byte_string_1);
Local<String> right =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_source,
- v8::NewStringType::kNormal)
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_source)
.ToLocalChecked();
i::DeleteArray(two_byte_source);
@@ -962,8 +959,7 @@ THREADED_TEST(StringConcat) {
source = String::Concat(isolate, source, right);
two_byte_source = AsciiToTwoByteString(two_byte_string_2);
- right = String::NewFromTwoByte(env->GetIsolate(), two_byte_source,
- v8::NewStringType::kNormal)
+ right = String::NewFromTwoByte(env->GetIsolate(), two_byte_source)
.ToLocalChecked();
i::DeleteArray(two_byte_source);
@@ -3380,9 +3376,7 @@ THREADED_TEST(PrivatePropertiesOnProxies) {
CHECK(priv2->Name()
->Equals(env.local(),
- v8::String::NewFromUtf8(isolate, "my-private",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
+ v8::String::NewFromUtf8Literal(isolate, "my-private"))
.FromJust());
// Make sure delete of a non-existent private symbol property works.
@@ -3408,10 +3402,9 @@ THREADED_TEST(PrivatePropertiesOnProxies) {
proxy->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
unsigned num_props =
proxy->GetPropertyNames(env.local()).ToLocalChecked()->Length();
- CHECK(proxy->Set(env.local(), v8::String::NewFromUtf8(
- isolate, "bla", v8::NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Integer::New(isolate, 20))
+ CHECK(proxy
+ ->Set(env.local(), v8::String::NewFromUtf8Literal(isolate, "bla"),
+ v8::Integer::New(isolate, 20))
.FromJust());
CHECK_EQ(1u,
proxy->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
@@ -3474,9 +3467,7 @@ THREADED_TEST(PrivateProperties) {
CHECK(priv2->Name()
->Equals(env.local(),
- v8::String::NewFromUtf8(isolate, "my-private",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
+ v8::String::NewFromUtf8Literal(isolate, "my-private"))
.FromJust());
// Make sure delete of a non-existent private symbol property works.
@@ -3502,9 +3493,7 @@ THREADED_TEST(PrivateProperties) {
obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
unsigned num_props =
obj->GetPropertyNames(env.local()).ToLocalChecked()->Length();
- CHECK(obj->Set(env.local(), v8::String::NewFromUtf8(
- isolate, "bla", v8::NewStringType::kNormal)
- .ToLocalChecked(),
+ CHECK(obj->Set(env.local(), v8::String::NewFromUtf8Literal(isolate, "bla"),
v8::Integer::New(isolate, 20))
.FromJust());
CHECK_EQ(1u,
@@ -3999,9 +3988,8 @@ class TwoPassCallbackData {
HandleScope scope(isolate);
i::ScopedVector<char> buffer(40);
i::SNPrintF(buffer, "%p", static_cast<void*>(this));
- auto string = v8::String::NewFromUtf8(isolate, buffer.begin(),
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ auto string =
+ v8::String::NewFromUtf8(isolate, buffer.begin()).ToLocalChecked();
cell_.Reset(isolate, string);
metadata_->instance_counter++;
}
@@ -6841,6 +6829,9 @@ THREADED_TEST(UndetectableObject) {
.ToLocalChecked()
->NewInstance(env.local())
.ToLocalChecked();
+
+ CHECK(obj->IsUndetectable());
+
CHECK(
env->Global()->Set(env.local(), v8_str("undetectable"), obj).FromJust());
@@ -8077,9 +8068,7 @@ THREADED_TEST(StringWrite) {
// abc<Icelandic eth><Unicode snowman>.
v8::Local<String> str2 = v8_str("abc\xC3\xB0\xE2\x98\x83");
v8::Local<String> str3 =
- v8::String::NewFromUtf8(context->GetIsolate(), "abc\0def",
- v8::NewStringType::kNormal, 7)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(context->GetIsolate(), "abc\0def");
// "ab" + lead surrogate + "wx" + trail surrogate + "yz"
uint16_t orphans[8] = {0x61, 0x62, 0xD800, 0x77, 0x78, 0xDC00, 0x79, 0x7A};
v8::Local<String> orphans_str =
@@ -8535,14 +8524,10 @@ THREADED_TEST(Utf16Symbol) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Local<String> symbol1 =
- v8::String::NewFromUtf8(context->GetIsolate(), "abc",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
- Local<String> symbol2 =
- v8::String::NewFromUtf8(context->GetIsolate(), "abc",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ Local<String> symbol1 = v8::String::NewFromUtf8Literal(
+ context->GetIsolate(), "abc", v8::NewStringType::kInternalized);
+ Local<String> symbol2 = v8::String::NewFromUtf8Literal(
+ context->GetIsolate(), "abc", v8::NewStringType::kInternalized);
CHECK(SameSymbol(symbol1, symbol2));
CompileRun(
@@ -8560,30 +8545,22 @@ THREADED_TEST(Utf16Symbol) {
"if (sym3.charCodeAt(2) != 0xDC07) throw sym1.charCodeAt(2);"
"if (sym4.length != 3) throw sym4;"
"if (sym4.charCodeAt(2) != 0xDC08) throw sym2.charCodeAt(2);");
- Local<String> sym0 =
- v8::String::NewFromUtf8(context->GetIsolate(), "benedictus",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
- Local<String> sym0b =
- v8::String::NewFromUtf8(context->GetIsolate(), "S\xC3\xB8ren",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
- Local<String> sym1 =
- v8::String::NewFromUtf8(context->GetIsolate(), "\xED\xA0\x81\xED\xB0\x87",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ Local<String> sym0 = v8::String::NewFromUtf8Literal(
+ context->GetIsolate(), "benedictus", v8::NewStringType::kInternalized);
+ Local<String> sym0b = v8::String::NewFromUtf8Literal(
+ context->GetIsolate(), "S\xC3\xB8ren", v8::NewStringType::kInternalized);
+ Local<String> sym1 = v8::String::NewFromUtf8Literal(
+ context->GetIsolate(), "\xED\xA0\x81\xED\xB0\x87",
+ v8::NewStringType::kInternalized);
Local<String> sym2 =
- v8::String::NewFromUtf8(context->GetIsolate(), "\xF0\x90\x90\x88",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
- Local<String> sym3 = v8::String::NewFromUtf8(context->GetIsolate(),
- "x\xED\xA0\x81\xED\xB0\x87",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(context->GetIsolate(), "\xF0\x90\x90\x88",
+ v8::NewStringType::kInternalized);
+ Local<String> sym3 = v8::String::NewFromUtf8Literal(
+ context->GetIsolate(), "x\xED\xA0\x81\xED\xB0\x87",
+ v8::NewStringType::kInternalized);
Local<String> sym4 =
- v8::String::NewFromUtf8(context->GetIsolate(), "x\xF0\x90\x90\x88",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(context->GetIsolate(), "x\xF0\x90\x90\x88",
+ v8::NewStringType::kInternalized);
v8::Local<v8::Object> global = context->Global();
Local<Value> s0 =
global->Get(context.local(), v8_str("sym0")).ToLocalChecked();
@@ -13747,7 +13724,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
const int kIterations = 10;
for (int i = 0; i < kIterations; ++i) {
LocalContext env(isolate);
- i::AlwaysAllocateScope always_allocate(i_isolate);
+ i::AlwaysAllocateScopeForTesting always_allocate(heap);
CompileRun(script);
// Keep a strong reference to the code object in the handle scope.
@@ -15320,19 +15297,15 @@ THREADED_TEST(ReplaceConstantFunction) {
THREADED_TEST(ScriptContextDependence) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
- const char *source = "foo";
+ const char source[] = "foo";
v8::Local<v8::Script> dep = v8_compile(source);
v8::ScriptCompiler::Source script_source(
- v8::String::NewFromUtf8(c1->GetIsolate(), source,
- v8::NewStringType::kNormal)
- .ToLocalChecked());
+ v8::String::NewFromUtf8Literal(c1->GetIsolate(), source));
v8::Local<v8::UnboundScript> indep =
v8::ScriptCompiler::CompileUnboundScript(c1->GetIsolate(), &script_source)
.ToLocalChecked();
c1->Global()
- ->Set(c1.local(), v8::String::NewFromUtf8(c1->GetIsolate(), "foo",
- v8::NewStringType::kNormal)
- .ToLocalChecked(),
+ ->Set(c1.local(), v8::String::NewFromUtf8Literal(c1->GetIsolate(), "foo"),
v8::Integer::New(c1->GetIsolate(), 100))
.FromJust();
CHECK_EQ(
@@ -15346,9 +15319,7 @@ THREADED_TEST(ScriptContextDependence) {
100);
LocalContext c2;
c2->Global()
- ->Set(c2.local(), v8::String::NewFromUtf8(c2->GetIsolate(), "foo",
- v8::NewStringType::kNormal)
- .ToLocalChecked(),
+ ->Set(c2.local(), v8::String::NewFromUtf8Literal(c2->GetIsolate(), "foo"),
v8::Integer::New(c2->GetIsolate(), 101))
.FromJust();
CHECK_EQ(
@@ -16444,7 +16415,7 @@ TEST(RecursionWithSourceURLInMessageScriptResourceNameOrSourceURL) {
static void CreateGarbageInOldSpace() {
i::Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
- i::AlwaysAllocateScope always_allocate(CcTest::i_isolate());
+ i::AlwaysAllocateScopeForTesting always_allocate(CcTest::i_isolate()->heap());
for (int i = 0; i < 1000; i++) {
factory->NewFixedArray(1000, i::AllocationType::kOld);
}
@@ -16764,7 +16735,7 @@ TEST(VisitExternalStrings) {
v8::Isolate* isolate = CcTest::isolate();
LocalContext env;
v8::HandleScope scope(isolate);
- const char* string = "Some string";
+ const char string[] = "Some string";
uint16_t* two_byte_string = AsciiToTwoByteString(string);
TestResource* resource[4];
resource[0] = new TestResource(two_byte_string);
@@ -16778,10 +16749,8 @@ TEST(VisitExternalStrings) {
// Externalized symbol.
resource[2] = new TestResource(two_byte_string, nullptr, false);
- v8::Local<v8::String> string2 =
- v8::String::NewFromUtf8(env->GetIsolate(), string,
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ v8::Local<v8::String> string2 = v8::String::NewFromUtf8Literal(
+ env->GetIsolate(), string, v8::NewStringType::kInternalized);
CHECK(string2->MakeExternal(resource[2]));
// Symbolized External.
@@ -17282,8 +17251,7 @@ THREADED_TEST(FunctionGetDebugName) {
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
env->Global()
->Get(env.local(),
- v8::String::NewFromUtf8(isolate, functions[i * 2],
- v8::NewStringType::kNormal)
+ v8::String::NewFromUtf8(isolate, functions[i * 2])
.ToLocalChecked())
.ToLocalChecked());
CHECK_EQ(0, strcmp(functions[i * 2 + 1],
@@ -18021,9 +17989,7 @@ TEST(ContainsOnlyOneByte) {
.ToLocalChecked();
CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
// Counter example.
- string = String::NewFromTwoByte(isolate, string_contents,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ string = String::NewFromTwoByte(isolate, string_contents).ToLocalChecked();
CHECK(string->IsOneByte() && string->ContainsOnlyOneByte());
// Test left right and balanced cons strings.
Local<String> base = v8_str("a");
@@ -19319,9 +19285,7 @@ v8::ModifyCodeGenerationFromStringsResult ModifyCodeGeneration(
if (*i == '2') *i = '3';
}
return {/* codegen_allowed= */ true,
- String::NewFromUtf8(context->GetIsolate(), *utf8,
- v8::NewStringType::kNormal)
- .ToLocalChecked()};
+ String::NewFromUtf8(context->GetIsolate(), *utf8).ToLocalChecked()};
}
THREADED_TEST(AllowCodeGenFromStrings) {
@@ -19911,7 +19875,7 @@ TEST(RunMicrotasksIgnoresThrownExceptionsFromApi) {
CHECK(!isolate->IsExecutionTerminating());
isolate->EnqueueMicrotask(ThrowExceptionMicrotask);
isolate->EnqueueMicrotask(IncrementCounterMicrotask);
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(1, microtask_callback_count);
CHECK(!try_catch.HasCaught());
}
@@ -19928,21 +19892,25 @@ TEST(SetAutorunMicrotasks) {
v8::HandleScope scope(env->GetIsolate());
env->GetIsolate()->AddMicrotasksCompletedCallback(
&MicrotasksCompletedCallback);
+
+ // If the policy is auto, there's a microtask checkpoint at the end of every
+ // zero-depth API call.
CompileRun(
"var ext1Calls = 0;"
"var ext2Calls = 0;");
CompileRun("1+1;");
CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(0u, microtasks_completed_callback_count);
+ CHECK_EQ(4u, microtasks_completed_callback_count);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskOne).ToLocalChecked());
CompileRun("1+1;");
CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(1u, microtasks_completed_callback_count);
+ CHECK_EQ(7u, microtasks_completed_callback_count);
+ // If the policy is explicit, microtasks run only at explicitly invoked
+ // checkpoints.
env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskOne).ToLocalChecked());
@@ -19951,24 +19919,24 @@ TEST(SetAutorunMicrotasks) {
CompileRun("1+1;");
CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(1u, microtasks_completed_callback_count);
+ CHECK_EQ(7u, microtasks_completed_callback_count);
- env->GetIsolate()->RunMicrotasks();
+ env->GetIsolate()->PerformMicrotaskCheckpoint();
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(2u, microtasks_completed_callback_count);
+ CHECK_EQ(8u, microtasks_completed_callback_count);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(2u, microtasks_completed_callback_count);
+ CHECK_EQ(8u, microtasks_completed_callback_count);
- env->GetIsolate()->RunMicrotasks();
+ env->GetIsolate()->PerformMicrotaskCheckpoint();
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(3u, microtasks_completed_callback_count);
+ CHECK_EQ(9u, microtasks_completed_callback_count);
env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
env->GetIsolate()->EnqueueMicrotask(
@@ -19976,7 +19944,7 @@ TEST(SetAutorunMicrotasks) {
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(4u, microtasks_completed_callback_count);
+ CHECK_EQ(12u, microtasks_completed_callback_count);
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
@@ -19985,13 +19953,13 @@ TEST(SetAutorunMicrotasks) {
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(4u, microtasks_completed_callback_count);
+ CHECK_EQ(12u, microtasks_completed_callback_count);
}
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(4, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(5u, microtasks_completed_callback_count);
+ CHECK_EQ(15u, microtasks_completed_callback_count);
env->GetIsolate()->RemoveMicrotasksCompletedCallback(
&MicrotasksCompletedCallback);
@@ -20000,7 +19968,7 @@ TEST(SetAutorunMicrotasks) {
CompileRun("1+1;");
CHECK_EQ(3, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(4, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
- CHECK_EQ(5u, microtasks_completed_callback_count);
+ CHECK_EQ(15u, microtasks_completed_callback_count);
}
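
The adjusted counts above follow from the renamed API: Isolate::RunMicrotasks becomes Isolate::PerformMicrotaskCheckpoint, and under the default kAuto policy a checkpoint (and hence a MicrotasksCompletedCallback invocation) fires whenever the last V8 API call on the stack returns, so every CompileRun now bumps the counter. A sketch of the explicit-policy pattern, assuming `microtask` is a Local<Function> already in hand:

isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
isolate->EnqueueMicrotask(microtask);
// Running scripts no longer drains the queue; the embedder decides when:
isolate->PerformMicrotaskCheckpoint();
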
@@ -20015,7 +19983,7 @@ TEST(RunMicrotasksWithoutEnteringContext) {
isolate->EnqueueMicrotask(
Function::New(context, MicrotaskOne).ToLocalChecked());
}
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
{
Context::Scope context_scope(context);
CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(context).FromJust());
@@ -20039,7 +20007,7 @@ static void Regress808911_CurrentContextWrapper(
CHECK(isolate->GetCurrentContext() !=
isolate->GetEnteredOrMicrotaskContext());
isolate->EnqueueMicrotask(Regress808911_MicrotaskCallback, isolate);
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
}
THREADED_TEST(Regress808911) {
@@ -22507,7 +22475,7 @@ TEST(PromiseThen) {
.ToLocalChecked()
->Int32Value(context.local())
.FromJust());
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(1, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22533,7 +22501,7 @@ TEST(PromiseThen) {
.ToLocalChecked()
->Int32Value(context.local())
.FromJust());
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(0, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22553,7 +22521,7 @@ TEST(PromiseThen) {
.ToLocalChecked()
->Int32Value(context.local())
.FromJust());
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(3, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22602,7 +22570,7 @@ TEST(PromiseThen2) {
.ToLocalChecked()
->Int32Value(context.local())
.FromJust());
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(1, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22613,7 +22581,7 @@ TEST(PromiseThen2) {
.FromJust());
Local<v8::Promise> b = a->Then(context.local(), f3, f2).ToLocalChecked();
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(1, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22624,7 +22592,7 @@ TEST(PromiseThen2) {
.FromJust());
Local<v8::Promise> c = b->Then(context.local(), f1, f2).ToLocalChecked();
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(1, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22635,7 +22603,7 @@ TEST(PromiseThen2) {
.FromJust());
v8::Local<v8::Promise> d = c->Then(context.local(), f1, f2).ToLocalChecked();
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(103, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22646,7 +22614,7 @@ TEST(PromiseThen2) {
.FromJust());
v8::Local<v8::Promise> e = d->Then(context.local(), f3, f2).ToLocalChecked();
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(103, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22657,7 +22625,7 @@ TEST(PromiseThen2) {
.FromJust());
v8::Local<v8::Promise> f = e->Then(context.local(), f1, f3).ToLocalChecked();
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(103, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22668,7 +22636,7 @@ TEST(PromiseThen2) {
.FromJust());
f->Then(context.local(), f1, f2).ToLocalChecked();
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(103, global->Get(context.local(), v8_str("x1"))
.ToLocalChecked()
->Int32Value(context.local())
@@ -22703,12 +22671,8 @@ TEST(ResolvedPromiseReFulfill) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Local<v8::String> value1 =
- v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal)
- .ToLocalChecked();
- v8::Local<v8::String> value2 =
- v8::String::NewFromUtf8(isolate, "bar", v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::Local<v8::String> value1 = v8::String::NewFromUtf8Literal(isolate, "foo");
+ v8::Local<v8::String> value2 = v8::String::NewFromUtf8Literal(isolate, "bar");
v8::Local<v8::Promise::Resolver> resolver =
v8::Promise::Resolver::New(context.local()).ToLocalChecked();
@@ -22734,12 +22698,8 @@ TEST(RejectedPromiseReFulfill) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Local<v8::String> value1 =
- v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal)
- .ToLocalChecked();
- v8::Local<v8::String> value2 =
- v8::String::NewFromUtf8(isolate, "bar", v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::Local<v8::String> value1 = v8::String::NewFromUtf8Literal(isolate, "foo");
+ v8::Local<v8::String> value2 = v8::String::NewFromUtf8Literal(isolate, "bar");
v8::Local<v8::Promise::Resolver> resolver =
v8::Promise::Resolver::New(context.local()).ToLocalChecked();
@@ -22988,7 +22948,7 @@ TEST(ScriptPositionInfo) {
// Fail when position is larger than script size.
CHECK(!script1->GetPositionInfo(220384, &info, script1->NO_OFFSET));
- i::Script::InitLineEnds(script1);
+ i::Script::InitLineEnds(i_isolate, script1);
}
}
@@ -25577,8 +25537,8 @@ THREADED_TEST(GlobalAccessorInfo) {
v8::HandleScope scope(isolate);
Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
global_template->SetAccessor(
- v8::String::NewFromUtf8(isolate, "prop", v8::NewStringType::kInternalized)
- .ToLocalChecked(),
+ v8::String::NewFromUtf8Literal(isolate, "prop",
+ v8::NewStringType::kInternalized),
&ensure_receiver_is_global_proxy);
LocalContext env(nullptr, global_template);
CompileRun("for (var i = 0; i < 10; i++) this.prop");
@@ -25735,7 +25695,7 @@ TEST(DynamicImport) {
i::MaybeHandle<i::JSPromise> maybe_promise =
i_isolate->RunHostImportModuleDynamicallyCallback(referrer, specifier);
i::Handle<i::JSPromise> promise = maybe_promise.ToHandleChecked();
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK(result->Equals(i::String::cast(promise->result())));
}
@@ -25884,9 +25844,8 @@ TEST(PrimitiveArray) {
array->Set(isolate, 0, symbol);
CHECK(array->Get(isolate, 0)->IsSymbol());
- Local<v8::String> string =
- v8::String::NewFromUtf8(isolate, "test", v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ Local<v8::String> string = v8::String::NewFromUtf8Literal(
+ isolate, "test", v8::NewStringType::kInternalized);
array->Set(isolate, 1, string);
CHECK(array->Get(isolate, 0)->IsSymbol());
CHECK(array->Get(isolate, 1)->IsString());
@@ -25922,10 +25881,8 @@ TEST(PersistentValueMap) {
std::string, v8::Value,
v8::DefaultPersistentValueMapTraits<std::string, v8::Value>>
map(isolate);
- v8::Local<v8::Value> value =
- v8::String::NewFromUtf8(isolate, "value",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ v8::Local<v8::Value> value = v8::String::NewFromUtf8Literal(
+ isolate, "value", v8::NewStringType::kInternalized);
map.Set("key", value);
}
@@ -25979,6 +25936,8 @@ void AtomicsWaitCallbackForTesting(
CHECK_EQ(timeout_in_ms, info->expected_timeout);
CHECK_EQ(value, info->expected_value);
CHECK_EQ(offset_in_bytes, info->expected_offset);
+ CHECK_EQ(v8::StateTag::ATOMICS_WAIT,
+ reinterpret_cast<i::Isolate*>(info->isolate)->current_vm_state());
auto ThrowSomething = [&]() {
info->isolate->ThrowException(v8::Integer::New(info->isolate, 42));
@@ -26435,7 +26394,7 @@ TEST(MicrotaskContextShouldBeNativeContext) {
" await 42;"
"})().then(callback);}");
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
}
TEST(PreviewSetKeysIteratorEntriesWithDeleted) {
@@ -27007,3 +26966,329 @@ UNINITIALIZED_TEST(NestedIsolates) {
}
#undef THREADED_PROFILED_TEST
+
+#ifndef V8_LITE_MODE
+namespace {
+// The following should correspond to Chromium's kV8DOMWrapperObjectIndex.
+static const int kV8WrapperTypeIndex = 0;
+static const int kV8WrapperObjectIndex = 1;
+
+template <typename T>
+struct GetDeoptValue {
+ static Maybe<T> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context);
+};
+
+template <>
+struct GetDeoptValue<int32_t> {
+ static Maybe<int32_t> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return value->Int32Value(context);
+ }
+};
+
+template <>
+struct GetDeoptValue<uint32_t> {
+ static Maybe<uint32_t> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return value->Uint32Value(context);
+ }
+};
+
+template <>
+struct GetDeoptValue<int64_t> {
+ static Maybe<int64_t> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return value->IntegerValue(context);
+ }
+};
+
+template <>
+struct GetDeoptValue<bool> {
+ static Maybe<bool> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return v8::Just<bool>(value->BooleanValue(CcTest::isolate()));
+ }
+};
+
+template <typename T>
+struct ApiNumberChecker {
+ enum Result {
+ kNotCalled,
+ kSlowCalled,
+ kFastCalled,
+ };
+
+ explicit ApiNumberChecker(T value) {}
+
+ static void CheckArgFast(ApiNumberChecker<T>* receiver, T argument) {
+ CHECK_NE(receiver, nullptr);
+ receiver->result = kFastCalled;
+ receiver->fast_value = argument;
+ }
+
+ static void CheckArgSlow(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CHECK_EQ(info.Length(), 1);
+
+ v8::Object* receiver = v8::Object::Cast(*info.Holder());
+ ApiNumberChecker<T>* checker = static_cast<ApiNumberChecker<T>*>(
+ receiver->GetAlignedPointerFromInternalField(kV8WrapperObjectIndex));
+
+ CHECK_NOT_NULL(checker);
+ if (checker->result == kSlowCalled) return;
+ checker->result = kSlowCalled;
+
+ LocalContext env;
+ checker->slow_value = GetDeoptValue<T>::Get(info[0], env.local());
+ }
+
+ T fast_value = T();
+ Maybe<T> slow_value = v8::Nothing<T>();
+ Result result = kNotCalled;
+};
+
+enum class Behavior {
+ kSuccess, // The callback function should be called with the expected value,
+ // which equals the initial one.
+ kThrow, // An exception should be thrown by the callback function.
+};
+
+enum class PathTaken {
+ kFast, // The fast path is taken after optimization.
+ kSlow, // The slow path is always taken.
+};
+
+template <typename T>
+void SetupTest(v8::Local<v8::Value> initial_value, LocalContext* env,
+ ApiNumberChecker<T>* checker) {
+ v8::Isolate* isolate = CcTest::isolate();
+
+ v8::CFunction c_func = v8::CFunction::Make(ApiNumberChecker<T>::CheckArgFast);
+
+ Local<v8::FunctionTemplate> checker_templ = v8::FunctionTemplate::New(
+ isolate, ApiNumberChecker<T>::CheckArgSlow, v8::Local<v8::Value>(),
+ v8::Local<v8::Signature>(), 1, v8::ConstructorBehavior::kAllow,
+ v8::SideEffectType::kHasSideEffect, &c_func);
+
+ v8::Local<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->SetInternalFieldCount(kV8WrapperObjectIndex + 1);
+ object_template->Set(v8_str("api_func"), checker_templ);
+
+ v8::Local<v8::Object> object =
+ object_template->NewInstance(env->local()).ToLocalChecked();
+ object->SetAlignedPointerInInternalField(kV8WrapperObjectIndex,
+ reinterpret_cast<void*>(checker));
+
+ CHECK((*env)
+ ->Global()
+ ->Set(env->local(), v8_str("receiver"), object)
+ .FromJust());
+ CHECK((*env)
+ ->Global()
+ ->Set(env->local(), v8_str("value"), initial_value)
+ .FromJust());
+ CompileRun(
+ "function func(arg) { receiver.api_func(arg); }"
+ "%PrepareFunctionForOptimization(func);"
+ "func(value);"
+ "%OptimizeFunctionOnNextCall(func);"
+ "func(value);");
+}
+
+template <typename T>
+void CallAndCheck(T expected_value, Behavior expected_behavior,
+ PathTaken expected_path, v8::Local<v8::Value> initial_value) {
+ LocalContext env;
+ v8::TryCatch try_catch(CcTest::isolate());
+ ApiNumberChecker<T> checker(expected_value);
+
+ SetupTest<T>(initial_value, &env, &checker);
+
+ if (expected_behavior == Behavior::kThrow) {
+ CHECK(try_catch.HasCaught());
+ CHECK_NE(checker.result, ApiNumberChecker<T>::kFastCalled);
+ } else {
+ CHECK_EQ(try_catch.HasCaught(), false);
+ }
+
+ if (expected_path == PathTaken::kSlow) {
+ // The slow version callback should have been called twice.
+ CHECK_EQ(checker.result, ApiNumberChecker<T>::kSlowCalled);
+
+ if (expected_behavior != Behavior::kThrow) {
+ T slow_value_typed = checker.slow_value.ToChecked();
+ CHECK_EQ(slow_value_typed, expected_value);
+ }
+ } else if (expected_path == PathTaken::kFast) {
+ CHECK_EQ(checker.result, ApiNumberChecker<T>::kFastCalled);
+ CHECK_EQ(checker.fast_value, expected_value);
+ }
+}
+
+void CallAndDeopt() {
+ LocalContext env;
+ v8::Local<v8::Value> initial_value(v8_num(42));
+ ApiNumberChecker<int32_t> checker(42);
+ SetupTest(initial_value, &env, &checker);
+
+ v8::Local<v8::Value> function = CompileRun(
+ "try { func(BigInt(42)); } catch(e) {}"
+ "%PrepareFunctionForOptimization(func);"
+ "%OptimizeFunctionOnNextCall(func);"
+ "func(value);"
+ "func;");
+ CHECK(function->IsFunction());
+ i::Handle<i::JSFunction> ifunction =
+ i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*function));
+ CHECK(ifunction->IsOptimized());
+}
+
+void CallWithLessArguments() {
+ LocalContext env;
+ v8::Local<v8::Value> initial_value(v8_num(42));
+ ApiNumberChecker<int32_t> checker(42);
+ SetupTest(initial_value, &env, &checker);
+
+ CompileRun("func();");
+
+ // Passing too few arguments should go through the slow path.
+ CHECK_EQ(checker.result, ApiNumberChecker<int32_t>::kSlowCalled);
+}
+
+void CallWithMoreArguments() {
+ LocalContext env;
+ v8::Local<v8::Value> initial_value(v8_num(42));
+ ApiNumberChecker<int32_t> checker(42);
+ SetupTest(initial_value, &env, &checker);
+
+ CompileRun(
+ "%PrepareFunctionForOptimization(func);"
+ "%OptimizeFunctionOnNextCall(func);"
+ "func(value, value);");
+
+ // When too many arguments are passed, the extra ones are simply ignored.
+ CHECK_EQ(checker.result, ApiNumberChecker<int32_t>::kFastCalled);
+}
+} // namespace
+
+namespace v8 {
+template <typename T>
+class WrapperTraits<ApiNumberChecker<T>> {
+ public:
+ static const void* GetTypeInfo() {
+ static const int tag = 0;
+ return reinterpret_cast<const void*>(&tag);
+ }
+};
+} // namespace v8
+#endif // V8_LITE_MODE
+
+TEST(FastApiCalls) {
+#ifndef V8_LITE_MODE
+ if (i::FLAG_jitless) return;
+
+ i::FLAG_turbo_fast_api_calls = true;
+ i::FLAG_opt = true;
+ i::FLAG_allow_natives_syntax = true;
+ // Disable --always_opt, otherwise we won't have generated the necessary
+ // feedback to go down the "best optimization" path for the fast call.
+ i::FLAG_always_opt = false;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->set_embedder_wrapper_type_index(kV8WrapperTypeIndex);
+ i_isolate->set_embedder_wrapper_object_index(kV8WrapperObjectIndex);
+
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+
+ // Main cases (the value fits in the type)
+ CallAndCheck<int32_t>(-42, Behavior::kSuccess, PathTaken::kFast, v8_num(-42));
+ CallAndCheck<uint32_t>(i::Smi::kMaxValue, Behavior::kSuccess,
+ PathTaken::kFast, v8_num(i::Smi::kMaxValue));
+#ifdef V8_TARGET_ARCH_X64
+ CallAndCheck<int64_t>(static_cast<int64_t>(i::Smi::kMaxValue) + 1,
+ Behavior::kSuccess, PathTaken::kFast,
+ v8_num(static_cast<int64_t>(i::Smi::kMaxValue) + 1));
+#endif // V8_TARGET_ARCH_X64
+
+ CallAndCheck<bool>(false, Behavior::kSuccess, PathTaken::kFast,
+ v8::Boolean::New(isolate, false));
+ CallAndCheck<bool>(true, Behavior::kSuccess, PathTaken::kFast,
+ v8::Boolean::New(isolate, true));
+
+ // Corner cases (the value is out of bounds or of different type) - int32_t
+ CallAndCheck<int32_t>(0, Behavior::kSuccess, PathTaken::kFast, v8_num(-0.0));
+ CallAndCheck<int32_t>(0, Behavior::kSuccess, PathTaken::kFast,
+ v8_num(std::numeric_limits<double>::quiet_NaN()));
+ CallAndCheck<int32_t>(0, Behavior::kSuccess, PathTaken::kFast,
+ v8_num(std::numeric_limits<double>::infinity()));
+ CallAndCheck<int32_t>(0, Behavior::kSuccess, PathTaken::kSlow,
+ v8_str("some_string"));
+ CallAndCheck<int32_t>(0, Behavior::kSuccess, PathTaken::kSlow,
+ v8::Object::New(isolate));
+ CallAndCheck<int32_t>(0, Behavior::kSuccess, PathTaken::kSlow,
+ v8::Array::New(isolate));
+ CallAndCheck<int32_t>(0, Behavior::kThrow, PathTaken::kSlow,
+ v8::BigInt::New(isolate, 42));
+ CallAndCheck<int32_t>(std::numeric_limits<int32_t>::min(), Behavior::kSuccess,
+ PathTaken::kFast,
+ v8_num(std::numeric_limits<int32_t>::min()));
+ CallAndCheck<int32_t>(
+ std::numeric_limits<int32_t>::min(), Behavior::kSuccess, PathTaken::kFast,
+ v8_num(static_cast<double>(std::numeric_limits<int32_t>::max()) + 1));
+
+ CallAndCheck<int32_t>(3, Behavior::kSuccess, PathTaken::kFast, v8_num(3.14));
+
+ // Corner cases - uint32_t
+ CallAndCheck<uint32_t>(0, Behavior::kSuccess, PathTaken::kFast, v8_num(-0.0));
+ CallAndCheck<uint32_t>(0, Behavior::kSuccess, PathTaken::kFast,
+ v8_num(std::numeric_limits<double>::quiet_NaN()));
+ CallAndCheck<uint32_t>(0, Behavior::kSuccess, PathTaken::kFast,
+ v8_num(std::numeric_limits<double>::infinity()));
+ CallAndCheck<uint32_t>(0, Behavior::kSuccess, PathTaken::kSlow,
+ v8_str("some_string"));
+ CallAndCheck<uint32_t>(0, Behavior::kSuccess, PathTaken::kSlow,
+ v8::Object::New(isolate));
+ CallAndCheck<uint32_t>(0, Behavior::kSuccess, PathTaken::kSlow,
+ v8::Array::New(isolate));
+ CallAndCheck<uint32_t>(0, Behavior::kThrow, PathTaken::kSlow,
+ v8::BigInt::New(isolate, 42));
+ CallAndCheck<uint32_t>(std::numeric_limits<uint32_t>::min(),
+ Behavior::kSuccess, PathTaken::kFast,
+ v8_num(std::numeric_limits<uint32_t>::max() + 1));
+ CallAndCheck<uint32_t>(3, Behavior::kSuccess, PathTaken::kFast, v8_num(3.14));
+
+ // Corner cases - bool
+ CallAndCheck<bool>(false, Behavior::kSuccess, PathTaken::kFast,
+ v8::Undefined(isolate));
+ CallAndCheck<bool>(false, Behavior::kSuccess, PathTaken::kFast,
+ v8::Null(isolate));
+ CallAndCheck<bool>(false, Behavior::kSuccess, PathTaken::kFast, v8_num(0));
+ CallAndCheck<bool>(true, Behavior::kSuccess, PathTaken::kFast, v8_num(42));
+ CallAndCheck<bool>(false, Behavior::kSuccess, PathTaken::kFast, v8_str(""));
+ CallAndCheck<bool>(true, Behavior::kSuccess, PathTaken::kFast,
+ v8_str("some_string"));
+ CallAndCheck<bool>(true, Behavior::kSuccess, PathTaken::kFast,
+ v8::Symbol::New(isolate));
+ CallAndCheck<bool>(false, Behavior::kSuccess, PathTaken::kFast,
+ v8::BigInt::New(isolate, 0));
+ CallAndCheck<bool>(true, Behavior::kSuccess, PathTaken::kFast,
+ v8::BigInt::New(isolate, 42));
+ CallAndCheck<bool>(true, Behavior::kSuccess, PathTaken::kFast,
+ v8::Object::New(isolate));
+
+ // Check for the deopt loop protection
+ CallAndDeopt();
+
+ // Wrong number of arguments
+ CallWithLessArguments();
+ CallWithMoreArguments();
+
+ // TODO(mslekova): Add corner cases for 64-bit values.
+ // TODO(mslekova): Add main cases for float and double.
+ // TODO(mslekova): Restructure the tests so that the fast optimized calls
+ // are compared against the slow optimized calls.
+#endif // V8_LITE_MODE
+}
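
SetupTest above demonstrates the registration pattern for the new fast-API-call machinery from include/v8-fast-api-calls.h: a typed C callback is wrapped in a CFunction and passed to FunctionTemplate::New next to the regular slow callback, and TurboFan may then invoke the C function directly from optimized code, falling back to the slow path on type mismatches. Condensed from the test (the receiver type is illustrative and needs a v8::WrapperTraits specialization, as shown above):

// Fast path: plain C types, no handle scope, no JS heap access allowed.
static void FastCallback(MyReceiver* receiver, int32_t arg) {}
// Slow path: full v8::Value API; handles anything the fast path cannot.
static void SlowCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {}

v8::CFunction c_func = v8::CFunction::Make(FastCallback);
v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(
    isolate, SlowCallback, v8::Local<v8::Value>(), v8::Local<v8::Signature>(),
    1, v8::ConstructorBehavior::kAllow, v8::SideEffectType::kHasSideEffect,
    &c_func);
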
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 8813815317..7b9a3ee842 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -186,6 +186,7 @@ static void InitializeVM() {
#define RESET() \
owned_buf->MakeWritable(); \
__ Reset(); \
+ __ CodeEntry(); \
/* Reset the machine state (like simulator.ResetState()). */ \
__ Msr(NZCV, xzr); \
__ Msr(FPCR, xzr);
@@ -193,8 +194,8 @@ static void InitializeVM() {
#define START_AFTER_RESET() \
__ PushCalleeSavedRegisters();
-#define START() \
- RESET(); \
+#define START() \
+ RESET(); \
START_AFTER_RESET();
#define RUN() \
@@ -1649,27 +1650,27 @@ TEST(adr) {
__ Adr(x3, &label_1);
__ Adr(x4, &label_1);
- __ Bind(&label_2);
+ __ Bind(&label_2, BranchTargetIdentifier::kBtiJump);
__ Eor(x5, x2, Operand(x3)); // Ensure that x2,x3 and x4 are identical.
__ Eor(x6, x2, Operand(x4));
__ Orr(x0, x0, Operand(x5));
__ Orr(x0, x0, Operand(x6));
__ Br(x2); // label_1, label_3
- __ Bind(&label_3);
+ __ Bind(&label_3, BranchTargetIdentifier::kBtiJump);
__ Adr(x2, &label_3); // Self-reference (offset 0).
__ Eor(x1, x1, Operand(x2));
__ Adr(x2, &label_4); // Simple forward reference.
__ Br(x2); // label_4
- __ Bind(&label_1);
+ __ Bind(&label_1, BranchTargetIdentifier::kBtiJump);
__ Adr(x2, &label_3); // Multiple reverse references to the same label.
__ Adr(x3, &label_3);
__ Adr(x4, &label_3);
__ Adr(x5, &label_2); // Simple reverse reference.
__ Br(x5); // label_2
- __ Bind(&label_4);
+ __ Bind(&label_4, BranchTargetIdentifier::kBtiJump);
END();
RUN();
@@ -1695,11 +1696,11 @@ TEST(adr_far) {
__ Adr(x10, &near_forward, MacroAssembler::kAdrFar);
__ Br(x10);
__ B(&fail);
- __ Bind(&near_backward);
+ __ Bind(&near_backward, BranchTargetIdentifier::kBtiJump);
__ Orr(x0, x0, 1 << 1);
__ B(&test_far);
- __ Bind(&near_forward);
+ __ Bind(&near_forward, BranchTargetIdentifier::kBtiJump);
__ Orr(x0, x0, 1 << 0);
__ Adr(x10, &near_backward, MacroAssembler::kAdrFar);
__ Br(x10);
@@ -1708,7 +1709,7 @@ TEST(adr_far) {
__ Adr(x10, &far_forward, MacroAssembler::kAdrFar);
__ Br(x10);
__ B(&fail);
- __ Bind(&far_backward);
+ __ Bind(&far_backward, BranchTargetIdentifier::kBtiJump);
__ Orr(x0, x0, 1 << 3);
__ B(&done);
@@ -1722,8 +1723,7 @@ TEST(adr_far) {
}
}
-
- __ Bind(&far_forward);
+ __ Bind(&far_forward, BranchTargetIdentifier::kBtiJump);
__ Orr(x0, x0, 1 << 2);
__ Adr(x10, &far_backward, MacroAssembler::kAdrFar);
__ Br(x10);
@@ -1832,7 +1832,7 @@ TEST(branch_to_reg) {
SETUP();
// Test br.
- Label fn1, after_fn1;
+ Label fn1, after_fn1, after_bl1;
START();
__ Mov(x29, lr);
@@ -1847,9 +1847,10 @@ TEST(branch_to_reg) {
__ Bind(&after_fn1);
__ Bl(&fn1);
+ __ Bind(&after_bl1, BranchTargetIdentifier::kBtiJump); // For Br(x0) in fn1.
// Test blr.
- Label fn2, after_fn2;
+ Label fn2, after_fn2, after_bl2;
__ Mov(x2, 0);
__ B(&after_fn2);
@@ -1861,6 +1862,7 @@ TEST(branch_to_reg) {
__ Bind(&after_fn2);
__ Bl(&fn2);
+ __ Bind(&after_bl2, BranchTargetIdentifier::kBtiCall); // For Blr(x0) in fn2.
__ Mov(x3, lr);
__ Mov(lr, x29);
@@ -1873,6 +1875,76 @@ TEST(branch_to_reg) {
CHECK_EQUAL_64(84, x2);
}
+static void BtiHelper(Register ipreg) {
+ SETUP();
+
+ Label jump_target, jump_call_target, call_target, done;
+ START();
+ UseScratchRegisterScope temps(&masm);
+ temps.Exclude(ipreg);
+ __ Adr(x0, &jump_target);
+ __ Br(x0);
+ __ Nop();
+ __ Bind(&jump_target, BranchTargetIdentifier::kBtiJump);
+ __ Adr(x0, &call_target);
+ __ Blr(x0);
+ __ Adr(ipreg, &jump_call_target);
+ __ Blr(ipreg);
+ __ Adr(lr, &done); // Make Ret return to the done label.
+ __ Br(ipreg);
+ __ Bind(&call_target, BranchTargetIdentifier::kBtiCall);
+ __ Ret();
+ __ Bind(&jump_call_target, BranchTargetIdentifier::kBtiJumpCall);
+ __ Ret();
+ __ Bind(&done);
+ END();
+
+#ifdef USE_SIMULATOR
+ simulator.SetGuardedPages(true);
+ RUN();
+#endif // USE_SIMULATOR
+}
+
+TEST(bti) {
+ BtiHelper(x16);
+ BtiHelper(x17);
+}
+
+TEST(unguarded_bti_is_nop) {
+ SETUP();
+
+ Label start, none, c, j, jc;
+ START();
+ __ B(&start);
+ __ Bind(&none, BranchTargetIdentifier::kBti);
+ __ Bind(&c, BranchTargetIdentifier::kBtiCall);
+ __ Bind(&j, BranchTargetIdentifier::kBtiJump);
+ __ Bind(&jc, BranchTargetIdentifier::kBtiJumpCall);
+ CHECK(__ SizeOfCodeGeneratedSince(&none) == 4 * kInstrSize);
+ __ Ret();
+
+ Label jump_to_c, call_to_j;
+ __ Bind(&start);
+ __ Adr(x0, &none);
+ __ Adr(lr, &jump_to_c);
+ __ Br(x0);
+
+ __ Bind(&jump_to_c);
+ __ Adr(x0, &c);
+ __ Adr(lr, &call_to_j);
+ __ Br(x0);
+
+ __ Bind(&call_to_j);
+ __ Adr(x0, &j);
+ __ Blr(x0);
+ END();
+
+#ifdef USE_SIMULATOR
+ simulator.SetGuardedPages(false);
+ RUN();
+#endif // USE_SIMULATOR
+}
+
TEST(compare_branch) {
INIT_V8();
SETUP();
@@ -6631,11 +6703,7 @@ static void LdrLiteralRangeHelper(
size_t expected_pool_size = 0;
-#if defined(_M_ARM64) && !defined(__clang__)
- auto PoolSizeAt = [pool_entries, kEntrySize](int pc_offset) {
-#else
- auto PoolSizeAt = [unaligned_emission](int pc_offset) {
-#endif
+ auto PoolSizeAt = [&](int pc_offset) {
// To determine padding, consider the size of the prologue of the pool,
// and the jump around the pool, which we always need.
size_t prologue_size = 2 * kInstrSize + kInstrSize;
@@ -12121,9 +12189,10 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
// Registers in the TmpList can be used by the macro assembler for debug code
// (for example in 'Pop'), so we can't use them here.
// x18 is reserved for the platform register.
- // Disallow x31 / xzr, to ensure this list has an even number of elements, to
- // ensure alignment.
- RegList allowed = ~(masm.TmpList()->list() | x18.bit() | x31.bit());
+ // For simplicity, exclude LR as well, as we would need to sign it when
+ // pushing it. This also ensures that the list has an even number of elements,
+ // which is needed for alignment.
+ RegList allowed = ~(masm.TmpList()->list() | x18.bit() | lr.bit());
if (reg_count == kPushPopMaxRegCount) {
reg_count = CountSetBits(allowed, kNumberOfRegisters);
}
@@ -12157,7 +12226,8 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
case PushPopByFour:
// Push high-numbered registers first (to the highest addresses).
for (i = reg_count; i >= 4; i -= 4) {
- __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
+ __ Push<TurboAssembler::kDontStoreLR>(r[i - 1], r[i - 2], r[i - 3],
+ r[i - 4]);
}
// Finish off the leftovers.
switch (i) {
@@ -12170,7 +12240,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
}
break;
case PushPopRegList:
- __ PushSizeRegList(list, reg_size);
+ __ PushSizeRegList<TurboAssembler::kDontStoreLR>(list, reg_size);
break;
}
@@ -12181,7 +12251,8 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
case PushPopByFour:
// Pop low-numbered registers first (from the lowest addresses).
for (i = 0; i <= (reg_count-4); i += 4) {
- __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
+ __ Pop<TurboAssembler::kDontLoadLR>(r[i], r[i + 1], r[i + 2],
+ r[i + 3]);
}
// Finish off the leftovers.
switch (reg_count - i) {
@@ -12194,7 +12265,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
}
break;
case PushPopRegList:
- __ PopSizeRegList(list, reg_size);
+ __ PopSizeRegList<TurboAssembler::kDontLoadLR>(list, reg_size);
break;
}
}
@@ -12525,10 +12596,10 @@ TEST(push_pop) {
__ Claim(2);
__ PushXRegList(0);
__ PopXRegList(0);
- // Don't push/pop x18 (platform register) or xzr (for alignment)
- RegList all_regs = 0xFFFFFFFF & ~(x18.bit() | x31.bit());
- __ PushXRegList(all_regs);
- __ PopXRegList(all_regs);
+ // Don't push/pop x18 (platform register) or lr
+ RegList all_regs = 0xFFFFFFFF & ~(x18.bit() | lr.bit());
+ __ PushXRegList<TurboAssembler::kDontStoreLR>(all_regs);
+ __ PopXRegList<TurboAssembler::kDontLoadLR>(all_regs);
__ Drop(12);
END();
@@ -13909,7 +13980,7 @@ TEST(blr_lr) {
__ Mov(x0, 0xDEADBEEF);
__ B(&end);
- __ Bind(&target);
+ __ Bind(&target, BranchTargetIdentifier::kBtiCall);
__ Mov(x0, 0xC001C0DE);
__ Bind(&end);
@@ -14527,13 +14598,11 @@ TEST(near_call_no_relocation) {
__ Bind(&test);
__ Mov(x0, 0x0);
- __ Push(lr, xzr);
{
Assembler::BlockConstPoolScope scope(&masm);
int offset = (function.pos() - __ pc_offset()) / kInstrSize;
__ near_call(offset, RelocInfo::NONE);
}
- __ Pop(xzr, lr);
END();
RUN();
@@ -14753,7 +14822,7 @@ TEST(jump_tables_forward) {
}
for (int i = 0; i < kNumCases; ++i) {
- __ Bind(&labels[i]);
+ __ Bind(&labels[i], BranchTargetIdentifier::kBtiJump);
__ Mov(value, values[i]);
__ B(&done);
}
@@ -14801,7 +14870,7 @@ TEST(jump_tables_backward) {
__ B(&loop);
for (int i = 0; i < kNumCases; ++i) {
- __ Bind(&labels[i]);
+ __ Bind(&labels[i], BranchTargetIdentifier::kBtiJump);
__ Mov(value, values[i]);
__ B(&done);
}
@@ -14863,7 +14932,7 @@ TEST(internal_reference_linked) {
__ dcptr(&done);
__ Tbz(x0, 1, &done);
- __ Bind(&done);
+ __ Bind(&done, BranchTargetIdentifier::kBtiJump);
__ Mov(x0, 1);
END();
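
The assembler changes in this file add ARM64 Branch Target Identification coverage: every label reached by an indirect branch now needs a BTI landing pad, so Bind takes an identifier matching how control arrives (the instruction is a NOP on unguarded pages, as the unguarded_bti_is_nop test verifies). In the cctest macro environment the pattern looks like:

Label target;
__ Adr(x0, &target);
__ Br(x0);                                           // indirect jump
__ Bind(&target, BranchTargetIdentifier::kBtiJump);  // landing pad for Br
// kBtiCall pairs with Blr; kBtiJumpCall accepts either kind of branch.
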
diff --git a/deps/v8/test/cctest/test-code-pages.cc b/deps/v8/test/cctest/test-code-pages.cc
index 93eddb0a4c..7fafe62629 100644
--- a/deps/v8/test/cctest/test-code-pages.cc
+++ b/deps/v8/test/cctest/test-code-pages.cc
@@ -21,10 +21,8 @@ namespace test_code_pages {
// 2 - Have code pages. ARM32 only
// 3 - Nothing - This feature does not work on other platforms.
#if defined(V8_TARGET_ARCH_ARM)
-static const bool kHaveCodeRange = false;
static const bool kHaveCodePages = true;
#else
-static const bool kHaveCodeRange = kRequiresCodeRange;
static const bool kHaveCodePages = false;
#endif // defined(V8_TARGET_ARCH_ARM)
@@ -86,11 +84,10 @@ bool PagesContainsAddress(std::vector<MemoryRange>* pages,
} // namespace
TEST(CodeRangeCorrectContents) {
- if (!kHaveCodeRange) return;
-
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ if (!i_isolate->RequiresCodeRange()) return;
std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
@@ -128,12 +125,12 @@ TEST(CodePagesCorrectContents) {
}
TEST(OptimizedCodeWithCodeRange) {
- if (!kHaveCodeRange) return;
-
FLAG_allow_natives_syntax = true;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ if (!i_isolate->RequiresCodeRange()) return;
+
HandleScope scope(i_isolate);
std::string foo_str = getFooCode(1);
@@ -255,7 +252,6 @@ TEST(OptimizedCodeWithCodePages) {
}
TEST(LargeCodeObject) {
- if (!kHaveCodeRange && !kHaveCodePages) return;
// We don't want incremental marking to start, which could cause the code
// not to be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope;
@@ -263,6 +259,7 @@ TEST(LargeCodeObject) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;
// Create a big function that ends up in CODE_LO_SPACE.
const int instruction_size = Page::kPageSize + 1;
@@ -290,7 +287,7 @@ TEST(LargeCodeObject) {
std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
- if (kHaveCodeRange) {
+ if (i_isolate->RequiresCodeRange()) {
CHECK(PagesContainsAddress(pages, foo_code->address()));
} else {
CHECK(PagesHasExactPage(pages, foo_code->address()));
@@ -371,7 +368,6 @@ class SamplingThread : public base::Thread {
};
TEST(LargeCodeObjectWithSignalHandler) {
- if (!kHaveCodeRange && !kHaveCodePages) return;
// We don't want incremental marking to start, which could cause the code
// not to be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope;
@@ -379,6 +375,7 @@ TEST(LargeCodeObjectWithSignalHandler) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;
// Create a big function that ends up in CODE_LO_SPACE.
const int instruction_size = Page::kPageSize + 1;
@@ -421,7 +418,7 @@ TEST(LargeCodeObjectWithSignalHandler) {
// Check that the page was added.
std::vector<MemoryRange> pages =
SamplingThread::DoSynchronousSample(isolate);
- if (kHaveCodeRange) {
+ if (i_isolate->RequiresCodeRange()) {
CHECK(PagesContainsAddress(&pages, foo_code->address()));
} else {
CHECK(PagesHasExactPage(&pages, foo_code->address()));
@@ -447,7 +444,6 @@ TEST(LargeCodeObjectWithSignalHandler) {
}
TEST(Sorted) {
- if (!kHaveCodeRange && !kHaveCodePages) return;
// We don't want incremental marking to start, which could cause the code
// not to be collected on the CollectGarbage() call.
ManualGCScope manual_gc_scope;
@@ -455,6 +451,7 @@ TEST(Sorted) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;
// Create a big function that ends up in CODE_LO_SPACE.
const int instruction_size = Page::kPageSize + 1;
@@ -507,7 +504,7 @@ TEST(Sorted) {
// Check that the pages were added.
std::vector<MemoryRange> pages =
SamplingThread::DoSynchronousSample(isolate);
- if (kHaveCodeRange) {
+ if (i_isolate->RequiresCodeRange()) {
CHECK_EQ(pages.size(), initial_num_pages);
} else {
CHECK_EQ(pages.size(), initial_num_pages + 3);
@@ -528,7 +525,7 @@ TEST(Sorted) {
std::vector<MemoryRange> pages =
SamplingThread::DoSynchronousSample(isolate);
- if (kHaveCodeRange) {
+ if (i_isolate->RequiresCodeRange()) {
CHECK_EQ(pages.size(), initial_num_pages);
} else {
CHECK_EQ(pages.size(), initial_num_pages + 2);
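
These edits replace the compile-time kHaveCodeRange constant with a per-isolate query, since whether a code range exists can now vary at runtime. The guard pattern used throughout the file:

Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;
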
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 623ff0d0f3..755041e0fb 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -728,10 +728,16 @@ TEST(TryToName) {
{
// TryToName(<internalized uncacheable number string greater than
- // array index>) => is_keyisunique: <internalized string>.
+ // array index but less than MAX_SAFE_INTEGER>) => 32-bit platforms
+ // take the if_keyisunique path, while 64-bit platforms bail out because they
+ // let the runtime handle the string-to-size_t parsing.
Handle<Object> key =
isolate->factory()->InternalizeUtf8String("4294967296");
+#if V8_TARGET_ARCH_64_BIT
+ ft.CheckTrue(key, expect_bailout);
+#else
ft.CheckTrue(key, expect_unique, key);
+#endif
}
{
@@ -1203,8 +1209,8 @@ TEST(TryHasOwnProperty) {
enum Result { kFound, kNotFound, kBailout };
{
- Node* object = m.Parameter(0);
- Node* unique_name = m.Parameter(1);
+ TNode<HeapObject> object = m.CAST(m.Parameter(0));
+ TNode<Name> unique_name = m.CAST(m.Parameter(1));
TNode<MaybeObject> expected_result =
m.UncheckedCast<MaybeObject>(m.Parameter(2));
@@ -2292,7 +2298,8 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
: CodeStubAssembler(state), kind_(kind) {}
void TestAppendJSArrayImpl(Isolate* isolate, CodeAssemblerTester* csa_tester,
- Object o1, Object o2, Object o3, Object o4,
+ Handle<Object> o1, Handle<Object> o2,
+ Handle<Object> o3, Handle<Object> o4,
int initial_size, int result_size) {
Handle<JSArray> array = isolate->factory()->NewJSArray(
kind_, 2, initial_size, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
@@ -2315,23 +2322,22 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
FunctionTester ft(csa_tester->GenerateCode(), kNumParams);
- Handle<Object> result =
- ft.Call(Handle<Object>(o1, isolate), Handle<Object>(o2, isolate),
- Handle<Object>(o3, isolate), Handle<Object>(o4, isolate))
- .ToHandleChecked();
+ Handle<Object> result = ft.Call(o1, o2, o3, o4).ToHandleChecked();
CHECK_EQ(kind_, array->GetElementsKind());
CHECK_EQ(result_size, Handle<Smi>::cast(result)->value());
CHECK_EQ(result_size, Smi::ToInt(array->length()));
- Object obj = *JSObject::GetElement(isolate, array, 2).ToHandleChecked();
- HeapObject undefined_value = ReadOnlyRoots(isolate).undefined_value();
- CHECK_EQ(result_size < 3 ? undefined_value : o1, obj);
- obj = *JSObject::GetElement(isolate, array, 3).ToHandleChecked();
- CHECK_EQ(result_size < 4 ? undefined_value : o2, obj);
- obj = *JSObject::GetElement(isolate, array, 4).ToHandleChecked();
- CHECK_EQ(result_size < 5 ? undefined_value : o3, obj);
- obj = *JSObject::GetElement(isolate, array, 5).ToHandleChecked();
- CHECK_EQ(result_size < 6 ? undefined_value : o4, obj);
+ Handle<Object> obj =
+ JSObject::GetElement(isolate, array, 2).ToHandleChecked();
+ Handle<HeapObject> undefined_value =
+ Handle<HeapObject>(ReadOnlyRoots(isolate).undefined_value(), isolate);
+ CHECK_EQ(result_size < 3 ? *undefined_value : *o1, *obj);
+ obj = JSObject::GetElement(isolate, array, 3).ToHandleChecked();
+ CHECK_EQ(result_size < 4 ? *undefined_value : *o2, *obj);
+ obj = JSObject::GetElement(isolate, array, 4).ToHandleChecked();
+ CHECK_EQ(result_size < 5 ? *undefined_value : *o3, *obj);
+ obj = JSObject::GetElement(isolate, array, 5).ToHandleChecked();
+ CHECK_EQ(result_size < 6 ? *undefined_value : *o4, *obj);
}
static void TestAppendJSArray(Isolate* isolate, ElementsKind kind, Object o1,
@@ -2339,8 +2345,10 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
int initial_size, int result_size) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
AppendJSArrayCodeStubAssembler m(asm_tester.state(), kind);
- m.TestAppendJSArrayImpl(isolate, &asm_tester, o1, o2, o3, o4, initial_size,
- result_size);
+ m.TestAppendJSArrayImpl(
+ isolate, &asm_tester, Handle<Object>(o1, isolate),
+ Handle<Object>(o2, isolate), Handle<Object>(o3, isolate),
+ Handle<Object>(o4, isolate), initial_size, result_size);
}
private:
@@ -3305,8 +3313,8 @@ TEST(ExtractFixedArrayCOWForceCopy) {
CodeStubAssembler m(asm_tester.state());
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
- m.Return(m.ExtractFixedArray(m.Parameter(0), m.SmiConstant(0), nullptr,
- nullptr, flags,
+ m.Return(m.ExtractFixedArray(m.CAST(m.Parameter(0)), m.SmiConstant(0),
+ nullptr, nullptr, flags,
CodeStubAssembler::SMI_PARAMETERS));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3334,8 +3342,8 @@ TEST(ExtractFixedArraySimple) {
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
- m.Return(m.ExtractFixedArray(m.Parameter(0), m.Parameter(1), m.Parameter(2),
- nullptr, flags,
+ m.Return(m.ExtractFixedArray(m.CAST(m.Parameter(0)), m.Parameter(1),
+ m.Parameter(2), nullptr, flags,
CodeStubAssembler::SMI_PARAMETERS));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3361,7 +3369,7 @@ TEST(ExtractFixedArraySimpleSmiConstant) {
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
- m.Return(m.ExtractFixedArray(m.Parameter(0), m.SmiConstant(1),
+ m.Return(m.ExtractFixedArray(m.CAST(m.Parameter(0)), m.SmiConstant(1),
m.SmiConstant(2), nullptr, flags,
CodeStubAssembler::SMI_PARAMETERS));
}
@@ -3385,7 +3393,7 @@ TEST(ExtractFixedArraySimpleIntPtrConstant) {
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
- m.Return(m.ExtractFixedArray(m.Parameter(0), m.IntPtrConstant(1),
+ m.Return(m.ExtractFixedArray(m.CAST(m.Parameter(0)), m.IntPtrConstant(1),
m.IntPtrConstant(2), nullptr, flags,
CodeStubAssembler::INTPTR_PARAMETERS));
}
@@ -3407,8 +3415,8 @@ TEST(ExtractFixedArraySimpleIntPtrConstantNoDoubles) {
{
CodeStubAssembler m(asm_tester.state());
m.Return(m.ExtractFixedArray(
- m.Parameter(0), m.IntPtrConstant(1), m.IntPtrConstant(2), nullptr,
- CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays,
+ m.CAST(m.Parameter(0)), m.IntPtrConstant(1), m.IntPtrConstant(2),
+ nullptr, CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays,
CodeStubAssembler::INTPTR_PARAMETERS));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3430,7 +3438,8 @@ TEST(ExtractFixedArraySimpleIntPtrParameters) {
CodeStubAssembler m(asm_tester.state());
TNode<IntPtrT> p1_untagged = m.SmiUntag(m.Parameter(1));
TNode<IntPtrT> p2_untagged = m.SmiUntag(m.Parameter(2));
- m.Return(m.ExtractFixedArray(m.Parameter(0), p1_untagged, p2_untagged));
+ m.Return(
+ m.ExtractFixedArray(m.CAST(m.Parameter(0)), p1_untagged, p2_untagged));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3622,7 +3631,7 @@ TEST(TestCallBuiltinInlineTrampoline) {
const int kContextOffset = 2;
Node* str = m.Parameter(0);
- Node* context = m.Parameter(kNumParams + kContextOffset);
+ TNode<Context> context = m.CAST(m.Parameter(kNumParams + kContextOffset));
TNode<Smi> index = m.SmiConstant(2);
@@ -3648,7 +3657,7 @@ DISABLED_TEST(TestCallBuiltinIndirectLoad) {
const int kContextOffset = 2;
Node* str = m.Parameter(0);
- Node* context = m.Parameter(kNumParams + kContextOffset);
+ TNode<Context> context = m.CAST(m.Parameter(kNumParams + kContextOffset));
TNode<Smi> index = m.SmiConstant(2);
diff --git a/deps/v8/test/cctest/test-constantpool.cc b/deps/v8/test/cctest/test-constantpool.cc
index a861655adc..3d063ac770 100644
--- a/deps/v8/test/cctest/test-constantpool.cc
+++ b/deps/v8/test/cctest/test-constantpool.cc
@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
-#if defined(V8_TARGET_ARCH_PPC)
+#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
const ConstantPoolEntry::Type kPtrType = ConstantPoolEntry::INTPTR;
const ConstantPoolEntry::Type kDblType = ConstantPoolEntry::DOUBLE;
@@ -249,7 +249,7 @@ TEST(ConstantPoolNoSharing) {
CHECK_EQ(access, kOvflAccess);
}
-#endif // defined(V8_TARGET_ARCH_PPC)
+#endif // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
index 513b9ac1b9..a8b1ecb7a9 100644
--- a/deps/v8/test/cctest/test-debug-helper.cc
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -122,6 +122,7 @@ class StringResource : public v8::String::ExternalStringResource {
TEST(GetObjectProperties) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
v8::HandleScope scope(isolate);
LocalContext context;
// Claim we don't know anything about the heap layout.
@@ -180,11 +181,8 @@ TEST(GetObjectProperties) {
: Contains(props->brief, "maybe EmptyFixedArray"));
// Provide a heap first page so the API can be more sure.
- heap_addresses.read_only_space_first_page =
- reinterpret_cast<uintptr_t>(reinterpret_cast<i::Isolate*>(isolate)
- ->heap()
- ->read_only_space()
- ->first_page());
+ heap_addresses.read_only_space_first_page = reinterpret_cast<uintptr_t>(
+ i_isolate->heap()->read_only_space()->first_page());
props =
d::GetObjectProperties(properties_or_hash, &ReadMemory, heap_addresses);
CHECK(props->type_check_result ==
@@ -373,10 +371,25 @@ TEST(GetObjectProperties) {
ReadProp<i::Tagged_t>(*props, "shared_function_info"), &ReadMemory,
heap_addresses);
const d::ObjectProperty& flags = FindProp(*props, "flags");
- CheckStructProp(*flags.struct_fields[0], "v8::internal::FunctionKind",
- "function_kind", 0, 5, 0);
+ CHECK_GE(flags.num_struct_fields, 3);
+ CheckStructProp(*flags.struct_fields[0], "FunctionKind", "function_kind", 0,
+ 5, 0);
CheckStructProp(*flags.struct_fields[1], "bool", "is_native", 0, 1, 5);
CheckStructProp(*flags.struct_fields[2], "bool", "is_strict", 0, 1, 6);
+
+ // Get data about a different bitfield struct which is contained within a smi.
+ Handle<i::JSFunction> function = Handle<i::JSFunction>::cast(o);
+ Handle<i::SharedFunctionInfo> shared(function->shared(), i_isolate);
+ Handle<i::DebugInfo> debug_info =
+ i_isolate->debug()->GetOrCreateDebugInfo(shared);
+ props =
+ d::GetObjectProperties(debug_info->ptr(), &ReadMemory, heap_addresses);
+ const d::ObjectProperty& debug_flags = FindProp(*props, "flags");
+ CHECK_GE(debug_flags.num_struct_fields, 5);
+ CheckStructProp(*debug_flags.struct_fields[0], "bool", "has_break_info", 0, 1,
+ i::kSmiTagSize + i::kSmiShiftSize);
+ CheckStructProp(*debug_flags.struct_fields[4], "bool", "can_break_at_entry",
+ 0, 1, i::kSmiTagSize + i::kSmiShiftSize + 4);
}
TEST(ListObjectClasses) {
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 4aae4294da..ed669db806 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -35,6 +35,7 @@
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames.h"
+#include "src/execution/microtask-queue.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/utils.h"
@@ -2932,9 +2933,11 @@ TEST(DebugBreak) {
class DebugScopingListener : public v8::debug::DebugDelegate {
public:
- void BreakProgramRequested(
- v8::Local<v8::Context>,
- const std::vector<v8::debug::BreakpointId>&) override {
+ void ExceptionThrown(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Value> promise, bool is_uncaught,
+ v8::debug::ExceptionType exception_type) override {
+ break_count_++;
auto stack_traces =
v8::debug::StackTraceIterator::Create(CcTest::isolate());
v8::debug::Location location = stack_traces->GetSourceLocation();
@@ -2957,6 +2960,10 @@ class DebugScopingListener : public v8::debug::DebugDelegate {
scopes->Advance();
CHECK(scopes->Done());
}
+ unsigned break_count() const { return break_count_; }
+
+ private:
+ unsigned break_count_ = 0;
};
TEST(DebugBreakInWrappedScript) {
@@ -2996,6 +3003,7 @@ TEST(DebugBreakInWrappedScript) {
// Get rid of the debug event listener.
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CHECK_EQ(1, delegate.break_count());
CheckDebuggerUnloaded();
}
@@ -3234,7 +3242,7 @@ TEST(DebugScriptLineEndsAreAscending) {
Handle<v8::internal::Script> script = Handle<v8::internal::Script>(
v8::internal::Script::cast(instances->get(i)), CcTest::i_isolate());
- v8::internal::Script::InitLineEnds(script);
+ v8::internal::Script::InitLineEnds(CcTest::i_isolate(), script);
v8::internal::FixedArray ends =
v8::internal::FixedArray::cast(script->line_ends());
CHECK_GT(ends.length(), 0);
@@ -3545,10 +3553,8 @@ TEST(AfterCompileEventOnBindToContext) {
ScriptCompiledDelegate delegate;
v8::debug::SetDebugDelegate(isolate, &delegate);
- const char* source = "var a=1";
v8::ScriptCompiler::Source script_source(
- v8::String::NewFromUtf8(isolate, source, v8::NewStringType::kNormal)
- .ToLocalChecked());
+ v8::String::NewFromUtf8Literal(isolate, "var a=1"));
v8::Local<v8::UnboundScript> unbound =
v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source)
@@ -4955,3 +4961,639 @@ TEST(GetPrivateMethodsAndAccessors) {
}
}
}
+
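+// Checks that v8::debug::GetPrivateMembers reports private static methods and
+// accessors on a class constructor: a method comes back as a plain Function,
+// getter/setter pairs come back as AccessorPair values, and the missing half
+// of a read-only or write-only pair is null.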
+TEST(GetPrivateStaticMethodsAndAccessors) {
+ i::FLAG_harmony_private_methods = true;
+ LocalContext env;
+ v8::Isolate* v8_isolate = CcTest::isolate();
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Context> context = env.local();
+
+ v8::Local<v8::String> source = v8_str(
+ "var X = class {\n"
+ " static #staticMethod() { }\n"
+ " static get #staticAccessor() { }\n"
+ " static set #staticAccessor(val) { }\n"
+ " static get #staticReadOnly() { }\n"
+ " static set #staticWriteOnly(val) { }\n"
+ "}\n");
+ CompileRun(source);
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "X"))
+ .ToLocalChecked());
+ std::vector<v8::Local<v8::Value>> names;
+ std::vector<v8::Local<v8::Value>> values;
+ CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
+
+ CHECK_EQ(names.size(), 4);
+ for (int i = 0; i < 4; i++) {
+ v8::Local<v8::Value> name = names[i];
+ v8::Local<v8::Value> value = values[i];
+ CHECK(name->IsString());
+ std::string name_str = FromString(v8_isolate, name.As<v8::String>());
+ if (name_str == "#staticMethod") {
+ CHECK(value->IsFunction());
+ } else {
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ if (name_str == "#staticAccessor") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
+ } else if (name_str == "#staticReadOnly") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsNull());
+ } else {
+ CHECK_EQ(name_str, "#staticWriteOnly");
+ CHECK(accessors->getter()->IsNull());
+ CHECK(accessors->setter()->IsFunction());
+ }
+ }
+ }
+}
+
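+// Same setup as above, but with both static and instance members: querying
+// the constructor must yield only the static members, and querying an
+// instance only the instance members.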
+TEST(GetPrivateStaticAndInstanceMethodsAndAccessors) {
+ i::FLAG_harmony_private_methods = true;
+ LocalContext env;
+ v8::Isolate* v8_isolate = CcTest::isolate();
+ v8::HandleScope scope(v8_isolate);
+ v8::Local<v8::Context> context = env.local();
+
+ v8::Local<v8::String> source = v8_str(
+ "var X = class {\n"
+ " static #staticMethod() { }\n"
+ " static get #staticAccessor() { }\n"
+ " static set #staticAccessor(val) { }\n"
+ " static get #staticReadOnly() { }\n"
+ " static set #staticWriteOnly(val) { }\n"
+ " #method() { }\n"
+ " get #accessor() { }\n"
+ " set #accessor(val) { }\n"
+ " get #readOnly() { }\n"
+ " set #writeOnly(val) { }\n"
+ "}\n"
+ "var x = new X()\n");
+ CompileRun(source);
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "X"))
+ .ToLocalChecked());
+ std::vector<v8::Local<v8::Value>> names;
+ std::vector<v8::Local<v8::Value>> values;
+ CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
+
+ CHECK_EQ(names.size(), 4);
+ for (int i = 0; i < 4; i++) {
+ v8::Local<v8::Value> name = names[i];
+ v8::Local<v8::Value> value = values[i];
+ CHECK(name->IsString());
+ std::string name_str = FromString(v8_isolate, name.As<v8::String>());
+ if (name_str == "#staticMethod") {
+ CHECK(value->IsFunction());
+ } else {
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ if (name_str == "#staticAccessor") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
+ } else if (name_str == "#staticReadOnly") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsNull());
+ } else {
+ CHECK_EQ(name_str, "#staticWriteOnly");
+ CHECK(accessors->getter()->IsNull());
+ CHECK(accessors->setter()->IsFunction());
+ }
+ }
+ }
+
+ names.clear();
+ values.clear();
+ object = v8::Local<v8::Object>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "x"))
+ .ToLocalChecked());
+ CHECK(v8::debug::GetPrivateMembers(context, object, &names, &values));
+
+ CHECK_EQ(names.size(), 4);
+ for (int i = 0; i < 4; i++) {
+ v8::Local<v8::Value> name = names[i];
+ v8::Local<v8::Value> value = values[i];
+ CHECK(name->IsString());
+ std::string name_str = FromString(v8_isolate, name.As<v8::String>());
+ if (name_str == "#method") {
+ CHECK(value->IsFunction());
+ } else {
+ CHECK(v8::debug::AccessorPair::IsAccessorPair(value));
+ v8::Local<v8::debug::AccessorPair> accessors =
+ value.As<v8::debug::AccessorPair>();
+ if (name_str == "#accessor") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsFunction());
+ } else if (name_str == "#readOnly") {
+ CHECK(accessors->getter()->IsFunction());
+ CHECK(accessors->setter()->IsNull());
+ } else {
+ CHECK_EQ(name_str, "#writeOnly");
+ CHECK(accessors->getter()->IsNull());
+ CHECK(accessors->setter()->IsFunction());
+ }
+ }
+ }
+}
+
+namespace {
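+// Debug delegate that requests termination on resume whenever a pause is
+// reported, optionally performing a microtask checkpoint or running a
+// JavaScript snippet while still paused at the breakpoint.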
+class SetTerminateOnResumeDelegate : public v8::debug::DebugDelegate {
+ public:
+ enum Options {
+ kNone,
+ kPerformMicrotaskCheckpointAtBreakpoint,
+ kRunJavaScriptAtBreakpoint
+ };
+ explicit SetTerminateOnResumeDelegate(Options options = kNone)
+ : options_(options) {}
+ void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+ const std::vector<v8::debug::BreakpointId>&
+ inspector_break_points_hit) override {
+ break_count_++;
+ v8::Isolate* isolate = paused_context->GetIsolate();
+ v8::debug::SetTerminateOnResume(isolate);
+ if (options_ == kPerformMicrotaskCheckpointAtBreakpoint) {
+ v8::MicrotasksScope::PerformCheckpoint(isolate);
+ }
+ if (options_ == kRunJavaScriptAtBreakpoint) {
+ CompileRun("globalVariable = globalVariable + 1");
+ }
+ }
+
+ void ExceptionThrown(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Value> promise, bool is_uncaught,
+ v8::debug::ExceptionType exception_type) override {
+ exception_thrown_count_++;
+ v8::debug::SetTerminateOnResume(paused_context->GetIsolate());
+ }
+
+ int break_count() const { return break_count_; }
+ int exception_thrown_count() const { return exception_thrown_count_; }
+
+ private:
+ int break_count_ = 0;
+ int exception_thrown_count_ = 0;
+ Options options_;
+};
+} // anonymous namespace
+
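+// Pausing at the `debugger` statement and resuming with terminate-on-resume
+// must abort the otherwise infinite loop with a termination exception.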
+TEST(TerminateOnResumeAtBreakpoint) {
+ break_point_hit_count = 0;
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ SetTerminateOnResumeDelegate delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ v8::Local<v8::Context> context = env.local();
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ // If the delegate doesn't request termination on resume from breakpoint,
+ // foo diverges.
+ v8::Script::Compile(
+ context,
+ v8_str(env->GetIsolate(), "function foo(){debugger; while(true){}}"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "foo"))
+ .ToLocalChecked());
+
+ v8::MaybeLocal<v8::Value> val =
+ foo->Call(context, env->Global(), 0, nullptr);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.break_count(), 1);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
+namespace {
+bool microtask_one_ran = false;
+static void MicrotaskOne(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CHECK(v8::MicrotasksScope::IsRunningMicrotasks(info.GetIsolate()));
+ v8::HandleScope scope(info.GetIsolate());
+ v8::MicrotasksScope microtasks(info.GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ ExpectInt32("1 + 1", 2);
+ microtask_one_ran = true;
+}
+} // namespace
+
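+// A microtask enqueued before the pause still runs at the checkpoint the
+// delegate performs while paused; termination only takes effect on resume.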
+TEST(TerminateOnResumeRunMicrotaskAtBreakpoint) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ SetTerminateOnResumeDelegate delegate(
+ SetTerminateOnResumeDelegate::kPerformMicrotaskCheckpointAtBreakpoint);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ v8::Local<v8::Context> context = env.local();
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ // Enqueue a microtask that gets run while we are paused at the breakpoint.
+ env->GetIsolate()->EnqueueMicrotask(
+ v8::Function::New(env.local(), MicrotaskOne).ToLocalChecked());
+
+ // If the delegate doesn't request termination on resume from breakpoint,
+ // foo diverges.
+ v8::Script::Compile(
+ context,
+ v8_str(env->GetIsolate(), "function foo(){debugger; while(true){}}"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "foo"))
+ .ToLocalChecked());
+
+ v8::MaybeLocal<v8::Value> val =
+ foo->Call(context, env->Global(), 0, nullptr);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.break_count(), 1);
+ CHECK(microtask_one_ran);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
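+// JavaScript run by the delegate while paused (incrementing globalVariable)
+// must take effect even though execution is terminated on resume.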
+TEST(TerminateOnResumeRunJavaScriptAtBreakpoint) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun("var globalVariable = 0;");
+ SetTerminateOnResumeDelegate delegate(
+ SetTerminateOnResumeDelegate::kRunJavaScriptAtBreakpoint);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ v8::Local<v8::Context> context = env.local();
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ // If the delegate doesn't request termination on resume from breakpoint,
+ // foo diverges.
+ v8::Script::Compile(
+ context,
+ v8_str(env->GetIsolate(), "function foo(){debugger; while(true){}}"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "foo"))
+ .ToLocalChecked());
+
+ v8::MaybeLocal<v8::Value> val =
+ foo->Call(context, env->Global(), 0, nullptr);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.break_count(), 1);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ ExpectInt32("globalVariable", 1);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
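+// The same terminate-on-resume mechanism works when the pause is triggered
+// by a thrown exception rather than a breakpoint.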
+TEST(TerminateOnResumeAtException) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ ChangeBreakOnException(true, true);
+ SetTerminateOnResumeDelegate delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ v8::Local<v8::Context> context = env.local();
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ const char* source = "throw new Error(); while(true){};";
+
+ v8::ScriptCompiler::Source script_source(v8_str(source));
+ v8::Local<v8::Function> foo =
+ v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr)
+ .ToLocalChecked();
+
+ v8::MaybeLocal<v8::Value> val =
+ foo->Call(context, env->Global(), 0, nullptr);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.break_count(), 0);
+ CHECK_EQ(delegate.exception_thrown_count(), 1);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
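+// Terminate-on-resume also works for a break-on-entry breakpoint set on a
+// builtin (String.prototype.repeat).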
+TEST(TerminateOnResumeAtBreakOnEntry) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ SetTerminateOnResumeDelegate delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ v8::Local<v8::Function> builtin =
+ CompileRun("String.prototype.repeat").As<v8::Function>();
+ SetBreakPoint(builtin, 0);
+ v8::Local<v8::Value> val = CompileRun("'b'.repeat(10)");
+ CHECK_EQ(delegate.break_count(), 1);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.exception_thrown_count(), 0);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
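+// As above, but for a user-defined function that has already been compiled
+// to bytecode by a prior breakpoint-free run.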
+TEST(TerminateOnResumeAtBreakOnEntryUserDefinedFunction) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ SetTerminateOnResumeDelegate delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(b) { while (b > 0) {} }", "foo");
+
+ // Run without breakpoints to compile source to bytecode.
+ CompileRun("foo(-1)");
+ CHECK_EQ(delegate.break_count(), 0);
+
+ SetBreakPoint(foo, 0);
+ v8::Local<v8::Value> val = CompileRun("foo(1)");
+ CHECK_EQ(delegate.break_count(), 1);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.exception_thrown_count(), 0);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
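+// An unhandled promise rejection inside an async function reports
+// ExceptionThrown, and resuming terminates the trailing infinite loop.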
+TEST(TerminateOnResumeAtUnhandledRejection) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ ChangeBreakOnException(true, true);
+ SetTerminateOnResumeDelegate delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ v8::Local<v8::Context> context = env.local();
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ v8::Local<v8::Function> foo = CompileFunction(
+ &env, "async function foo() { Promise.reject(); while(true) {} }",
+ "foo");
+
+ v8::MaybeLocal<v8::Value> val =
+ foo->Call(context, env->Global(), 0, nullptr);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.break_count(), 0);
+ CHECK_EQ(delegate.exception_thrown_count(), 1);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
+namespace {
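+// Rejects a promise from C++ through a Promise::Resolver; the debug delegate
+// only receives the rejection callback while a JavaScript frame is on the
+// stack, which the caller below arranges.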
+void RejectPromiseThroughCpp(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ auto data = reinterpret_cast<std::pair<v8::Isolate*, LocalContext*>*>(
+ info.Data().As<v8::External>()->Value());
+
+ v8::Local<v8::String> value1 =
+ v8::String::NewFromUtf8Literal(data->first, "foo");
+
+ v8::Local<v8::Promise::Resolver> resolver =
+ v8::Promise::Resolver::New(data->second->local()).ToLocalChecked();
+ v8::Local<v8::Promise> promise = resolver->GetPromise();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kPending);
+
+ resolver->Reject(data->second->local(), value1).ToChecked();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kRejected);
+ // CHECK_EQ(*v8::Utils::OpenHandle(*promise->Result()),
+ // i::ReadOnlyRoots(CcTest::i_isolate()).exception());
+}
+} // namespace
+
+TEST(TerminateOnResumeAtUnhandledRejectionCppImpl) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(env->GetIsolate());
+ ChangeBreakOnException(true, true);
+ SetTerminateOnResumeDelegate delegate;
+ auto data = std::make_pair(isolate, &env);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ {
+ // We want to trigger a breakpoint upon Promise rejection, but we will only
+ // get the callback if there is at least one JavaScript frame in the stack.
+ v8::Local<v8::Function> func =
+ v8::Function::New(env.local(), RejectPromiseThroughCpp,
+ v8::External::New(isolate, &data))
+ .ToLocalChecked();
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("RejectPromiseThroughCpp"), func)
+ .FromJust());
+
+ CompileRun("RejectPromiseThroughCpp(); while (true) {}");
+ CHECK_EQ(delegate.break_count(), 0);
+ CHECK_EQ(delegate.exception_thrown_count(), 1);
+ }
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
+namespace {
+static void UnreachableMicrotask(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ UNREACHABLE();
+}
+} // namespace
+
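+// When the pause happens inside a microtask, terminating on resume must also
+// cancel the rest of the microtask queue: the second, unreachable microtask
+// must never run.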
+TEST(TerminateOnResumeFromMicrotask) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ SetTerminateOnResumeDelegate delegate(
+ SetTerminateOnResumeDelegate::kPerformMicrotaskCheckpointAtBreakpoint);
+ ChangeBreakOnException(true, true);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ // Enqueue a microtask that gets run while we are paused at the breakpoint.
+ v8::Local<v8::Function> foo = CompileFunction(
+ &env, "function foo(){ Promise.reject(); while (true) {} }", "foo");
+ env->GetIsolate()->EnqueueMicrotask(foo);
+ env->GetIsolate()->EnqueueMicrotask(
+ v8::Function::New(env.local(), UnreachableMicrotask).ToLocalChecked());
+
+ CHECK_EQ(2,
+ CcTest::i_isolate()->native_context()->microtask_queue()->size());
+
+ v8::MicrotasksScope::PerformCheckpoint(env->GetIsolate());
+
+ CHECK_EQ(0,
+ CcTest::i_isolate()->native_context()->microtask_queue()->size());
+
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.break_count(), 0);
+ CHECK_EQ(delegate.exception_thrown_count(), 1);
+ }
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
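+// Helper thread that waits until the main thread signals it is paused at a
+// breakpoint and then requests terminate-on-resume from off-thread.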
+class FutexInterruptionThread : public v8::base::Thread {
+ public:
+ FutexInterruptionThread(v8::Isolate* isolate, v8::base::Semaphore* sem)
+ : Thread(Options("FutexInterruptionThread")),
+ isolate_(isolate),
+ sem_(sem) {}
+
+ void Run() override {
+ // Wait a bit, then wait until the breakpoint has been hit, before
+ // requesting termination on resume.
+ v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
+ sem_->Wait();
+ v8::debug::SetTerminateOnResume(isolate_);
+ }
+
+ private:
+ v8::Isolate* isolate_;
+ v8::base::Semaphore* sem_;
+};
+
+namespace {
+class SemaphoreTriggerOnBreak : public v8::debug::DebugDelegate {
+ public:
+ SemaphoreTriggerOnBreak() : sem_(0) {}
+ void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+ const std::vector<v8::debug::BreakpointId>&
+ inspector_break_points_hit) override {
+ break_count_++;
+ sem_.Signal();
+ }
+
+ v8::base::Semaphore* semaphore() { return &sem_; }
+ int break_count() const { return break_count_; }
+
+ private:
+ v8::base::Semaphore sem_;
+ int break_count_ = 0;
+};
+} // anonymous namespace
+
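+// v8::debug::SetTerminateOnResume may be called from a different thread while
+// the isolate is paused at a breakpoint.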
+TEST(TerminateOnResumeFromOtherThread) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ ChangeBreakOnException(true, true);
+
+ SemaphoreTriggerOnBreak delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+
+ FutexInterruptionThread timeout_thread(env->GetIsolate(),
+ delegate.semaphore());
+ CHECK(timeout_thread.Start());
+
+ v8::Local<v8::Context> context = env.local();
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ const char* source = "debugger; while(true){};";
+
+ v8::ScriptCompiler::Source script_source(v8_str(source));
+ v8::Local<v8::Function> foo =
+ v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr)
+ .ToLocalChecked();
+
+ v8::MaybeLocal<v8::Value> val =
+ foo->Call(context, env->Global(), 0, nullptr);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.break_count(), 1);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
+namespace {
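+// Helper thread that interrupts the isolate after a short delay and breaks
+// immediately via v8::debug::BreakRightNow.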
+class InterruptionBreakRightNow : public v8::base::Thread {
+ public:
+ explicit InterruptionBreakRightNow(v8::Isolate* isolate)
+ : Thread(Options("InterruptionBreakRightNow")), isolate_(isolate) {}
+
+ void Run() override {
+ // Wait a bit before requesting the interrupt.
+ v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
+ isolate_->RequestInterrupt(BreakRightNow, nullptr);
+ }
+
+ private:
+ static void BreakRightNow(v8::Isolate* isolate, void* data) {
+ v8::debug::BreakRightNow(isolate);
+ }
+ v8::Isolate* isolate_;
+};
+
+} // anonymous namespace
+
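+// A pause triggered via RequestInterrupt/BreakRightNow from another thread
+// also honors terminate-on-resume.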
+TEST(TerminateOnResumeAtInterruptFromOtherThread) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ ChangeBreakOnException(true, true);
+
+ SetTerminateOnResumeDelegate delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+
+ InterruptionBreakRightNow timeout_thread(env->GetIsolate());
+
+ v8::Local<v8::Context> context = env.local();
+ {
+ v8::TryCatch try_catch(env->GetIsolate());
+ const char* source = "while(true){}";
+
+ v8::ScriptCompiler::Source script_source(v8_str(source));
+ v8::Local<v8::Function> foo =
+ v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 0, nullptr, 0, nullptr)
+ .ToLocalChecked();
+
+ CHECK(timeout_thread.Start());
+ v8::MaybeLocal<v8::Value> val =
+ foo->Call(context, env->Global(), 0, nullptr);
+ CHECK(val.IsEmpty());
+ CHECK(try_catch.HasTerminated());
+ CHECK_EQ(delegate.break_count(), 1);
+ }
+ // Exiting the TryCatch brought the isolate back to a state where JavaScript
+ // can be executed.
+ ExpectInt32("1 + 1", 2);
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 14c30cd362..e2b5772654 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -147,9 +147,7 @@ void DeclarationContext::Check(const char* source, int get, int set, int query,
catcher.SetVerbose(true);
Local<Context> context = CcTest::isolate()->GetCurrentContext();
MaybeLocal<Script> script = Script::Compile(
- context,
- String::NewFromUtf8(CcTest::isolate(), source, v8::NewStringType::kNormal)
- .ToLocalChecked());
+ context, String::NewFromUtf8(CcTest::isolate(), source).ToLocalChecked());
if (expectations == EXPECT_ERROR) {
CHECK(script.IsEmpty());
return;
@@ -445,9 +443,8 @@ class SimpleContext {
TryCatch catcher(context_->GetIsolate());
catcher.SetVerbose(true);
MaybeLocal<Script> script = Script::Compile(
- context_, String::NewFromUtf8(context_->GetIsolate(), source,
- v8::NewStringType::kNormal)
- .ToLocalChecked());
+ context_,
+ String::NewFromUtf8(context_->GetIsolate(), source).ToLocalChecked());
if (expectations == EXPECT_ERROR) {
CHECK(script.IsEmpty());
return;
@@ -748,14 +745,10 @@ TEST(CrossScriptDynamicLookup) {
{
SimpleContext context;
- Local<String> undefined_string =
- String::NewFromUtf8(CcTest::isolate(), "undefined",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
- Local<String> number_string =
- String::NewFromUtf8(CcTest::isolate(), "number",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ Local<String> undefined_string = String::NewFromUtf8Literal(
+ CcTest::isolate(), "undefined", v8::NewStringType::kInternalized);
+ Local<String> number_string = String::NewFromUtf8Literal(
+ CcTest::isolate(), "number", v8::NewStringType::kInternalized);
context.Check(
"function f(o) { with(o) { return x; } }"
@@ -825,14 +818,10 @@ TEST(CrossScriptStaticLookupUndeclared) {
{
SimpleContext context;
- Local<String> undefined_string =
- String::NewFromUtf8(CcTest::isolate(), "undefined",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
- Local<String> number_string =
- String::NewFromUtf8(CcTest::isolate(), "number",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
+ Local<String> undefined_string = String::NewFromUtf8Literal(
+ CcTest::isolate(), "undefined", v8::NewStringType::kInternalized);
+ Local<String> number_string = String::NewFromUtf8Literal(
+ CcTest::isolate(), "number", v8::NewStringType::kInternalized);
context.Check(
"function f(o) { return x; }"
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 2b46d7ed11..93a0c9a807 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -1874,11 +1874,45 @@ TEST_(system_msr) {
TEST_(system_nop) {
- SET_UP_ASM();
+ {
+ SET_UP_ASM();
+ COMPARE(nop(), "nop");
+ CLEANUP();
+ }
+ {
+ SET_UP_MASM();
+ COMPARE(Nop(), "nop");
+ CLEANUP();
+ }
+}
- COMPARE(nop(), "nop");
+TEST_(bti) {
+ {
+ SET_UP_ASM();
- CLEANUP();
+ COMPARE(bti(BranchTargetIdentifier::kBti), "bti");
+ COMPARE(bti(BranchTargetIdentifier::kBtiCall), "bti c");
+ COMPARE(bti(BranchTargetIdentifier::kBtiJump), "bti j");
+ COMPARE(bti(BranchTargetIdentifier::kBtiJumpCall), "bti jc");
+ COMPARE(hint(BTI), "bti");
+ COMPARE(hint(BTI_c), "bti c");
+ COMPARE(hint(BTI_j), "bti j");
+ COMPARE(hint(BTI_jc), "bti jc");
+
+ CLEANUP();
+ }
+
+ {
+ SET_UP_MASM();
+
+ Label dummy1, dummy2, dummy3, dummy4;
+ COMPARE(Bind(&dummy1, BranchTargetIdentifier::kBti), "bti");
+ COMPARE(Bind(&dummy2, BranchTargetIdentifier::kBtiCall), "bti c");
+ COMPARE(Bind(&dummy3, BranchTargetIdentifier::kBtiJump), "bti j");
+ COMPARE(Bind(&dummy4, BranchTargetIdentifier::kBtiJumpCall), "bti jc");
+
+ CLEANUP();
+ }
}
TEST(system_pauth) {
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 686b1e122e..cc4f5cc296 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -592,6 +592,7 @@ TEST(DisasmIa320) {
if (CpuFeatures::IsSupported(SSSE3)) {
CpuFeatureScope scope(&assm, SSSE3);
SSSE3_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
+ SSSE3_UNOP_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
__ palignr(xmm5, xmm1, 5);
__ palignr(xmm5, Operand(edx, 4), 5);
}
@@ -802,6 +803,7 @@ TEST(DisasmIa320) {
__ v##instruction(xmm5, xmm1); \
__ v##instruction(xmm5, Operand(edx, 4));
+ SSSE3_UNOP_INSTRUCTION_LIST(EMIT_SSE4_RM_AVXINSTR)
SSE4_RM_INSTRUCTION_LIST(EMIT_SSE4_RM_AVXINSTR)
#undef EMIT_SSE4_RM_AVXINSTR
}
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 5e85cb5a08..b9cf05bcc9 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -391,8 +391,6 @@ TEST(DisasmX64) {
// Move operation
__ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
__ cvttss2si(rdx, xmm1);
- __ cvtsd2ss(xmm0, xmm1);
- __ cvtsd2ss(xmm0, Operand(rbx, rcx, times_4, 10000));
__ cvttps2dq(xmm0, xmm1);
__ cvttps2dq(xmm0, Operand(rbx, rcx, times_4, 10000));
__ movaps(xmm0, xmm1);
@@ -437,13 +435,6 @@ TEST(DisasmX64) {
__ ucomisd(xmm0, xmm1);
- __ andpd(xmm0, xmm1);
- __ andpd(xmm0, Operand(rbx, rcx, times_4, 10000));
- __ orpd(xmm0, xmm1);
- __ orpd(xmm0, Operand(rbx, rcx, times_4, 10000));
- __ xorpd(xmm0, xmm1);
- __ xorpd(xmm0, Operand(rbx, rcx, times_4, 10000));
-
__ pcmpeqd(xmm1, xmm0);
__ punpckldq(xmm1, xmm11);
@@ -458,6 +449,7 @@ TEST(DisasmX64) {
__ instruction(xmm5, Operand(rdx, 4));
SSE2_INSTRUCTION_LIST(EMIT_SSE2_INSTR)
+ SSE2_UNOP_INSTRUCTION_LIST(EMIT_SSE2_INSTR)
SSE2_INSTRUCTION_LIST_SD(EMIT_SSE2_INSTR)
#undef EMIT_SSE2_INSTR
@@ -514,6 +506,7 @@ TEST(DisasmX64) {
__ palignr(xmm5, xmm1, 5);
__ palignr(xmm5, Operand(rdx, 4), 5);
SSSE3_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
+ SSSE3_UNOP_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
}
}
@@ -579,7 +572,7 @@ TEST(DisasmX64) {
__ blendvpd(xmm5, Operand(rdx, 4));
SSE4_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
- SSE4_PMOV_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
+ SSE4_UNOP_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
SSE4_EXTRACT_INSTRUCTION_LIST(EMIT_SSE34_IMM_INSTR)
}
}
@@ -635,29 +628,14 @@ TEST(DisasmX64) {
__ vmovdqu(xmm9, Operand(rbx, rcx, times_4, 10000));
__ vmovdqu(Operand(rbx, rcx, times_4, 10000), xmm0);
- __ vaddsd(xmm0, xmm1, xmm2);
- __ vaddsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
- __ vmulsd(xmm0, xmm1, xmm2);
- __ vmulsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
- __ vsubsd(xmm0, xmm1, xmm2);
- __ vsubsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
- __ vdivsd(xmm0, xmm1, xmm2);
- __ vdivsd(xmm0, xmm1, Operand(rbx, rcx, times_2, 10000));
- __ vminsd(xmm8, xmm1, xmm2);
- __ vminsd(xmm9, xmm1, Operand(rbx, rcx, times_8, 10000));
- __ vmaxsd(xmm8, xmm1, xmm2);
- __ vmaxsd(xmm9, xmm1, Operand(rbx, rcx, times_1, 10000));
__ vroundss(xmm9, xmm1, xmm2, kRoundDown);
__ vroundsd(xmm8, xmm3, xmm0, kRoundDown);
- __ vsqrtsd(xmm8, xmm1, xmm2);
- __ vsqrtsd(xmm9, xmm1, Operand(rbx, rcx, times_1, 10000));
__ vucomisd(xmm9, xmm1);
__ vucomisd(xmm8, Operand(rbx, rdx, times_2, 10981));
__ vcvtss2sd(xmm4, xmm9, xmm11);
- __ vcvtsd2ss(xmm9, xmm3, xmm2);
__ vcvtss2sd(xmm4, xmm9, Operand(rbx, rcx, times_1, 10000));
- __ vcvtsd2ss(xmm9, xmm3, Operand(rbx, rcx, times_1, 10000));
+ __ vcvttps2dq(xmm4, xmm11);
__ vcvtlsi2sd(xmm5, xmm9, rcx);
__ vcvtlsi2sd(xmm9, xmm3, Operand(rbx, r9, times_4, 10000));
__ vcvtqsi2sd(xmm5, xmm9, r11);
@@ -687,13 +665,6 @@ TEST(DisasmX64) {
__ vhaddps(xmm0, xmm1, xmm9);
__ vhaddps(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
- __ vandpd(xmm0, xmm9, xmm2);
- __ vandpd(xmm9, xmm1, Operand(rbx, rcx, times_4, 10000));
- __ vorpd(xmm0, xmm1, xmm9);
- __ vorpd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
- __ vxorpd(xmm0, xmm1, xmm9);
- __ vxorpd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
-
__ vpcmpeqd(xmm0, xmm15, xmm5);
__ vpcmpeqd(xmm15, xmm0, Operand(rbx, rcx, times_4, 10000));
@@ -744,23 +715,44 @@ TEST(DisasmX64) {
__ v##instruction(xmm10, xmm5, xmm1); \
__ v##instruction(xmm10, xmm5, Operand(rdx, 4));
+ SSE2_INSTRUCTION_LIST(EMIT_SSE2_AVXINSTR)
+#undef EMIT_SSE2_AVXINSTR
+
+#define EMIT_SSE2_UNOP_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3) \
+ __ v##instruction(xmm10, xmm1); \
+ __ v##instruction(xmm10, Operand(rdx, 4));
+
+ SSE2_UNOP_INSTRUCTION_LIST(EMIT_SSE2_UNOP_AVXINSTR)
+#undef EMIT_SSE2_UNOP_AVXINSTR
+
+#define EMIT_SSE2_SD_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3) \
+ __ v##instruction(xmm10, xmm5, xmm1); \
+ __ v##instruction(xmm10, xmm5, Operand(rbx, rcx, times_4, 10000));
+ SSE2_INSTRUCTION_LIST_SD(EMIT_SSE2_SD_AVXINSTR)
+#undef EMIT_SSE2_SD_AVXINSTR
+
#define EMIT_SSE34_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3, \
notUsed4) \
__ v##instruction(xmm10, xmm5, xmm1); \
__ v##instruction(xmm10, xmm5, Operand(rdx, 4));
- SSE2_INSTRUCTION_LIST(EMIT_SSE2_AVXINSTR)
SSSE3_INSTRUCTION_LIST(EMIT_SSE34_AVXINSTR)
SSE4_INSTRUCTION_LIST(EMIT_SSE34_AVXINSTR)
SSE4_2_INSTRUCTION_LIST(EMIT_SSE34_AVXINSTR)
-#undef EMIT_SSE2_AVXINSTR
#undef EMIT_SSE34_AVXINSTR
+#define EMIT_SSSE3_UNOP_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3, \
+ notUsed4) \
+ __ v##instruction(xmm9, xmm3); \
+ __ v##instruction(xmm9, Operand(rdx, 5));
+ SSSE3_UNOP_INSTRUCTION_LIST(EMIT_SSSE3_UNOP_AVXINSTR)
+#undef EMIT_SSSE3_UNOP_AVXINSTR
+
#define EMIT_SSE4_PMOV_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3, \
notUsed4) \
__ v##instruction(xmm10, xmm1); \
__ v##instruction(xmm10, Operand(rdx, 4));
- SSE4_PMOV_INSTRUCTION_LIST(EMIT_SSE4_PMOV_AVXINSTR)
+ SSE4_UNOP_INSTRUCTION_LIST(EMIT_SSE4_PMOV_AVXINSTR)
#undef EMIT_SSE4_PMOV_AVXINSTR
#define EMIT_SSE2_SHIFT_IMM_AVX(instruction, notUsed1, notUsed2, notUsed3, \
@@ -791,8 +783,11 @@ TEST(DisasmX64) {
__ vpinsrq(xmm1, xmm2, rax, 9);
__ vpinsrq(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 9);
__ vpshufd(xmm1, xmm2, 85);
+ __ vpshufd(xmm1, Operand(rbx, rcx, times_4, 10000), 85);
__ vpshuflw(xmm1, xmm2, 85);
__ vpshuflw(xmm1, Operand(rbx, rcx, times_4, 10000), 85);
+ __ vpshufhw(xmm1, xmm2, 85);
+ __ vpshufhw(xmm1, Operand(rbx, rcx, times_4, 10000), 85);
__ vshufps(xmm3, xmm2, xmm3, 3);
__ vpblendw(xmm1, xmm2, xmm3, 23);
__ vpblendw(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 23);
diff --git a/deps/v8/test/cctest/test-factory.cc b/deps/v8/test/cctest/test-factory.cc
index 4ed7cb7bca..98823f1ec9 100644
--- a/deps/v8/test/cctest/test-factory.cc
+++ b/deps/v8/test/cctest/test-factory.cc
@@ -20,7 +20,8 @@ namespace {
// This needs to be large enough to create a new nosnap Isolate, but smaller
// than kMaximalCodeRangeSize so we can recover from the OOM.
constexpr int kInstructionSize = 100 * MB;
-STATIC_ASSERT(kInstructionSize < kMaximalCodeRangeSize || !kRequiresCodeRange);
+STATIC_ASSERT(kInstructionSize < kMaximalCodeRangeSize ||
+ !kPlatformRequiresCodeRange);
size_t NearHeapLimitCallback(void* raw_bool, size_t current_heap_limit,
size_t initial_heap_limit) {
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 91db7e51a5..a15eeff133 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -216,6 +216,39 @@ TEST(VectorCallFeedback) {
CHECK_EQ(MONOMORPHIC, nexus.ic_state());
}
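+// With two distinct closures of the same function called from the same call
+// site, the call IC goes polymorphic and holds a weak reference to the
+// closures' shared feedback cell.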
+TEST(VectorPolymorphicCallFeedback) {
+ if (!i::FLAG_use_ic) return;
+ if (i::FLAG_always_opt) return;
+ FLAG_allow_natives_syntax = true;
+ FLAG_lazy_feedback_allocation = false;
+
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+ // Make sure the call feedback of a() in f() becomes polymorphic.
+ CompileRun(
+ "function foo_maker() { return () => { return 17; } }"
+ "a_foo = foo_maker();"
+ "function f(a) { a(); } f(foo_maker());"
+ "f(foo_maker());");
+ Handle<JSFunction> f = GetFunction("f");
+ Handle<JSFunction> a_foo = GetFunction("a_foo");
+ // There should be one IC.
+ Handle<FeedbackVector> feedback_vector =
+ Handle<FeedbackVector>(f->feedback_vector(), isolate);
+ FeedbackSlot slot(0);
+ FeedbackNexus nexus(feedback_vector, slot);
+
+ CHECK_EQ(POLYMORPHIC, nexus.ic_state());
+ HeapObject heap_object;
+ CHECK(nexus.GetFeedback()->GetHeapObjectIfWeak(&heap_object));
+ CHECK(heap_object.IsFeedbackCell(isolate));
+ // Ensure this is the feedback cell for the closure returned by
+ // foo_maker.
+ CHECK_EQ(heap_object, a_foo->raw_feedback_cell());
+}
+
TEST(VectorCallFeedbackForArray) {
if (!i::FLAG_use_ic) return;
if (i::FLAG_always_opt) return;
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 9deb1ff70c..840478a520 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -11,7 +11,7 @@
#include "src/execution/execution.h"
#include "src/handles/global-handles.h"
-#include "src/heap/factory.h"
+#include "src/heap/factory-inl.h"
#include "src/ic/stub-cache.h"
#include "src/objects/field-type.h"
#include "src/objects/heap-number-inl.h"
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 73f302f691..ab210ae162 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -94,8 +94,7 @@ static void CheckFunctionName(v8::Local<v8::Script> script,
static v8::Local<v8::Script> Compile(v8::Isolate* isolate, const char* src) {
return v8::Script::Compile(
isolate->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate, src, v8::NewStringType::kNormal)
- .ToLocalChecked())
+ v8::String::NewFromUtf8(isolate, src).ToLocalChecked())
.ToLocalChecked();
}
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index a3c8fb7bde..b4da8ce544 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -98,6 +98,15 @@ void ConstructJSObject(v8::Isolate* isolate, v8::Global<v8::Object>* global) {
CHECK(!global->IsEmpty());
}
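+// Overload of ConstructJSObject that stores the new object in a TracedGlobal
+// handle instead of a Global.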
+void ConstructJSObject(v8::Isolate* isolate,
+ v8::TracedGlobal<v8::Object>* traced) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object(v8::Object::New(isolate));
+ CHECK(!object.IsEmpty());
+ *traced = v8::TracedGlobal<v8::Object>(isolate, object);
+ CHECK(!traced->IsEmpty());
+}
+
template <typename HandleContainer>
void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
HandleContainer* flag_and_persistent) {
@@ -437,10 +446,8 @@ TEST(TracedGlobalToJSApiObjectWithModifiedMapSurvivesScavenge) {
// Create an API object which does not have the same map as constructor.
auto function_template = FunctionTemplate::New(isolate);
auto instance_t = function_template->InstanceTemplate();
- instance_t->Set(
- v8::String::NewFromUtf8(isolate, "a", NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(isolate, 10));
+ instance_t->Set(v8::String::NewFromUtf8Literal(isolate, "a"),
+ v8::Number::New(isolate, 10));
auto function =
function_template->GetFunction(context.local()).ToLocalChecked();
auto i = function->NewInstance(context.local()).ToLocalChecked();
@@ -462,14 +469,10 @@ TEST(TracedGlobalTOJsApiObjectWithElementsSurvivesScavenge) {
// Create an API object which has elements.
auto function_template = FunctionTemplate::New(isolate);
auto instance_t = function_template->InstanceTemplate();
- instance_t->Set(
- v8::String::NewFromUtf8(isolate, "1", NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(isolate, 10));
- instance_t->Set(
- v8::String::NewFromUtf8(isolate, "2", NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(isolate, 10));
+ instance_t->Set(v8::String::NewFromUtf8Literal(isolate, "1"),
+ v8::Number::New(isolate, 10));
+ instance_t->Set(v8::String::NewFromUtf8Literal(isolate, "2"),
+ v8::Number::New(isolate, 10));
auto function =
function_template->GetFunction(context.local()).ToLocalChecked();
auto i = function->NewInstance(context.local()).ToLocalChecked();
@@ -667,5 +670,39 @@ TEST(MoveWeakGlobal) {
InvokeMarkSweep();
}
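+// These tests check that UsedSize() returns to zero once a handle is
+// destroyed, while TotalSize() stays non-zero.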
+TEST(TotalSizeRegularNode) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ Isolate* i_isolate = CcTest::i_isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Global<v8::Object>* global = new Global<v8::Object>();
+ CHECK_EQ(i_isolate->global_handles()->TotalSize(), 0);
+ CHECK_EQ(i_isolate->global_handles()->UsedSize(), 0);
+ ConstructJSObject(isolate, global);
+ CHECK_GT(i_isolate->global_handles()->TotalSize(), 0);
+ CHECK_GT(i_isolate->global_handles()->UsedSize(), 0);
+ delete global;
+ CHECK_GT(i_isolate->global_handles()->TotalSize(), 0);
+ CHECK_EQ(i_isolate->global_handles()->UsedSize(), 0);
+}
+
+TEST(TotalSizeTracedNode) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ Isolate* i_isolate = CcTest::i_isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::TracedGlobal<v8::Object>* global = new TracedGlobal<v8::Object>();
+ CHECK_EQ(i_isolate->global_handles()->TotalSize(), 0);
+ CHECK_EQ(i_isolate->global_handles()->UsedSize(), 0);
+ ConstructJSObject(isolate, global);
+ CHECK_GT(i_isolate->global_handles()->TotalSize(), 0);
+ CHECK_GT(i_isolate->global_handles()->UsedSize(), 0);
+ delete global;
+ CHECK_GT(i_isolate->global_handles()->TotalSize(), 0);
+ CHECK_EQ(i_isolate->global_handles()->UsedSize(), 0);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 4d70d66724..63c88ae9b2 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -4053,10 +4053,9 @@ TEST(WeakReference) {
// Create a FeedbackVector.
v8::Local<v8::Script> script =
- v8::Script::Compile(isolate->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate, "function foo() {}",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
+ v8::Script::Compile(
+ isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8Literal(isolate, "function foo() {}"))
.ToLocalChecked();
v8::MaybeLocal<v8::Value> value = script->Run(isolate->GetCurrentContext());
CHECK(!value.IsEmpty());
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index 95cc3c7824..e8c89b7232 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -34,6 +34,7 @@ static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
__ addl(rax, Immediate(1));
}
#elif V8_TARGET_ARCH_ARM64
+ __ CodeEntry();
for (int i = 0; i < kNumInstr; ++i) {
__ Add(x0, x0, Operand(1));
}
@@ -51,7 +52,7 @@ static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
for (int i = 0; i < kNumInstr; ++i) {
__ Addu(v0, v0, Operand(1));
}
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
for (int i = 0; i < kNumInstr; ++i) {
__ addi(r3, r3, Operand(1));
}
@@ -73,6 +74,8 @@ static void FloodWithNop(Isolate* isolate, TestingAssemblerBuffer* buffer) {
__ mov(eax, Operand(esp, kSystemPointerSize));
#elif V8_TARGET_ARCH_X64
__ movl(rax, arg_reg_1);
+#elif V8_TARGET_ARCH_ARM64
+ __ CodeEntry();
#elif V8_TARGET_ARCH_MIPS
__ mov(v0, a0);
#elif V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/test/cctest/test-inspector.cc b/deps/v8/test/cctest/test-inspector.cc
index 2f7e8b1ea1..6dd2aefb9e 100644
--- a/deps/v8/test/cctest/test-inspector.cc
+++ b/deps/v8/test/cctest/test-inspector.cc
@@ -9,9 +9,7 @@
#include "include/v8-inspector.h"
#include "include/v8.h"
#include "src/inspector/protocol/Runtime.h"
-#include "src/inspector/string-16.h"
-using v8_inspector::String16;
using v8_inspector::StringBuffer;
using v8_inspector::StringView;
using v8_inspector::V8ContextInfo;
@@ -65,16 +63,3 @@ TEST(WrapInsideWrapOnInterrupt) {
isolate->RequestInterrupt(&WrapOnInterrupt, session.get());
session->wrapObject(env.local(), v8::Null(isolate), object_group_view, false);
}
-
-TEST(String16EndianTest) {
- const v8_inspector::UChar* expected =
- reinterpret_cast<const v8_inspector::UChar*>(u"Hello, \U0001F30E.");
- const uint16_t* utf16le = reinterpret_cast<const uint16_t*>(
- "H\0e\0l\0l\0o\0,\0 \0\x3c\xd8\x0e\xdf.\0"); // Same text in UTF16LE
- // encoding
-
- String16 utf16_str = String16::fromUTF16LE(utf16le, 10);
- String16 expected_str = expected;
-
- CHECK_EQ(utf16_str, expected_str);
-}
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index db43f46e71..6759c37128 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -16,23 +16,24 @@ namespace internal {
namespace {
-Handle<JSFinalizationGroup> ConstructJSFinalizationGroup(Isolate* isolate) {
+Handle<JSFinalizationRegistry> ConstructJSFinalizationRegistry(
+ Isolate* isolate) {
Factory* factory = isolate->factory();
- Handle<String> finalization_group_name =
- factory->NewStringFromStaticChars("FinalizationGroup");
+ Handle<String> finalization_registry_name =
+ factory->NewStringFromStaticChars("FinalizationRegistry");
Handle<Object> global =
handle(isolate->native_context()->global_object(), isolate);
- Handle<JSFunction> finalization_group_fun = Handle<JSFunction>::cast(
- Object::GetProperty(isolate, global, finalization_group_name)
+ Handle<JSFunction> finalization_registry_fun = Handle<JSFunction>::cast(
+ Object::GetProperty(isolate, global, finalization_registry_name)
.ToHandleChecked());
- auto finalization_group = Handle<JSFinalizationGroup>::cast(
- JSObject::New(finalization_group_fun, finalization_group_fun,
+ auto finalization_registry = Handle<JSFinalizationRegistry>::cast(
+ JSObject::New(finalization_registry_fun, finalization_registry_fun,
Handle<AllocationSite>::null())
.ToHandleChecked());
#ifdef VERIFY_HEAP
- finalization_group->JSFinalizationGroupVerify(isolate);
+ finalization_registry->JSFinalizationRegistryVerify(isolate);
#endif // VERIFY_HEAP
- return finalization_group;
+ return finalization_registry;
}
Handle<JSWeakRef> ConstructJSWeakRef(Handle<JSReceiver> target,
@@ -64,27 +65,28 @@ Handle<JSObject> CreateKey(const char* key_prop_value, Isolate* isolate) {
return key;
}
-Handle<WeakCell> FinalizationGroupRegister(
- Handle<JSFinalizationGroup> finalization_group, Handle<JSObject> target,
- Handle<Object> holdings, Handle<Object> key, Isolate* isolate) {
- JSFinalizationGroup::Register(finalization_group, target, holdings, key,
- isolate);
- CHECK(finalization_group->active_cells().IsWeakCell());
+Handle<WeakCell> FinalizationRegistryRegister(
+ Handle<JSFinalizationRegistry> finalization_registry,
+ Handle<JSObject> target, Handle<Object> holdings, Handle<Object> key,
+ Isolate* isolate) {
+ JSFinalizationRegistry::Register(finalization_registry, target, holdings, key,
+ isolate);
+ CHECK(finalization_registry->active_cells().IsWeakCell());
Handle<WeakCell> weak_cell =
- handle(WeakCell::cast(finalization_group->active_cells()), isolate);
+ handle(WeakCell::cast(finalization_registry->active_cells()), isolate);
#ifdef VERIFY_HEAP
weak_cell->WeakCellVerify(isolate);
#endif // VERIFY_HEAP
return weak_cell;
}
-Handle<WeakCell> FinalizationGroupRegister(
- Handle<JSFinalizationGroup> finalization_group, Handle<JSObject> target,
- Isolate* isolate) {
+Handle<WeakCell> FinalizationRegistryRegister(
+ Handle<JSFinalizationRegistry> finalization_registry,
+ Handle<JSObject> target, Isolate* isolate) {
Handle<Object> undefined =
handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
- return FinalizationGroupRegister(finalization_group, target, undefined,
- undefined, isolate);
+ return FinalizationRegistryRegister(finalization_registry, target, undefined,
+ undefined, isolate);
}
void NullifyWeakCell(Handle<WeakCell> weak_cell, Isolate* isolate) {
@@ -158,6 +160,17 @@ void VerifyWeakCellKeyChain(Isolate* isolate, SimpleNumberDictionary key_map,
va_end(args);
}
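+// Creates a JSWeakRef in an inner scope and marks its target with
+// KeepDuringJob, so the target is kept alive until the next microtask
+// checkpoint even though no strong handle survives.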
+Handle<JSWeakRef> MakeWeakRefAndKeepDuringJob(Isolate* isolate) {
+ HandleScope inner_scope(isolate);
+
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
+ isolate->heap()->KeepDuringJob(js_object);
+
+ return inner_scope.CloseAndEscape(inner_weak_ref);
+}
+
} // namespace
TEST(TestRegister) {
@@ -166,36 +179,36 @@ TEST(TestRegister) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
// Register a weak reference and verify internal data structures.
Handle<WeakCell> weak_cell1 =
- FinalizationGroupRegister(finalization_group, js_object, isolate);
+ FinalizationRegistryRegister(finalization_registry, js_object, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 1,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 1,
*weak_cell1);
CHECK(weak_cell1->key_list_prev().IsUndefined(isolate));
CHECK(weak_cell1->key_list_next().IsUndefined(isolate));
- CHECK(finalization_group->cleared_cells().IsUndefined(isolate));
+ CHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
// No key was used during registration, key-based map stays uninitialized.
- CHECK(finalization_group->key_map().IsUndefined(isolate));
+ CHECK(finalization_registry->key_map().IsUndefined(isolate));
// Register another weak reference and verify internal data structures.
Handle<WeakCell> weak_cell2 =
- FinalizationGroupRegister(finalization_group, js_object, isolate);
+ FinalizationRegistryRegister(finalization_registry, js_object, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 2,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 2,
*weak_cell2, *weak_cell1);
CHECK(weak_cell2->key_list_prev().IsUndefined(isolate));
CHECK(weak_cell2->key_list_next().IsUndefined(isolate));
- CHECK(finalization_group->cleared_cells().IsUndefined(isolate));
- CHECK(finalization_group->key_map().IsUndefined(isolate));
+ CHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
+ CHECK(finalization_registry->key_map().IsUndefined(isolate));
}
TEST(TestRegisterWithKey) {
@@ -204,8 +217,8 @@ TEST(TestRegisterWithKey) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
@@ -215,36 +228,36 @@ TEST(TestRegisterWithKey) {
handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
// Register a weak reference with a key and verify internal data structures.
- Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell1 = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 1, *weak_cell1);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 0);
}
// Register another weak reference with a different key and verify internal
// data structures.
- Handle<WeakCell> weak_cell2 = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token2, isolate);
+ Handle<WeakCell> weak_cell2 = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token2, isolate);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 1, *weak_cell1);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 1, *weak_cell2);
}
// Register another weak reference with token1 and verify internal data
// structures.
- Handle<WeakCell> weak_cell3 = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell3 = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 2, *weak_cell3,
*weak_cell1);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 1, *weak_cell2);
@@ -257,29 +270,29 @@ TEST(TestWeakCellNullify1) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<WeakCell> weak_cell1 =
- FinalizationGroupRegister(finalization_group, js_object, isolate);
+ FinalizationRegistryRegister(finalization_registry, js_object, isolate);
Handle<WeakCell> weak_cell2 =
- FinalizationGroupRegister(finalization_group, js_object, isolate);
+ FinalizationRegistryRegister(finalization_registry, js_object, isolate);
// Nullify the first WeakCell and verify internal data structures.
NullifyWeakCell(weak_cell1, isolate);
- CHECK_EQ(finalization_group->active_cells(), *weak_cell2);
+ CHECK_EQ(finalization_registry->active_cells(), *weak_cell2);
CHECK(weak_cell2->prev().IsUndefined(isolate));
CHECK(weak_cell2->next().IsUndefined(isolate));
- CHECK_EQ(finalization_group->cleared_cells(), *weak_cell1);
+ CHECK_EQ(finalization_registry->cleared_cells(), *weak_cell1);
CHECK(weak_cell1->prev().IsUndefined(isolate));
CHECK(weak_cell1->next().IsUndefined(isolate));
// Nullify the second WeakCell and verify internal data structures.
NullifyWeakCell(weak_cell2, isolate);
- CHECK(finalization_group->active_cells().IsUndefined(isolate));
- CHECK_EQ(finalization_group->cleared_cells(), *weak_cell2);
+ CHECK(finalization_registry->active_cells().IsUndefined(isolate));
+ CHECK_EQ(finalization_registry->cleared_cells(), *weak_cell2);
CHECK_EQ(weak_cell2->next(), *weak_cell1);
CHECK(weak_cell2->prev().IsUndefined(isolate));
CHECK_EQ(weak_cell1->prev(), *weak_cell2);
@@ -292,92 +305,92 @@ TEST(TestWeakCellNullify2) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<WeakCell> weak_cell1 =
- FinalizationGroupRegister(finalization_group, js_object, isolate);
+ FinalizationRegistryRegister(finalization_registry, js_object, isolate);
Handle<WeakCell> weak_cell2 =
- FinalizationGroupRegister(finalization_group, js_object, isolate);
+ FinalizationRegistryRegister(finalization_registry, js_object, isolate);
// Like TestWeakCellNullify1 but nullify the WeakCells in opposite order.
NullifyWeakCell(weak_cell2, isolate);
- CHECK_EQ(finalization_group->active_cells(), *weak_cell1);
+ CHECK_EQ(finalization_registry->active_cells(), *weak_cell1);
CHECK(weak_cell1->prev().IsUndefined(isolate));
CHECK(weak_cell1->next().IsUndefined(isolate));
- CHECK_EQ(finalization_group->cleared_cells(), *weak_cell2);
+ CHECK_EQ(finalization_registry->cleared_cells(), *weak_cell2);
CHECK(weak_cell2->prev().IsUndefined(isolate));
CHECK(weak_cell2->next().IsUndefined(isolate));
NullifyWeakCell(weak_cell1, isolate);
- CHECK(finalization_group->active_cells().IsUndefined(isolate));
- CHECK_EQ(finalization_group->cleared_cells(), *weak_cell1);
+ CHECK(finalization_registry->active_cells().IsUndefined(isolate));
+ CHECK_EQ(finalization_registry->cleared_cells(), *weak_cell1);
CHECK_EQ(weak_cell1->next(), *weak_cell2);
CHECK(weak_cell1->prev().IsUndefined(isolate));
CHECK_EQ(weak_cell2->prev(), *weak_cell1);
CHECK(weak_cell2->next().IsUndefined(isolate));
}
-TEST(TestJSFinalizationGroupPopClearedCellHoldings1) {
+TEST(TestJSFinalizationRegistryPopClearedCellHoldings1) {
FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<Object> undefined =
handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
Handle<Object> holdings1 = factory->NewStringFromAsciiChecked("holdings1");
- Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
- finalization_group, js_object, holdings1, undefined, isolate);
+ Handle<WeakCell> weak_cell1 = FinalizationRegistryRegister(
+ finalization_registry, js_object, holdings1, undefined, isolate);
Handle<Object> holdings2 = factory->NewStringFromAsciiChecked("holdings2");
- Handle<WeakCell> weak_cell2 = FinalizationGroupRegister(
- finalization_group, js_object, holdings2, undefined, isolate);
+ Handle<WeakCell> weak_cell2 = FinalizationRegistryRegister(
+ finalization_registry, js_object, holdings2, undefined, isolate);
Handle<Object> holdings3 = factory->NewStringFromAsciiChecked("holdings3");
- Handle<WeakCell> weak_cell3 = FinalizationGroupRegister(
- finalization_group, js_object, holdings3, undefined, isolate);
+ Handle<WeakCell> weak_cell3 = FinalizationRegistryRegister(
+ finalization_registry, js_object, holdings3, undefined, isolate);
NullifyWeakCell(weak_cell2, isolate);
NullifyWeakCell(weak_cell3, isolate);
- CHECK(finalization_group->NeedsCleanup());
- Object cleared1 =
- JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK(finalization_registry->NeedsCleanup());
+ Object cleared1 = JSFinalizationRegistry::PopClearedCellHoldings(
+ finalization_registry, isolate);
CHECK_EQ(cleared1, *holdings3);
CHECK(weak_cell3->prev().IsUndefined(isolate));
CHECK(weak_cell3->next().IsUndefined(isolate));
- CHECK(finalization_group->NeedsCleanup());
- Object cleared2 =
- JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK(finalization_registry->NeedsCleanup());
+ Object cleared2 = JSFinalizationRegistry::PopClearedCellHoldings(
+ finalization_registry, isolate);
CHECK_EQ(cleared2, *holdings2);
CHECK(weak_cell2->prev().IsUndefined(isolate));
CHECK(weak_cell2->next().IsUndefined(isolate));
- CHECK(!finalization_group->NeedsCleanup());
+ CHECK(!finalization_registry->NeedsCleanup());
NullifyWeakCell(weak_cell1, isolate);
- CHECK(finalization_group->NeedsCleanup());
- Object cleared3 =
- JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK(finalization_registry->NeedsCleanup());
+ Object cleared3 = JSFinalizationRegistry::PopClearedCellHoldings(
+ finalization_registry, isolate);
CHECK_EQ(cleared3, *holdings1);
CHECK(weak_cell1->prev().IsUndefined(isolate));
CHECK(weak_cell1->next().IsUndefined(isolate));
- CHECK(!finalization_group->NeedsCleanup());
- CHECK(finalization_group->active_cells().IsUndefined(isolate));
- CHECK(finalization_group->cleared_cells().IsUndefined(isolate));
+ CHECK(!finalization_registry->NeedsCleanup());
+ CHECK(finalization_registry->active_cells().IsUndefined(isolate));
+ CHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
}
-TEST(TestJSFinalizationGroupPopClearedCellHoldings2) {
+TEST(TestJSFinalizationRegistryPopClearedCellHoldings2) {
// Test that when all WeakCells for a key are popped, the key is removed from
// the key map.
FLAG_harmony_weak_refs = true;
@@ -386,18 +399,18 @@ TEST(TestJSFinalizationGroupPopClearedCellHoldings2) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<JSObject> token1 = CreateKey("token1", isolate);
Handle<Object> holdings1 = factory->NewStringFromAsciiChecked("holdings1");
- Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
- finalization_group, js_object, holdings1, token1, isolate);
+ Handle<WeakCell> weak_cell1 = FinalizationRegistryRegister(
+ finalization_registry, js_object, holdings1, token1, isolate);
Handle<Object> holdings2 = factory->NewStringFromAsciiChecked("holdings2");
- Handle<WeakCell> weak_cell2 = FinalizationGroupRegister(
- finalization_group, js_object, holdings2, token1, isolate);
+ Handle<WeakCell> weak_cell2 = FinalizationRegistryRegister(
+ finalization_registry, js_object, holdings2, token1, isolate);
NullifyWeakCell(weak_cell1, isolate);
NullifyWeakCell(weak_cell2, isolate);
@@ -406,28 +419,28 @@ TEST(TestJSFinalizationGroupPopClearedCellHoldings2) {
// active_cells to cleared_cells).
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 2, *weak_cell2,
*weak_cell1);
}
- Object cleared1 =
- JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ Object cleared1 = JSFinalizationRegistry::PopClearedCellHoldings(
+ finalization_registry, isolate);
CHECK_EQ(cleared1, *holdings2);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 1, *weak_cell1);
}
- Object cleared2 =
- JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ Object cleared2 = JSFinalizationRegistry::PopClearedCellHoldings(
+ finalization_registry, isolate);
CHECK_EQ(cleared2, *holdings1);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 0);
}
}
@@ -438,8 +451,8 @@ TEST(TestUnregisterActiveCells) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
@@ -448,41 +461,41 @@ TEST(TestUnregisterActiveCells) {
Handle<Object> undefined =
handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
- Handle<WeakCell> weak_cell1a = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
- Handle<WeakCell> weak_cell1b = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell1a = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell1b = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
- Handle<WeakCell> weak_cell2a = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token2, isolate);
- Handle<WeakCell> weak_cell2b = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token2, isolate);
+ Handle<WeakCell> weak_cell2a = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token2, isolate);
+ Handle<WeakCell> weak_cell2b = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token2, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 4,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 4,
*weak_cell2b, *weak_cell2a, *weak_cell1b, *weak_cell1a);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 0);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 2, *weak_cell1b,
*weak_cell1a);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 2, *weak_cell2b,
*weak_cell2a);
}
- JSFinalizationGroup::Unregister(finalization_group, token1, isolate);
+ JSFinalizationRegistry::Unregister(finalization_registry, token1, isolate);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 0);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 2, *weak_cell2b,
*weak_cell2a);
}
// Both weak_cell1a and weak_cell1b removed from active_cells.
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 2,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 2,
*weak_cell2b, *weak_cell2a);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 0);
}
TEST(TestUnregisterActiveAndClearedCells) {
@@ -491,8 +504,8 @@ TEST(TestUnregisterActiveAndClearedCells) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
@@ -501,40 +514,40 @@ TEST(TestUnregisterActiveAndClearedCells) {
Handle<Object> undefined =
handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
- Handle<WeakCell> weak_cell1a = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
- Handle<WeakCell> weak_cell1b = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell1a = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell1b = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
- Handle<WeakCell> weak_cell2a = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token2, isolate);
- Handle<WeakCell> weak_cell2b = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token2, isolate);
+ Handle<WeakCell> weak_cell2a = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token2, isolate);
+ Handle<WeakCell> weak_cell2b = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token2, isolate);
NullifyWeakCell(weak_cell2a, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 3,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 3,
*weak_cell2b, *weak_cell1b, *weak_cell1a);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 1,
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 1,
*weak_cell2a);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 2, *weak_cell1b,
*weak_cell1a);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 2, *weak_cell2b,
*weak_cell2a);
}
- JSFinalizationGroup::Unregister(finalization_group, token2, isolate);
+ JSFinalizationRegistry::Unregister(finalization_registry, token2, isolate);
// Both weak_cell2a and weak_cell2b removed.
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 2,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 2,
*weak_cell1b, *weak_cell1a);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 0);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 2, *weak_cell1b,
*weak_cell1a);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 0);
@@ -547,8 +560,8 @@ TEST(TestWeakCellUnregisterTwice) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
@@ -556,35 +569,35 @@ TEST(TestWeakCellUnregisterTwice) {
Handle<Object> undefined =
handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
- Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell1 = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 1,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 1,
*weak_cell1);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 0);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 1, *weak_cell1);
}
- JSFinalizationGroup::Unregister(finalization_group, token1, isolate);
+ JSFinalizationRegistry::Unregister(finalization_registry, token1, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 0);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 0);
}
- JSFinalizationGroup::Unregister(finalization_group, token1, isolate);
+ JSFinalizationRegistry::Unregister(finalization_registry, token1, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 0);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 0);
}
}
@@ -596,37 +609,37 @@ TEST(TestWeakCellUnregisterPopped) {
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<JSObject> token1 = CreateKey("token1", isolate);
Handle<Object> holdings1 = factory->NewStringFromAsciiChecked("holdings1");
- Handle<WeakCell> weak_cell1 = FinalizationGroupRegister(
- finalization_group, js_object, holdings1, token1, isolate);
+ Handle<WeakCell> weak_cell1 = FinalizationRegistryRegister(
+ finalization_registry, js_object, holdings1, token1, isolate);
NullifyWeakCell(weak_cell1, isolate);
- CHECK(finalization_group->NeedsCleanup());
- Object cleared1 =
- JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
+ CHECK(finalization_registry->NeedsCleanup());
+ Object cleared1 = JSFinalizationRegistry::PopClearedCellHoldings(
+ finalization_registry, isolate);
CHECK_EQ(cleared1, *holdings1);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 0);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 0);
}
- JSFinalizationGroup::Unregister(finalization_group, token1, isolate);
+ JSFinalizationRegistry::Unregister(finalization_registry, token1, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 0);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 0);
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 0);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 0);
}
}
@@ -637,11 +650,11 @@ TEST(TestWeakCellUnregisterNonexistentKey) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> token1 = CreateKey("token1", isolate);
- JSFinalizationGroup::Unregister(finalization_group, token1, isolate);
+ JSFinalizationRegistry::Unregister(finalization_registry, token1, isolate);
}
TEST(TestJSWeakRef) {
@@ -715,30 +728,35 @@ TEST(TestJSWeakRefKeepDuringJob) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<JSWeakRef> weak_ref;
- {
- HandleScope inner_scope(isolate);
-
- Handle<JSObject> js_object =
- isolate->factory()->NewJSObject(isolate->object_function());
- Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
- heap->KeepDuringJob(js_object);
+ Handle<JSWeakRef> weak_ref = MakeWeakRefAndKeepDuringJob(isolate);
+ CHECK(!weak_ref->target().IsUndefined(isolate));
+ CcTest::CollectAllGarbage();
+ CHECK(!weak_ref->target().IsUndefined(isolate));
- weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
- }
+ // Clears the KeepDuringJob set.
+ context->GetIsolate()->ClearKeptObjects();
+ CcTest::CollectAllGarbage();
+ CHECK(weak_ref->target().IsUndefined(isolate));
+ weak_ref = MakeWeakRefAndKeepDuringJob(isolate);
+ CHECK(!weak_ref->target().IsUndefined(isolate));
+ CcTest::CollectAllGarbage();
CHECK(!weak_ref->target().IsUndefined(isolate));
+ // ClearKeptObjects should be called by PerformMicrotaskCheckpoint.
+ CcTest::isolate()->PerformMicrotaskCheckpoint();
CcTest::CollectAllGarbage();
+ CHECK(weak_ref->target().IsUndefined(isolate));
+ weak_ref = MakeWeakRefAndKeepDuringJob(isolate);
CHECK(!weak_ref->target().IsUndefined(isolate));
-
- // Clears the KeepDuringJob set.
- context->GetIsolate()->ClearKeptObjects();
CcTest::CollectAllGarbage();
+ CHECK(!weak_ref->target().IsUndefined(isolate));
+ // ClearKeptObjects should be called by MicrotasksScope::PerformCheckpoint.
+ v8::MicrotasksScope::PerformCheckpoint(CcTest::isolate());
+ CcTest::CollectAllGarbage();
CHECK(weak_ref->target().IsUndefined(isolate));
}
@@ -754,17 +772,7 @@ TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<JSWeakRef> weak_ref;
- {
- HandleScope inner_scope(isolate);
-
- Handle<JSObject> js_object =
- isolate->factory()->NewJSObject(isolate->object_function());
- Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
- heap->KeepDuringJob(js_object);
-
- weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
- }
+ Handle<JSWeakRef> weak_ref = MakeWeakRefAndKeepDuringJob(isolate);
CHECK(!weak_ref->target().IsUndefined(isolate));
@@ -787,8 +795,8 @@ TEST(TestRemoveUnregisterToken) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
- Handle<JSFinalizationGroup> finalization_group =
- ConstructJSFinalizationGroup(isolate);
+ Handle<JSFinalizationRegistry> finalization_registry =
+ ConstructJSFinalizationRegistry(isolate);
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
@@ -797,32 +805,32 @@ TEST(TestRemoveUnregisterToken) {
Handle<Object> undefined =
handle(ReadOnlyRoots(isolate).undefined_value(), isolate);
- Handle<WeakCell> weak_cell1a = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
- Handle<WeakCell> weak_cell1b = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell1a = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
+ Handle<WeakCell> weak_cell1b = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token1, isolate);
- Handle<WeakCell> weak_cell2a = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token2, isolate);
- Handle<WeakCell> weak_cell2b = FinalizationGroupRegister(
- finalization_group, js_object, undefined, token2, isolate);
+ Handle<WeakCell> weak_cell2a = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token2, isolate);
+ Handle<WeakCell> weak_cell2b = FinalizationRegistryRegister(
+ finalization_registry, js_object, undefined, token2, isolate);
NullifyWeakCell(weak_cell2a, isolate);
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 3,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 3,
*weak_cell2b, *weak_cell1b, *weak_cell1a);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 1,
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 1,
*weak_cell2a);
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 2, *weak_cell1b,
*weak_cell1a);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 2, *weak_cell2b,
*weak_cell2a);
}
- finalization_group->RemoveUnregisterToken(
+ finalization_registry->RemoveUnregisterToken(
JSReceiver::cast(*token2), isolate,
[undefined](WeakCell matched_cell) {
matched_cell.set_unregister_token(*undefined);
@@ -830,15 +838,15 @@ TEST(TestRemoveUnregisterToken) {
[](HeapObject, ObjectSlot, Object) {});
// Both weak_cell2a and weak_cell2b remain on the weak cell chains.
- VerifyWeakCellChain(isolate, finalization_group->active_cells(), 3,
+ VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 3,
*weak_cell2b, *weak_cell1b, *weak_cell1a);
- VerifyWeakCellChain(isolate, finalization_group->cleared_cells(), 1,
+ VerifyWeakCellChain(isolate, finalization_registry->cleared_cells(), 1,
*weak_cell2a);
// But both weak_cell2a and weak_cell2b are removed from the key chain.
{
SimpleNumberDictionary key_map =
- SimpleNumberDictionary::cast(finalization_group->key_map());
+ SimpleNumberDictionary::cast(finalization_registry->key_map());
VerifyWeakCellKeyChain(isolate, key_map, *token1, 2, *weak_cell1b,
*weak_cell1a);
VerifyWeakCellKeyChain(isolate, key_map, *token2, 0);
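The renames above are mechanical: this V8 release tracks the TC39 WeakRefs proposal's rename of FinalizationGroup to FinalizationRegistry, so the JSFinalizationRegistry object, its static methods, and the cctest helpers all change in lockstep. The register/nullify/pop round-trip these tests exercise, reduced to a minimal cctest-style sketch using the helper names visible in this diff (illustrative only, not part of the patch):

  TEST(SketchRegistryRoundTrip) {
    FLAG_harmony_weak_refs = true;
    CcTest::InitializeVM();
    LocalContext context;
    Isolate* isolate = CcTest::i_isolate();
    HandleScope scope(isolate);
    Handle<JSFinalizationRegistry> registry =
        ConstructJSFinalizationRegistry(isolate);
    Handle<JSObject> target =
        isolate->factory()->NewJSObject(isolate->object_function());
    Handle<WeakCell> cell =
        FinalizationRegistryRegister(registry, target, isolate);
    // Simulate the GC clearing the target, then drain the holdings.
    NullifyWeakCell(cell, isolate);
    CHECK(registry->NeedsCleanup());
    JSFinalizationRegistry::PopClearedCellHoldings(registry, isolate);
    CHECK(!registry->NeedsCleanup());
  }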
diff --git a/deps/v8/test/cctest/test-local-handles.cc b/deps/v8/test/cctest/test-local-handles.cc
new file mode 100644
index 0000000000..0e4fc5c7d1
--- /dev/null
+++ b/deps/v8/test/cctest/test-local-handles.cc
@@ -0,0 +1,96 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+
+#include "src/api/api.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/local-handles-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap.h"
+#include "src/heap/safepoint.h"
+#include "src/objects/heap-number.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+class LocalHandlesThread final : public v8::base::Thread {
+ public:
+ LocalHandlesThread(Heap* heap, Address object, base::Semaphore* sema_started,
+ base::Semaphore* sema_gc_finished)
+ : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
+ heap_(heap),
+ object_(object),
+ sema_started_(sema_started),
+ sema_gc_finished_(sema_gc_finished) {}
+
+ void Run() override {
+ LocalHeap local_heap(heap_);
+ LocalHandleScope scope(&local_heap);
+
+ static constexpr int kNumHandles =
+ kHandleBlockSize * 2 + kHandleBlockSize / 2;
+
+ std::vector<Handle<HeapNumber>> handles;
+ handles.reserve(kNumHandles);
+
+ for (int i = 0; i < kNumHandles; i++) {
+ Handle<HeapNumber> number = handle(
+ HeapNumber::cast(HeapObject::FromAddress(object_)), &local_heap);
+ handles.push_back(number);
+ }
+
+ sema_started_->Signal();
+
+ {
+ ParkedScope scope(&local_heap);
+ sema_gc_finished_->Wait();
+ }
+
+ for (Handle<HeapNumber> handle : handles) {
+ CHECK_EQ(42.0, handle->value());
+ }
+ }
+
+ Heap* heap_;
+ Address object_;
+ base::Semaphore* sema_started_;
+ base::Semaphore* sema_gc_finished_;
+};
+
+TEST(CreateLocalHandles) {
+ CcTest::InitializeVM();
+ FLAG_local_heaps = true;
+ Isolate* isolate = CcTest::i_isolate();
+
+ Address object = kNullAddress;
+
+ {
+ HandleScope handle_scope(isolate);
+ Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(42.0);
+ object = (*number).address();
+ }
+
+ base::Semaphore sema_started(0);
+ base::Semaphore sema_gc_finished(0);
+
+ std::unique_ptr<LocalHandlesThread> thread(new LocalHandlesThread(
+ isolate->heap(), object, &sema_started, &sema_gc_finished));
+ CHECK(thread->Start());
+
+ sema_started.Wait();
+
+ CcTest::CollectAllGarbage();
+ sema_gc_finished.Signal();
+
+ thread->Join();
+}
+
+} // namespace internal
+} // namespace v8
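The new test drives a worker thread that creates handles against a LocalHeap while the main thread forces a full GC. The key piece is the ParkedScope around the semaphore wait: while parked, the worker promises not to touch the heap, so it counts as being at a safepoint and the main thread's collection can proceed. The parking idiom in isolation, assuming the same LocalHeap API as the test above (a sketch, not part of the patch):

  void BlockWhileParked(LocalHeap* local_heap, base::Semaphore* sema) {
    // While this scope is open the thread may not allocate or read the
    // heap, so a GC requested from another thread need not wait for it.
    ParkedScope parked(local_heap);
    sema->Wait();
  }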
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 80a6530dd7..5531b7ec95 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -306,9 +306,7 @@ TEST(Issue23768) {
.ToLocalChecked();
// Script needs to have a name in order to trigger InitLineEnds execution.
v8::Local<v8::String> origin =
- v8::String::NewFromUtf8(CcTest::isolate(), "issue-23768-test",
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromUtf8Literal(CcTest::isolate(), "issue-23768-test");
v8::Local<v8::Script> evil_script =
CompileWithOrigin(source, origin, v8_bool(false));
CHECK(!evil_script.IsEmpty());
@@ -567,7 +565,7 @@ UNINITIALIZED_TEST(LogAll) {
isolate->Dispose();
}
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
UNINITIALIZED_TEST(LogInterpretedFramesNativeStack) {
SETUP_FLAGS();
i::FLAG_interpreted_frames_native_stack = true;
@@ -652,7 +650,7 @@ UNINITIALIZED_TEST(LogInterpretedFramesNativeStackWithSerialization) {
} while (!has_cache);
delete cache;
}
-#endif // V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_S390X
UNINITIALIZED_TEST(ExternalCodeEventListener) {
i::FLAG_log = false;
@@ -755,7 +753,7 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerInnerFunctions) {
isolate2->Dispose();
}
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
UNINITIALIZED_TEST(ExternalCodeEventListenerWithInterpretedFramesNativeStack) {
i::FLAG_log = false;
i::FLAG_prof = false;
@@ -805,7 +803,7 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerWithInterpretedFramesNativeStack) {
}
isolate->Dispose();
}
-#endif // V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_S390X
UNINITIALIZED_TEST(TraceMaps) {
SETUP_FLAGS();
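String::NewFromUtf8Literal, used in the hunk above, is the new spelling for wrapping a compile-time string literal: the length is known statically and the call cannot fail, so it returns Local<String> directly instead of a MaybeLocal that needs ToLocalChecked(). Side by side (a sketch, assuming the v8.h shipped with this update):

  // Old form: fallible at runtime, returns MaybeLocal<String>.
  v8::Local<v8::String> a =
      v8::String::NewFromUtf8(isolate, "issue-23768-test",
                              v8::NewStringType::kNormal)
          .ToLocalChecked();
  // New form: infallible for literals, returns Local<String> directly.
  v8::Local<v8::String> b =
      v8::String::NewFromUtf8Literal(isolate, "issue-23768-test");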
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index 3d668360bd..13c6a3b7a8 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -45,147 +45,160 @@ MaybeLocal<Module> ResolveCallback(Local<Context> context,
}
TEST(ModuleInstantiationFailures1) {
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext env;
- v8::TryCatch try_catch(isolate);
+ bool prev_top_level_await = i::FLAG_harmony_top_level_await;
+ for (auto top_level_await : {true, false}) {
+ i::FLAG_harmony_top_level_await = top_level_await;
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
- Local<Module> module;
- {
- Local<String> source_text = v8_str(
- "import './foo.js';\n"
- "export {} from './bar.js';");
- ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- CHECK_EQ(2, module->GetModuleRequestsLength());
- CHECK(v8_str("./foo.js")->StrictEquals(module->GetModuleRequest(0)));
- v8::Location loc = module->GetModuleRequestLocation(0);
- CHECK_EQ(0, loc.GetLineNumber());
- CHECK_EQ(7, loc.GetColumnNumber());
- CHECK(v8_str("./bar.js")->StrictEquals(module->GetModuleRequest(1)));
- loc = module->GetModuleRequestLocation(1);
- CHECK_EQ(1, loc.GetLineNumber());
- CHECK_EQ(15, loc.GetColumnNumber());
- }
+ Local<Module> module;
+ {
+ Local<String> source_text = v8_str(
+ "import './foo.js';\n"
+ "export {} from './bar.js';");
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ CHECK_EQ(2, module->GetModuleRequestsLength());
+ CHECK(v8_str("./foo.js")->StrictEquals(module->GetModuleRequest(0)));
+ v8::Location loc = module->GetModuleRequestLocation(0);
+ CHECK_EQ(0, loc.GetLineNumber());
+ CHECK_EQ(7, loc.GetColumnNumber());
+ CHECK(v8_str("./bar.js")->StrictEquals(module->GetModuleRequest(1)));
+ loc = module->GetModuleRequestLocation(1);
+ CHECK_EQ(1, loc.GetLineNumber());
+ CHECK_EQ(15, loc.GetColumnNumber());
+ }
- // Instantiation should fail.
- {
- v8::TryCatch inner_try_catch(isolate);
- CHECK(module->InstantiateModule(env.local(), ResolveCallback).IsNothing());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- }
+ // Instantiation should fail.
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(
+ module->InstantiateModule(env.local(), ResolveCallback).IsNothing());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ }
- // Start over again...
- {
- Local<String> source_text = v8_str(
- "import './dep1.js';\n"
- "export {} from './bar.js';");
- ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- }
+ // Start over again...
+ {
+ Local<String> source_text = v8_str(
+ "import './dep1.js';\n"
+ "export {} from './bar.js';");
+ ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
- // dep1.js
- {
- Local<String> source_text = v8_str("");
- ScriptOrigin origin = ModuleOrigin(v8_str("dep1.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- dep1 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- }
+ // dep1.js
+ {
+ Local<String> source_text = v8_str("");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep1.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep1 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
- // Instantiation should fail because a sub-module fails to resolve.
- {
- v8::TryCatch inner_try_catch(isolate);
- CHECK(module->InstantiateModule(env.local(), ResolveCallback).IsNothing());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- }
+ // Instantiation should fail because a sub-module fails to resolve.
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(
+ module->InstantiateModule(env.local(), ResolveCallback).IsNothing());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, module->GetStatus());
+ }
- CHECK(!try_catch.HasCaught());
+ CHECK(!try_catch.HasCaught());
+ }
+ i::FLAG_harmony_top_level_await = prev_top_level_await;
}
TEST(ModuleInstantiationFailures2) {
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext env;
- v8::TryCatch try_catch(isolate);
+ bool prev_top_level_await = i::FLAG_harmony_top_level_await;
+ for (auto top_level_await : {true, false}) {
+ i::FLAG_harmony_top_level_await = top_level_await;
- // root1.js
- Local<Module> root;
- {
- Local<String> source_text =
- v8_str("import './dep1.js'; import './dep2.js'");
- ScriptOrigin origin = ModuleOrigin(v8_str("root1.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- root = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- }
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+ v8::TryCatch try_catch(isolate);
- // dep1.js
- {
- Local<String> source_text = v8_str("export let x = 42");
- ScriptOrigin origin = ModuleOrigin(v8_str("dep1.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- dep1 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- }
+ // root1.js
+ Local<Module> root;
+ {
+ Local<String> source_text =
+ v8_str("import './dep1.js'; import './dep2.js'");
+ ScriptOrigin origin = ModuleOrigin(v8_str("root1.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ root = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
- // dep2.js
- {
- Local<String> source_text = v8_str("import {foo} from './dep3.js'");
- ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- }
+ // dep1.js
+ {
+ Local<String> source_text = v8_str("export let x = 42");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep1.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep1 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
- {
- v8::TryCatch inner_try_catch(isolate);
- CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
- CHECK_EQ(Module::kUninstantiated, root->GetStatus());
- CHECK_EQ(Module::kUninstantiated, dep1->GetStatus());
- CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
- }
+ // dep2.js
+ {
+ Local<String> source_text = v8_str("import {foo} from './dep3.js'");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
- // Change dep2.js
- {
- Local<String> source_text = v8_str("import {foo} from './dep2.js'");
- ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- }
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, root->GetStatus());
+ CHECK_EQ(Module::kUninstantiated, dep1->GetStatus());
+ CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
+ }
- {
- v8::TryCatch inner_try_catch(isolate);
- CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
- CHECK(inner_try_catch.HasCaught());
- CHECK(!inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
- CHECK_EQ(Module::kUninstantiated, root->GetStatus());
- CHECK_EQ(Module::kInstantiated, dep1->GetStatus());
- CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
- }
+ // Change dep2.js
+ {
+ Local<String> source_text = v8_str("import {foo} from './dep2.js'");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
- // Change dep2.js again
- {
- Local<String> source_text = v8_str("import {foo} from './dep3.js'");
- ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- }
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(!inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, root->GetStatus());
+ CHECK_EQ(Module::kInstantiated, dep1->GetStatus());
+ CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
+ }
- {
- v8::TryCatch inner_try_catch(isolate);
- CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
- CHECK(inner_try_catch.HasCaught());
- CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
- CHECK_EQ(Module::kUninstantiated, root->GetStatus());
- CHECK_EQ(Module::kInstantiated, dep1->GetStatus());
- CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
+ // Change dep2.js again
+ {
+ Local<String> source_text = v8_str("import {foo} from './dep3.js'");
+ ScriptOrigin origin = ModuleOrigin(v8_str("dep2.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ dep2 = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ }
+
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(root->InstantiateModule(env.local(), ResolveCallback).IsNothing());
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ CHECK_EQ(Module::kUninstantiated, root->GetStatus());
+ CHECK_EQ(Module::kInstantiated, dep1->GetStatus());
+ CHECK_EQ(Module::kUninstantiated, dep2->GetStatus());
+ }
}
+ i::FLAG_harmony_top_level_await = prev_top_level_await;
}
static MaybeLocal<Module> CompileSpecifierAsModuleResolveCallback(
@@ -726,7 +739,7 @@ TEST(ModuleEvaluationTopLevelAwaitError) {
CHECK(promise->Result()->StrictEquals(v8_str("boom")));
CHECK(module->GetException()->StrictEquals(v8_str("boom")));
- // TODO(joshualitt): I am not sure, but this might not be supposed to throw
+ // TODO(cbruni): I am not sure, but this might not be supposed to throw
// because it is async.
CHECK(!try_catch.HasCaught());
}
@@ -830,7 +843,7 @@ TEST(ModuleEvaluationTopLevelAwaitDynamicImport) {
CHECK_EQ(promise->State(), v8::Promise::kPending);
CHECK(!try_catch.HasCaught());
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
}
i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
@@ -874,7 +887,7 @@ TEST(ModuleEvaluationTopLevelAwaitDynamicImportError) {
CHECK_EQ(promise->State(), v8::Promise::kPending);
CHECK(!try_catch.HasCaught());
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK_EQ(Module::kErrored, module->GetStatus());
CHECK_EQ(promise->State(), v8::Promise::kRejected);
CHECK(promise->Result()->StrictEquals(v8_str("boom")));
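Two patterns recur through the module-test changes: each test body is wrapped in a loop that runs once with --harmony-top-level-await enabled and once without, restoring the previous flag value afterwards, and the deprecated Isolate::RunMicrotasks is replaced by PerformMicrotaskCheckpoint. The flag-toggling skeleton, stripped of the per-test assertions (a sketch):

  bool prev = i::FLAG_harmony_top_level_await;
  for (auto top_level_await : {true, false}) {
    i::FLAG_harmony_top_level_await = top_level_await;
    // ... compile, instantiate, and evaluate the module graph ...
    isolate->PerformMicrotaskCheckpoint();  // was: isolate->RunMicrotasks();
  }
  i::FLAG_harmony_top_level_await = prev;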
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 9104e850db..04e47eb7bd 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -255,6 +255,29 @@ TEST(EnumCache) {
}
}
+TEST(ObjectMethodsThatTruncateMinusZero) {
+ LocalContext env;
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ v8::HandleScope scope(env->GetIsolate());
+
+ Handle<Object> minus_zero = factory->NewNumber(-1.0 * 0.0);
+ CHECK(minus_zero->IsMinusZero());
+
+ Handle<Object> result =
+ Object::ToInteger(isolate, minus_zero).ToHandleChecked();
+ CHECK(result->IsZero());
+
+ result = Object::ToLength(isolate, minus_zero).ToHandleChecked();
+ CHECK(result->IsZero());
+
+ // Choose an error message template; it doesn't matter which.
+ result = Object::ToIndex(isolate, minus_zero,
+ MessageTemplate::kInvalidAtomicAccessIndex)
+ .ToHandleChecked();
+ CHECK(result->IsZero());
+}
+
#define TEST_FUNCTION_KIND(Name) \
TEST(Name) { \
for (int i = 0; i < FunctionKind::kLastFunctionKind; i++) { \
diff --git a/deps/v8/test/cctest/test-orderedhashtable.cc b/deps/v8/test/cctest/test-orderedhashtable.cc
index 189f950b2e..f3887bdacd 100644
--- a/deps/v8/test/cctest/test-orderedhashtable.cc
+++ b/deps/v8/test/cctest/test-orderedhashtable.cc
@@ -1950,7 +1950,7 @@ TEST(OrderedNameDictionarySetEntry) {
Handle<String> other_value = isolate->factory()->InternalizeUtf8String("baz");
PropertyDetails other_details =
PropertyDetails(kAccessor, READ_ONLY, PropertyCellType::kNoCell);
- dict->SetEntry(isolate, entry, *key, *other_value, other_details);
+ dict->SetEntry(entry, *key, *other_value, other_details);
entry = dict->FindEntry(isolate, *key);
CHECK_EQ(0, entry);
@@ -1995,7 +1995,7 @@ TEST(SmallOrderedNameDictionarySetEntry) {
Handle<String> other_value = factory->InternalizeUtf8String("baz");
PropertyDetails other_details =
PropertyDetails(kAccessor, READ_ONLY, PropertyCellType::kNoCell);
- dict->SetEntry(isolate, entry, *key, *other_value, other_details);
+ dict->SetEntry(entry, *key, *other_value, other_details);
entry = dict->FindEntry(isolate, *key);
CHECK_EQ(0, entry);
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 2decf108ac..fd75f263ca 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -907,7 +907,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
HashSeed(CcTest::i_isolate()));
const i::AstRawString* current_symbol =
scanner.CurrentSymbol(&ast_value_factory);
- ast_value_factory.Internalize(CcTest::i_isolate()->factory());
+ ast_value_factory.Internalize(CcTest::i_isolate());
i::Handle<i::String> val = current_symbol->string();
i::DisallowHeapAllocation no_alloc;
i::String::FlatContent content = val->GetFlatContent(no_alloc);
@@ -1061,7 +1061,7 @@ TEST(ScopeUsesArgumentsSuperThis) {
info.set_allow_lazy_parsing(false);
CHECK(i::parsing::ParseProgram(&info, script, isolate));
CHECK(i::Rewriter::Rewrite(&info));
- info.ast_value_factory()->Internalize(isolate->factory());
+ info.ast_value_factory()->Internalize(isolate);
CHECK(i::DeclarationScope::Analyze(&info));
i::DeclarationScope::AllocateScopeInfos(&info, isolate);
CHECK_NOT_NULL(info.literal());
@@ -3214,15 +3214,11 @@ TEST(FuncNameInferrerTwoByte) {
// Make it really non-Latin1 (replace the Xs with a non-Latin1 character).
two_byte_source[14] = two_byte_source[78] = two_byte_name[6] = 0x010D;
v8::Local<v8::String> source =
- v8::String::NewFromTwoByte(isolate, two_byte_source,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromTwoByte(isolate, two_byte_source).ToLocalChecked();
v8::Local<v8::Value> result = CompileRun(source);
CHECK(result->IsString());
v8::Local<v8::String> expected_name =
- v8::String::NewFromTwoByte(isolate, two_byte_name,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromTwoByte(isolate, two_byte_name).ToLocalChecked();
CHECK(result->Equals(isolate->GetCurrentContext(), expected_name).FromJust());
i::DeleteArray(two_byte_source);
i::DeleteArray(two_byte_name);
@@ -3243,40 +3239,17 @@ TEST(FuncNameInferrerEscaped) {
// Fix to correspond to the non-ASCII name in two_byte_source.
two_byte_name[6] = 0x010D;
v8::Local<v8::String> source =
- v8::String::NewFromTwoByte(isolate, two_byte_source,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromTwoByte(isolate, two_byte_source).ToLocalChecked();
v8::Local<v8::Value> result = CompileRun(source);
CHECK(result->IsString());
v8::Local<v8::String> expected_name =
- v8::String::NewFromTwoByte(isolate, two_byte_name,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromTwoByte(isolate, two_byte_name).ToLocalChecked();
CHECK(result->Equals(isolate->GetCurrentContext(), expected_name).FromJust());
i::DeleteArray(two_byte_source);
i::DeleteArray(two_byte_name);
}
-TEST(RegressionLazyFunctionWithErrorWithArg) {
- // Test only applies when lazy parsing.
- if (!i::FLAG_lazy) return;
-
- // The bug occurred when a lazy function had an error which requires a
- // parameter (such as "unknown label" here). The error message was processed
- // before the AstValueFactory containing the error message string was
- // internalized.
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- LocalContext env;
- i::FLAG_lazy = true;
- CompileRun("function this_is_lazy() {\n"
- " break p;\n"
- "}\n"
- "this_is_lazy();\n");
-}
-
-
TEST(SerializationOfMaybeAssignmentFlag) {
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -3312,7 +3285,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::AstValueFactory avf(&zone, isolate->ast_string_constants(),
HashSeed(isolate));
const i::AstRawString* name = avf.GetOneByteString("result");
- avf.Internalize(isolate->factory());
+ avf.Internalize(isolate);
i::Handle<i::String> str = name->string();
CHECK(str->IsInternalizedString());
i::DeclarationScope* script_scope =
@@ -3362,7 +3335,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::AstValueFactory avf(&zone, isolate->ast_string_constants(),
HashSeed(isolate));
const i::AstRawString* name_x = avf.GetOneByteString("x");
- avf.Internalize(isolate->factory());
+ avf.Internalize(isolate);
i::DeclarationScope* script_scope =
new (&zone) i::DeclarationScope(&zone, &avf);
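AstValueFactory::Internalize now takes the Isolate itself rather than its Factory, so every call site drops the ->factory() hop. At a call site, matching the lines changed above (a sketch):

  i::AstValueFactory avf(&zone, isolate->ast_string_constants(),
                         HashSeed(isolate));
  const i::AstRawString* name = avf.GetOneByteString("result");
  avf.Internalize(isolate);  // was: avf.Internalize(isolate->factory());
  i::Handle<i::String> str = name->string();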
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 907c89fa0c..ce916c9c7a 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -76,7 +76,7 @@ static void CheckParseEq(const char* input, const char* expected,
CHECK(v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), &zone,
&reader, flags, &result));
CHECK_NOT_NULL(result.tree);
- CHECK(result.error.is_null());
+ CHECK(result.error == RegExpError::kNone);
std::ostringstream os;
result.tree->Print(os, &zone);
if (strcmp(expected, os.str().c_str()) != 0) {
@@ -94,7 +94,7 @@ static bool CheckSimple(const char* input) {
CHECK(v8::internal::RegExpParser::ParseRegExp(
CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
CHECK_NOT_NULL(result.tree);
- CHECK(result.error.is_null());
+ CHECK(result.error == RegExpError::kNone);
return result.simple;
}
@@ -112,7 +112,7 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
CHECK(v8::internal::RegExpParser::ParseRegExp(
CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
CHECK_NOT_NULL(result.tree);
- CHECK(result.error.is_null());
+ CHECK(result.error == RegExpError::kNone);
int min_match = result.tree->min_match();
int max_match = result.tree->max_match();
MinMaxPair pair = { min_match, max_match };
@@ -428,9 +428,8 @@ static void ExpectError(const char* input, const char* expected,
CHECK(!v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
&result));
CHECK_NULL(result.tree);
- CHECK(!result.error.is_null());
- std::unique_ptr<char[]> str = result.error->ToCString(ALLOW_NULLS);
- CHECK_EQ(0, strcmp(expected, str.get()));
+ CHECK(result.error != RegExpError::kNone);
+ CHECK_EQ(0, strcmp(expected, RegExpErrorString(result.error)));
}
@@ -468,7 +467,7 @@ TEST(Errors) {
ExpectError("\\k<a", kInvalidCaptureName, true);
const char* kDuplicateCaptureName = "Duplicate capture group name";
ExpectError("(?<a>.)(?<a>.)", kDuplicateCaptureName, true);
- const char* kInvalidUnicodeEscape = "Invalid Unicode escape sequence";
+ const char* kInvalidUnicodeEscape = "Invalid Unicode escape";
ExpectError("(?<\\u{FISK}", kInvalidUnicodeEscape, true);
const char* kInvalidCaptureReferenced = "Invalid named capture referenced";
ExpectError("\\k<a>", kInvalidCaptureReferenced, true);
@@ -607,7 +606,7 @@ using ArchRegExpMacroAssembler = RegExpMacroAssemblerARM;
using ArchRegExpMacroAssembler = RegExpMacroAssemblerARM64;
#elif V8_TARGET_ARCH_S390
using ArchRegExpMacroAssembler = RegExpMacroAssemblerS390;
-#elif V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
using ArchRegExpMacroAssembler = RegExpMacroAssemblerPPC;
#elif V8_TARGET_ARCH_MIPS
using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
@@ -721,9 +720,9 @@ TEST(MacroAssemblerNativeSimple) {
m.AdvanceCurrentPosition(3);
m.PushBacktrack(&backtrack);
m.Succeed();
- m.Bind(&backtrack);
+ m.BindJumpTarget(&backtrack);
m.Backtrack();
- m.Bind(&fail);
+ m.BindJumpTarget(&fail);
m.Fail();
Handle<String> source = factory->NewStringFromStaticChars("^foo");
@@ -780,9 +779,9 @@ TEST(MacroAssemblerNativeSimpleUC16) {
m.AdvanceCurrentPosition(3);
m.PushBacktrack(&backtrack);
m.Succeed();
- m.Bind(&backtrack);
+ m.BindJumpTarget(&backtrack);
m.Backtrack();
- m.Bind(&fail);
+ m.BindJumpTarget(&fail);
m.Fail();
Handle<String> source = factory->NewStringFromStaticChars("^foo");
@@ -835,11 +834,11 @@ TEST(MacroAssemblerNativeBacktrack) {
Label backtrack;
m.LoadCurrentCharacter(10, &fail);
m.Succeed();
- m.Bind(&fail);
+ m.BindJumpTarget(&fail);
m.PushBacktrack(&backtrack);
m.LoadCurrentCharacter(10, nullptr);
m.Succeed();
- m.Bind(&backtrack);
+ m.BindJumpTarget(&backtrack);
m.Fail();
Handle<String> source = factory->NewStringFromStaticChars("..........");
@@ -967,7 +966,7 @@ TEST(MacroAssemblernativeAtStart) {
m.CheckNotAtStart(0, &not_at_start);
// Check that prevchar = '\n' and current = 'f'.
m.CheckCharacter('\n', &newline);
- m.Bind(&fail);
+ m.BindJumpTarget(&fail);
m.Fail();
m.Bind(&newline);
m.LoadCurrentCharacter(0, &fail);
@@ -1021,16 +1020,16 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
m.WriteCurrentPositionToRegister(2, 0);
m.AdvanceCurrentPosition(3);
m.WriteCurrentPositionToRegister(3, 0);
- m.CheckNotBackReferenceIgnoreCase(2, false, false, &fail); // Match "AbC".
- m.CheckNotBackReferenceIgnoreCase(2, false, false, &fail); // Match "ABC".
+ m.CheckNotBackReferenceIgnoreCase(2, false, &fail); // Match "AbC".
+ m.CheckNotBackReferenceIgnoreCase(2, false, &fail); // Match "ABC".
Label expected_fail;
- m.CheckNotBackReferenceIgnoreCase(2, false, false, &expected_fail);
- m.Bind(&fail);
+ m.CheckNotBackReferenceIgnoreCase(2, false, &expected_fail);
+ m.BindJumpTarget(&fail);
m.Fail();
m.Bind(&expected_fail);
m.AdvanceCurrentPosition(3); // Skip "xYz"
- m.CheckNotBackReferenceIgnoreCase(2, false, false, &succ);
+ m.CheckNotBackReferenceIgnoreCase(2, false, &succ);
m.Fail();
m.Bind(&succ);
@@ -1094,7 +1093,7 @@ TEST(MacroAssemblerNativeRegisters) {
m.AdvanceCurrentPosition(2);
m.PopCurrentPosition();
- m.Bind(&backtrack);
+ m.BindJumpTarget(&backtrack);
m.PopRegister(out1);
m.ReadCurrentPositionFromRegister(out1);
m.AdvanceCurrentPosition(3);
@@ -1131,7 +1130,7 @@ TEST(MacroAssemblerNativeRegisters) {
m.Succeed();
- m.Bind(&fail);
+ m.BindJumpTarget(&fail);
m.Fail();
Handle<String> source = factory->NewStringFromStaticChars("<loop test>");
@@ -1265,10 +1264,10 @@ TEST(MacroAssembler) {
m.AdvanceCurrentPosition(3);
m.PushBacktrack(&backtrack);
m.Succeed();
- m.Bind(&backtrack);
+ m.BindJumpTarget(&backtrack);
m.ClearRegisters(2, 3);
m.Backtrack();
- m.Bind(&fail);
+ m.BindJumpTarget(&fail);
m.PopRegister(0);
m.Fail();
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index f3d1a56543..26002621b6 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -44,7 +44,6 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) {
V(builtins_constants_table) \
V(current_microtask) \
V(detached_contexts) \
- V(dirty_js_finalization_groups) \
V(feedback_vectors_for_profiling_tools) \
V(shared_wasm_memories) \
V(materialized_objects) \
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index 3c8f352551..7197101d41 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -7,7 +7,6 @@
#include <map>
#include <string>
#include "include/v8.h"
-#include "src/execution/simulator.h"
#include "src/flags/flags.h"
#include "test/cctest/cctest.h"
@@ -31,68 +30,6 @@ class Sample {
};
-#if defined(USE_SIMULATOR)
-class SimulatorHelper {
- public:
- inline bool Init(v8::Isolate* isolate) {
- simulator_ = reinterpret_cast<v8::internal::Isolate*>(isolate)
- ->thread_local_top()
- ->simulator_;
- // Check if there is active simulator.
- return simulator_ != nullptr;
- }
-
- inline void FillRegisters(v8::RegisterState* state) {
-#if V8_TARGET_ARCH_ARM
- state->pc = reinterpret_cast<void*>(simulator_->get_pc());
- state->sp = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::sp));
- state->fp = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::r11));
- state->lr = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::lr));
-#elif V8_TARGET_ARCH_ARM64
- if (simulator_->sp() == 0 || simulator_->fp() == 0) {
- // It's possible that the simulator is interrupted while it is updating
- // the sp or fp register. ARM64 simulator does this in two steps:
- // first setting it to zero and then setting it to a new value.
- // Bailout if sp/fp doesn't contain the new value.
- return;
- }
- state->pc = reinterpret_cast<void*>(simulator_->pc());
- state->sp = reinterpret_cast<void*>(simulator_->sp());
- state->fp = reinterpret_cast<void*>(simulator_->fp());
- state->lr = reinterpret_cast<void*>(simulator_->lr());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- state->pc = reinterpret_cast<void*>(simulator_->get_pc());
- state->sp = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::sp));
- state->fp = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::fp));
-#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
- state->pc = reinterpret_cast<void*>(simulator_->get_pc());
- state->sp = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::sp));
- state->fp = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::fp));
- state->lr = reinterpret_cast<void*>(simulator_->get_lr());
-#elif V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
- state->pc = reinterpret_cast<void*>(simulator_->get_pc());
- state->sp = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::sp));
- state->fp = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::fp));
- state->lr = reinterpret_cast<void*>(
- simulator_->get_register(v8::internal::Simulator::ra));
-#endif
- }
-
- private:
- v8::internal::Simulator* simulator_;
-};
-#endif // USE_SIMULATOR
-
-
class SamplingTestHelper {
public:
struct CodeEventEntry {
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index f37b623504..7ce8ef7152 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -53,6 +53,7 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/read-only-serializer.h"
+#include "src/snapshot/snapshot-compression.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-deserializer.h"
#include "src/snapshot/startup-serializer.h"
@@ -414,6 +415,24 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
ReadOnlyHeap::ClearSharedHeapForTest();
}
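+// Checks that snapshot compression round-trips: decompressing the compressed
+// blob must reproduce the original serialized bytes exactly.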
+UNINITIALIZED_TEST(SnapshotCompression) {
+ DisableAlwaysOpt();
+ Vector<const byte> startup_blob;
+ Vector<const byte> read_only_blob;
+ Vector<const byte> partial_blob;
+ PartiallySerializeContext(&startup_blob, &read_only_blob, &partial_blob);
+ SnapshotData original_snapshot_data(partial_blob);
+ SnapshotData compressed =
+ i::SnapshotCompression::Compress(&original_snapshot_data);
+ SnapshotData decompressed =
+ i::SnapshotCompression::Decompress(compressed.RawData());
+ CHECK_EQ(partial_blob, decompressed.RawData());
+
+ startup_blob.Dispose();
+ read_only_blob.Dispose();
+ partial_blob.Dispose();
+}
+
UNINITIALIZED_TEST(PartialSerializerContext) {
DisableAlwaysOpt();
Vector<const byte> startup_blob;
@@ -888,7 +907,8 @@ void TestInt32Expectations(const Int32Expectations& expectations) {
void TypedArrayTestHelper(
const char* code, const Int32Expectations& expectations,
const char* code_to_run_after_restore = nullptr,
- const Int32Expectations& after_restore_expectations = Int32Expectations()) {
+ const Int32Expectations& after_restore_expectations = Int32Expectations(),
+ v8::ArrayBuffer::Allocator* allocator = nullptr) {
DisableAlwaysOpt();
i::FLAG_allow_natives_syntax = true;
DisableEmbeddedBlobRefcounting();
@@ -914,7 +934,8 @@ void TypedArrayTestHelper(
ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ create_params.array_buffer_allocator =
+ allocator != nullptr ? allocator : CcTest::array_buffer_allocator();
v8::Isolate* isolate = TestSerializer::NewIsolate(create_params);
{
v8::Isolate::Scope i_scope(isolate);
@@ -1010,6 +1031,46 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobDataView) {
TypedArrayTestHelper(code, expectations);
}
+namespace {
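+// Allocator whose Allocate() fails on every other call, used below to
+// exercise snapshot deserialization when ArrayBuffer backing-store
+// allocations fail intermittently.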
+class AlternatingArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ AlternatingArrayBufferAllocator()
+ : allocation_fails_(false),
+ allocator_(v8::ArrayBuffer::Allocator::NewDefaultAllocator()) {}
+ ~AlternatingArrayBufferAllocator() { delete allocator_; }
+ void* Allocate(size_t length) override {
+ allocation_fails_ = !allocation_fails_;
+ if (allocation_fails_) return nullptr;
+ return allocator_->Allocate(length);
+ }
+
+ void* AllocateUninitialized(size_t length) override {
+ return this->Allocate(length);
+ }
+
+ void Free(void* data, size_t size) override { allocator_->Free(data, size); }
+
+ void* Reallocate(void* data, size_t old_length, size_t new_length) override {
+ return allocator_->Reallocate(data, old_length, new_length);
+ }
+
+ private:
+ bool allocation_fails_;
+ v8::ArrayBuffer::Allocator* allocator_;
+};
+} // anonymous namespace
+
+UNINITIALIZED_TEST(CustomSnapshotManyArrayBuffers) {
+ const char* code =
+ "var buffers = [];"
+ "for (let i = 0; i < 70; i++) buffers.push(new Uint8Array(1000));";
+ Int32Expectations expectations = {std::make_tuple("buffers.length", 70)};
+ std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
+ new AlternatingArrayBufferAllocator());
+ TypedArrayTestHelper(code, expectations, nullptr, Int32Expectations(),
+ allocator.get());
+}
+
UNINITIALIZED_TEST(CustomSnapshotDataBlobDetachedArrayBuffer) {
const char* code =
"var x = new Int16Array([12, 24, 48]);"
@@ -1611,7 +1672,7 @@ void TestCodeSerializerOnePlusOneImpl(bool verify_builtins_count = true) {
TEST(CodeSerializerOnePlusOne) { TestCodeSerializerOnePlusOneImpl(); }
// See bug v8:9122
-#ifndef V8_TARGET_ARCH_ARM
+#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_S390X)
TEST(CodeSerializerOnePlusOneWithInterpretedFramesNativeStack) {
FLAG_interpreted_frames_native_stack = true;
// We pass false because this test will create IET copies (which are
@@ -1810,7 +1871,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
Handle<String> moving_object;
Page* ec_page;
{
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(heap);
heap::SimulateFullSpace(heap->old_space());
moving_object = isolate->factory()->InternalizeString(
isolate->factory()->NewStringFromAsciiChecked("happy_hippo"));
@@ -2455,8 +2516,10 @@ TEST(CodeSerializerBitFlip) {
const char* source = "function f() { return 'abc'; }; f() + 'def'";
v8::ScriptCompiler::CachedData* cache = CompileRunAndProduceCache(source);
- // Random bit flip.
- const_cast<uint8_t*>(cache->data)[337] ^= 0x40;
+ // Arbitrary bit flip.
+ int arbitrary_spot = 337;
+ CHECK_LT(arbitrary_spot, cache->length);
+ const_cast<uint8_t*>(cache->data)[arbitrary_spot] ^= 0x40;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index a3f3b89347..2a6843b892 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -686,7 +686,7 @@ void TestStringCharacterStream(BuildString build, int test_cases) {
for (int i = 0; i < test_cases; i++) {
printf("%d\n", i);
HandleScope inner_scope(isolate);
- AlwaysAllocateScope always_allocate(isolate);
+ AlwaysAllocateScopeForTesting always_allocate(isolate->heap());
// Build flat version of cons string.
Handle<String> flat_string = build(i, &data);
ConsStringStats flat_string_stats;
@@ -1347,10 +1347,9 @@ TEST(CachedHashOverflow) {
const char* line = lines[i];
printf("%s\n", line);
v8::Local<v8::Value> result =
- v8::Script::Compile(context,
- v8::String::NewFromUtf8(CcTest::isolate(), line,
- v8::NewStringType::kNormal)
- .ToLocalChecked())
+ v8::Script::Compile(
+ context,
+ v8::String::NewFromUtf8(CcTest::isolate(), line).ToLocalChecked())
.ToLocalChecked()
->Run(context)
.ToLocalChecked();
@@ -1873,11 +1872,6 @@ void TestString(i::Isolate* isolate, const IndexData& data) {
uint32_t index;
CHECK(s->AsArrayIndex(&index));
CHECK_EQ(data.array_index, index);
- // AsArrayIndex only forces hash computation for cacheable indices;
- // so trigger hash computation for longer strings manually.
- if (s->length() > String::kMaxCachedArrayIndexLength) s->Hash();
- CHECK_EQ(0, s->hash_field() & String::kIsNotArrayIndexMask);
- CHECK(s->HasHashCode());
}
if (data.is_integer_index) {
size_t index;
@@ -1889,9 +1883,6 @@ void TestString(i::Isolate* isolate, const IndexData& data) {
}
if (!s->HasHashCode()) s->Hash();
CHECK(s->HasHashCode());
- if (!data.is_array_index) {
- CHECK_NE(0, s->hash_field() & String::kIsNotArrayIndexMask);
- }
if (!data.is_integer_index) {
CHECK_NE(0, s->hash_field() & String::kIsNotIntegerIndexMask);
}
@@ -1942,21 +1933,13 @@ TEST(StringEquals) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- auto foo_str =
- v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal)
- .ToLocalChecked();
- auto bar_str =
- v8::String::NewFromUtf8(isolate, "bar", v8::NewStringType::kNormal)
- .ToLocalChecked();
- auto foo_str2 =
- v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal)
- .ToLocalChecked();
+ auto foo_str = v8::String::NewFromUtf8Literal(isolate, "foo");
+ auto bar_str = v8::String::NewFromUtf8Literal(isolate, "bar");
+ auto foo_str2 = v8::String::NewFromUtf8Literal(isolate, "foo");
uint16_t* two_byte_source = AsciiToTwoByteString("foo");
auto foo_two_byte_str =
- v8::String::NewFromTwoByte(isolate, two_byte_source,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ v8::String::NewFromTwoByte(isolate, two_byte_source).ToLocalChecked();
i::DeleteArray(two_byte_source);
CHECK(foo_str->StringEquals(foo_str));
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm64.cc b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
index f5f19f0687..c6f0d2c2f5 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm64.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
@@ -200,7 +200,7 @@ void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
AssembleLoadExcl(&masm, access1, w1, x1);
AssembleMemoryAccess(&masm, access2, w3, w2, x1);
AssembleStoreExcl(&masm, access3, w0, w3, x1);
- __ br(lr);
+ __ Ret();
CodeDesc desc;
masm.GetCode(isolate, &desc);
@@ -271,7 +271,7 @@ int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
HandleScope scope(isolate);
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes);
AssembleMemoryAccess(&masm, access, w0, w2, x1);
- __ br(lr);
+ __ Ret();
CodeDesc desc;
masm.GetCode(isolate, &desc);
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index f7cd09848e..b371cd8d3c 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -492,10 +492,11 @@ TEST(TerminateFromOtherThreadWhileMicrotaskRunning) {
isolate->EnqueueMicrotask(
v8::Function::New(isolate->GetCurrentContext(), MicrotaskShouldNotRun)
.ToLocalChecked());
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
isolate->CancelTerminateExecution();
- isolate->RunMicrotasks(); // should not run MicrotaskShouldNotRun
+ // Should not run MicrotaskShouldNotRun.
+ isolate->PerformMicrotaskCheckpoint();
thread.Join();
delete semaphore;
@@ -913,7 +914,7 @@ TEST(TerminateInMicrotask) {
CHECK(context2 == isolate->GetCurrentContext());
CHECK(context2 == isolate->GetEnteredOrMicrotaskContext());
CHECK(!isolate->IsExecutionTerminating());
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK(context2 == isolate->GetCurrentContext());
CHECK(context2 == isolate->GetEnteredOrMicrotaskContext());
CHECK(try_catch.HasCaught());
@@ -948,7 +949,7 @@ TEST(TerminateInApiMicrotask) {
CHECK(!isolate->IsExecutionTerminating());
isolate->EnqueueMicrotask(TerminationMicrotask);
isolate->EnqueueMicrotask(UnreachableMicrotask);
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
CHECK(try_catch.HasCaught());
CHECK(try_catch.HasTerminated());
CHECK(isolate->IsExecutionTerminating());
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index fc213b5bf9..0b05dff6b8 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -180,20 +180,22 @@ struct Tests {
// Constructor
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- Type type = T.NewConstant(value);
- CHECK(type.IsHeapConstant() || type.IsOtherNumberConstant() ||
- type.IsRange());
+ Type type = T.Constant(value);
+ CHECK(type.IsBitset() || type.IsHeapConstant() ||
+ type.IsOtherNumberConstant() || type.IsRange());
}
// Value attribute
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- Type type = T.NewConstant(value);
+ Type type = T.Constant(value);
if (type.IsHeapConstant()) {
CHECK(value.address() == type.AsHeapConstant()->Value().address());
} else if (type.IsOtherNumberConstant()) {
CHECK(value->IsHeapNumber());
CHECK(value->Number() == type.AsOtherNumberConstant()->Value());
+ } else if (type.IsBitset()) {
+ CHECK(type.IsSingleton());
} else {
CHECK(type.IsRange());
double v = value->Number();
@@ -206,8 +208,8 @@ struct Tests {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- Type type1 = T.NewConstant(value1);
- Type type2 = T.NewConstant(value2);
+ Type type1 = T.Constant(value1);
+ Type type2 = T.Constant(value2);
if (type1.IsOtherNumberConstant() && type2.IsOtherNumberConstant()) {
CHECK(Equal(type1, type2) ==
(type1.AsOtherNumberConstant()->Value() ==
@@ -224,62 +226,70 @@ struct Tests {
// Typing of numbers
Factory* fac = isolate->factory();
- CHECK(T.NewConstant(fac->NewNumber(0)).Is(T.UnsignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(1)).Is(T.UnsignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(0x3FFFFFFF)).Is(T.UnsignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(-1)).Is(T.Negative31));
- CHECK(T.NewConstant(fac->NewNumber(-0x3FFFFFFF)).Is(T.Negative31));
- CHECK(T.NewConstant(fac->NewNumber(-0x40000000)).Is(T.Negative31));
- CHECK(T.NewConstant(fac->NewNumber(0x40000000)).Is(T.Unsigned31));
- CHECK(!T.NewConstant(fac->NewNumber(0x40000000)).Is(T.Unsigned30));
- CHECK(T.NewConstant(fac->NewNumber(0x7FFFFFFF)).Is(T.Unsigned31));
- CHECK(!T.NewConstant(fac->NewNumber(0x7FFFFFFF)).Is(T.Unsigned30));
- CHECK(T.NewConstant(fac->NewNumber(-0x40000001)).Is(T.Negative32));
- CHECK(!T.NewConstant(fac->NewNumber(-0x40000001)).Is(T.Negative31));
- CHECK(T.NewConstant(fac->NewNumber(-0x7FFFFFFF)).Is(T.Negative32));
- CHECK(!T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 1)).Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(0)).Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(1)).Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(42)).Equals(T.Range(42, 42)));
+ CHECK(T.Constant(fac->NewNumber(0x3FFFFFFF)).Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(-1)).Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x3FFFFFFF)).Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x40000000)).Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(0x40000000)).Is(T.Unsigned31));
+ CHECK(!T.Constant(fac->NewNumber(0x40000000)).Is(T.Unsigned30));
+ CHECK(T.Constant(fac->NewNumber(0x7FFFFFFF)).Is(T.Unsigned31));
+ CHECK(!T.Constant(fac->NewNumber(0x7FFFFFFF)).Is(T.Unsigned30));
+ CHECK(T.Constant(fac->NewNumber(-0x40000001)).Is(T.Negative32));
+ CHECK(!T.Constant(fac->NewNumber(-0x40000001)).Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x7FFFFFFF)).Is(T.Negative32));
+ CHECK(!T.Constant(fac->NewNumber(-0x7FFFFFFF - 1)).Is(T.Negative31));
if (SmiValuesAre31Bits()) {
- CHECK(!T.NewConstant(fac->NewNumber(0x40000000)).Is(T.UnsignedSmall));
- CHECK(!T.NewConstant(fac->NewNumber(0x7FFFFFFF)).Is(T.UnsignedSmall));
- CHECK(!T.NewConstant(fac->NewNumber(-0x40000001)).Is(T.SignedSmall));
- CHECK(!T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 1)).Is(T.SignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(0x40000000)).Is(T.UnsignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(0x7FFFFFFF)).Is(T.UnsignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(-0x40000001)).Is(T.SignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(-0x7FFFFFFF - 1)).Is(T.SignedSmall));
} else {
CHECK(SmiValuesAre32Bits());
- CHECK(T.NewConstant(fac->NewNumber(0x40000000)).Is(T.UnsignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(0x7FFFFFFF)).Is(T.UnsignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(-0x40000001)).Is(T.SignedSmall));
- CHECK(T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 1)).Is(T.SignedSmall));
- }
- CHECK(T.NewConstant(fac->NewNumber(0x80000000u)).Is(T.Unsigned32));
- CHECK(!T.NewConstant(fac->NewNumber(0x80000000u)).Is(T.Unsigned31));
- CHECK(T.NewConstant(fac->NewNumber(0xFFFFFFFFu)).Is(T.Unsigned32));
- CHECK(!T.NewConstant(fac->NewNumber(0xFFFFFFFFu)).Is(T.Unsigned31));
- CHECK(T.NewConstant(fac->NewNumber(0xFFFFFFFFu + 1.0)).Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(0xFFFFFFFFu + 1.0)).Is(T.Integral32));
- CHECK(T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 2.0)).Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(-0x7FFFFFFF - 2.0)).Is(T.Integral32));
- CHECK(T.NewConstant(fac->NewNumber(0.1)).Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(0.1)).Is(T.Integral32));
- CHECK(T.NewConstant(fac->NewNumber(-10.1)).Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(-10.1)).Is(T.Integral32));
- CHECK(T.NewConstant(fac->NewNumber(10e60)).Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(10e60)).Is(T.Integral32));
- CHECK(T.NewConstant(fac->NewNumber(-1.0 * 0.0)).Is(T.MinusZero));
- CHECK(
- T.NewConstant(fac->NewNumber(std::numeric_limits<double>::quiet_NaN()))
- .Is(T.NaN));
- CHECK(T.NewConstant(fac->NewNumber(V8_INFINITY)).Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(V8_INFINITY)).Is(T.Integral32));
- CHECK(T.NewConstant(fac->NewNumber(-V8_INFINITY)).Is(T.PlainNumber));
- CHECK(!T.NewConstant(fac->NewNumber(-V8_INFINITY)).Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(0x40000000)).Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(0x7FFFFFFF)).Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(-0x40000001)).Is(T.SignedSmall));
+ CHECK(T.Constant(fac->NewNumber(-0x7FFFFFFF - 1)).Is(T.SignedSmall));
+ }
+ CHECK(T.Constant(fac->NewNumber(0x80000000u)).Is(T.Unsigned32));
+ CHECK(!T.Constant(fac->NewNumber(0x80000000u)).Is(T.Unsigned31));
+ CHECK(T.Constant(fac->NewNumber(0xFFFFFFFFu)).Is(T.Unsigned32));
+ CHECK(!T.Constant(fac->NewNumber(0xFFFFFFFFu)).Is(T.Unsigned31));
+ CHECK(T.Constant(fac->NewNumber(0xFFFFFFFFu + 1.0)).Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(0xFFFFFFFFu + 1.0)).Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(-0x7FFFFFFF - 2.0)).Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(-0x7FFFFFFF - 2.0)).Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(0.1)).Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(0.1)).Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(-10.1)).Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(-10.1)).Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(10e60)).Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(10e60)).Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(-1.0 * 0.0)).Is(T.MinusZero));
+ CHECK(T.Constant(fac->NewNumber(V8_INFINITY)).Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(V8_INFINITY)).Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(-V8_INFINITY)).Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(-V8_INFINITY)).Is(T.Integral32));
// Typing of Strings
Handle<String> s1 = fac->NewStringFromAsciiChecked("a");
- CHECK(T.NewConstant(s1).Is(T.InternalizedString));
+ CHECK(T.Constant(s1).Is(T.InternalizedString));
const uc16 two_byte[1] = {0x2603};
Handle<String> s2 =
fac->NewTwoByteInternalizedString(Vector<const uc16>(two_byte, 1), 1);
- CHECK(T.NewConstant(s2).Is(T.InternalizedString));
+ CHECK(T.Constant(s2).Is(T.InternalizedString));
+
+ // Typing of special constants
+ CHECK(T.Constant(fac->the_hole_value()).Equals(T.Hole));
+ CHECK(T.Constant(fac->null_value()).Equals(T.Null));
+ CHECK(T.Constant(fac->undefined_value()).Equals(T.Undefined));
+ CHECK(T.Constant(fac->minus_zero_value()).Equals(T.MinusZero));
+ CHECK(T.Constant(fac->NewNumber(-0.0)).Equals(T.MinusZero));
+ CHECK(T.Constant(fac->nan_value()).Equals(T.NaN));
+ CHECK(T.Constant(fac->NewNumber(std::numeric_limits<double>::quiet_NaN()))
+ .Equals(T.NaN));
}
void Range() {
@@ -545,8 +555,8 @@ struct Tests {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- Type const_type1 = T.NewConstant(value1);
- Type const_type2 = T.NewConstant(value2);
+ Type const_type1 = T.Constant(value1);
+ Type const_type2 = T.Constant(value2);
if (const_type1.IsOtherNumberConstant() &&
const_type2.IsOtherNumberConstant()) {
CHECK(const_type1.Is(const_type2) ==
@@ -686,8 +696,8 @@ struct Tests {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- Type const_type1 = T.NewConstant(value1);
- Type const_type2 = T.NewConstant(value2);
+ Type const_type1 = T.Constant(value1);
+ Type const_type2 = T.Constant(value2);
if (const_type1.IsOtherNumberConstant() &&
const_type2.IsOtherNumberConstant()) {
CHECK(const_type1.Maybe(const_type2) ==
@@ -1045,37 +1055,21 @@ struct Tests {
} // namespace
TEST(IsSomeType) { Tests().IsSomeType(); }
-
TEST(BitsetType) { Tests().Bitset(); }
-
TEST(ConstantType) { Tests().Constant(); }
-
TEST(RangeType) { Tests().Range(); }
-
TEST(MinMax) { Tests().MinMax(); }
-
TEST(BitsetGlb) { Tests().BitsetGlb(); }
-
TEST(BitsetLub) { Tests().BitsetLub(); }
-
TEST(Is1) { Tests().Is1(); }
-
TEST(Is2) { Tests().Is2(); }
-
TEST(Maybe) { Tests().Maybe(); }
-
TEST(Union1) { Tests().Union1(); }
-
TEST(Union2) { Tests().Union2(); }
-
TEST(Union3) { Tests().Union3(); }
-
TEST(Union4) { Tests().Union4(); }
-
TEST(Intersect) { Tests().Intersect(); }
-
TEST(Distributivity) { Tests().Distributivity(); }
-
TEST(GetRange) { Tests().GetRange(); }
} // namespace compiler
diff --git a/deps/v8/test/cctest/test-unwinder-code-pages.cc b/deps/v8/test/cctest/test-unwinder-code-pages.cc
index 6177be6de8..fc023e4145 100644
--- a/deps/v8/test/cctest/test-unwinder-code-pages.cc
+++ b/deps/v8/test/cctest/test-unwinder-code-pages.cc
@@ -591,6 +591,80 @@ TEST(PCIsInV8_LargeCodeObject_CodePagesAPI) {
CHECK(v8::Unwinder::PCIsInV8(pages_length, code_pages, pc));
}
+#ifdef USE_SIMULATOR
+// TODO(v8:10026): Make this also work without the simulator. The part that
+// needs modifications is getting the RegisterState.
+class UnwinderTestHelper {
+ public:
+ explicit UnwinderTestHelper(const std::string& test_function)
+ : isolate_(CcTest::isolate()) {
+ CHECK(!instance_);
+ instance_ = this;
+ v8::HandleScope scope(isolate_);
+ v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate_);
+ global->Set(v8_str("TryUnwind"),
+ v8::FunctionTemplate::New(isolate_, TryUnwind));
+ LocalContext env(isolate_, nullptr, global);
+ CompileRun(v8_str(test_function.c_str()));
+ }
+
+ ~UnwinderTestHelper() { instance_ = nullptr; }
+
+ private:
+ static void TryUnwind(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ instance_->DoTryUnwind();
+ }
+
+ void DoTryUnwind() {
+ // Set up RegisterState.
+ v8::RegisterState register_state;
+ SimulatorHelper simulator_helper;
+ if (!simulator_helper.Init(isolate_)) return;
+ simulator_helper.FillRegisters(&register_state);
+ // At this point, the PC will point to a Redirection object, which is not
+ // in V8 as far as the unwinder is concerned. To make this work, point to
+ // the return address, which is in V8, instead.
+ register_state.pc = register_state.lr;
+
+ JSEntryStubs entry_stubs = isolate_->GetJSEntryStubs();
+ MemoryRange code_pages[v8::Isolate::kMinCodePagesBufferSize];
+ size_t pages_length =
+ isolate_->CopyCodePages(arraysize(code_pages), code_pages);
+ CHECK_LE(pages_length, arraysize(code_pages));
+
+ void* stack_base = reinterpret_cast<void*>(0xffffffffffffffffL);
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(
+ entry_stubs, pages_length, code_pages, &register_state, stack_base);
+ // Check that we have successfully unwound past js_entry_sp.
+ CHECK(unwound);
+ CHECK_GT(register_state.sp,
+ reinterpret_cast<void*>(CcTest::i_isolate()->js_entry_sp()));
+ }
+
+ v8::Isolate* isolate_;
+
+ static UnwinderTestHelper* instance_;
+};
+
+UnwinderTestHelper* UnwinderTestHelper::instance_;
+
+TEST(Unwind_TwoNestedFunctions_CodePagesAPI) {
+ i::FLAG_allow_natives_syntax = true;
+ const char* test_script =
+ "function test_unwinder_api_inner() {"
+ " TryUnwind();"
+ " return 0;"
+ "}"
+ "function test_unwinder_api_outer() {"
+ " return test_unwinder_api_inner();"
+ "}"
+ "%NeverOptimizeFunction(test_unwinder_api_inner);"
+ "%NeverOptimizeFunction(test_unwinder_api_outer);"
+ "test_unwinder_api_outer();";
+
+ UnwinderTestHelper helper(test_script);
+}
+#endif
} // namespace test_unwinder_code_pages
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-unwinder.cc b/deps/v8/test/cctest/test-unwinder.cc
index ffe46f4ca2..59c8708767 100644
--- a/deps/v8/test/cctest/test-unwinder.cc
+++ b/deps/v8/test/cctest/test-unwinder.cc
@@ -7,6 +7,7 @@
#include "src/api/api-inl.h"
#include "src/builtins/builtins.h"
#include "src/execution/isolate.h"
+#include "src/execution/pointer-authentication.h"
#include "src/heap/spaces.h"
#include "src/objects/code-inl.h"
#include "test/cctest/cctest.h"
@@ -38,6 +39,11 @@ TEST(Unwind_BadState_Fail) {
CHECK_NULL(register_state.pc);
}
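+// Writes {pc} to stack[index] the way a return address would be stored: on
+// configurations with pointer authentication, the value is signed against
+// the SP value that points just above the slot.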
+void StorePc(uintptr_t stack[], int index, uintptr_t pc) {
+ Address sp = reinterpret_cast<Address>(&stack[index]) + kSystemPointerSize;
+ stack[index] = PointerAuthentication::SignPCWithSP(pc, sp);
+}
+
TEST(Unwind_BuiltinPCInMiddle_Success) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -49,7 +55,7 @@ TEST(Unwind_BuiltinPCInMiddle_Success) {
uintptr_t stack[3];
void* stack_base = stack + arraysize(stack);
stack[0] = reinterpret_cast<uintptr_t>(stack + 2); // saved FP (rbp).
- stack[1] = 202; // Return address into C++ code.
+ StorePc(stack, 1, 202); // Return address into C++ code.
stack[2] = 303; // The SP points here in the caller's frame.
register_state.sp = stack;
@@ -93,9 +99,9 @@ TEST(Unwind_BuiltinPCAtStart_Success) {
stack[0] = 101;
// Return address into JS code. It doesn't matter that this is not actually in
// JSEntry, because we only check that for the top frame.
- stack[1] = reinterpret_cast<uintptr_t>(code + 10);
+ StorePc(stack, 1, reinterpret_cast<uintptr_t>(code + 10));
stack[2] = reinterpret_cast<uintptr_t>(stack + 5); // saved FP (rbp).
- stack[3] = 303; // Return address into C++ code.
+ StorePc(stack, 3, 303); // Return address into C++ code.
stack[4] = 404;
stack[5] = 505;
@@ -145,7 +151,7 @@ TEST(Unwind_CodeObjectPCInMiddle_Success) {
uintptr_t stack[3];
void* stack_base = stack + arraysize(stack);
stack[0] = reinterpret_cast<uintptr_t>(stack + 2); // saved FP (rbp).
- stack[1] = 202; // Return address into C++ code.
+ StorePc(stack, 1, 202); // Return address into C++ code.
stack[2] = 303; // The SP points here in the caller's frame.
register_state.sp = stack;
@@ -213,7 +219,7 @@ TEST(Unwind_JSEntryBeforeFrame_Fail) {
stack[3] = 131;
stack[4] = 141;
stack[5] = 151;
- stack[6] = 100; // Return address into C++ code.
+ StorePc(stack, 6, 100); // Return address into C++ code.
stack[7] = 303; // The SP points here in the caller's frame.
stack[8] = 404;
stack[9] = 505;
@@ -267,7 +273,7 @@ TEST(Unwind_OneJSFrame_Success) {
stack[3] = 131;
stack[4] = 141;
stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP (rbp).
- stack[6] = 100; // Return address into C++ code.
+ StorePc(stack, 6, 100); // Return address into C++ code.
stack[7] = 303; // The SP points here in the caller's frame.
stack[8] = 404;
stack[9] = 505;
@@ -311,10 +317,10 @@ TEST(Unwind_TwoJSFrames_Success) {
stack[1] = 111;
stack[2] = reinterpret_cast<uintptr_t>(stack + 5); // saved FP (rbp).
// The fake return address is in the JS code range.
- stack[3] = reinterpret_cast<uintptr_t>(code + 10);
+ StorePc(stack, 3, reinterpret_cast<uintptr_t>(code + 10));
stack[4] = 141;
stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP (rbp).
- stack[6] = 100; // Return address into C++ code.
+ StorePc(stack, 6, 100); // Return address into C++ code.
stack[7] = 303; // The SP points here in the caller's frame.
stack[8] = 404;
stack[9] = 505;
@@ -371,7 +377,7 @@ TEST(Unwind_StackBounds_Basic) {
uintptr_t stack[3];
stack[0] = reinterpret_cast<uintptr_t>(stack + 2); // saved FP (rbp).
- stack[1] = 202; // Return address into C++ code.
+ StorePc(stack, 1, 202); // Return address into C++ code.
stack[2] = 303; // The SP points here in the caller's frame.
register_state.sp = stack;
@@ -414,12 +420,12 @@ TEST(Unwind_StackBounds_WithUnwinding) {
stack[3] = 131;
stack[4] = 141;
stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP (rbp).
- stack[6] = reinterpret_cast<uintptr_t>(code + 20); // JS code.
+ StorePc(stack, 6, reinterpret_cast<uintptr_t>(code + 20)); // JS code.
stack[7] = 303; // The SP points here in the caller's frame.
stack[8] = 404;
stack[9] = reinterpret_cast<uintptr_t>(stack) +
(12 * sizeof(uintptr_t)); // saved FP (OOB).
- stack[10] = reinterpret_cast<uintptr_t>(code + 20); // JS code.
+ StorePc(stack, 10, reinterpret_cast<uintptr_t>(code + 20)); // JS code.
register_state.sp = stack;
register_state.fp = stack + 5;
@@ -435,7 +441,7 @@ TEST(Unwind_StackBounds_WithUnwinding) {
// Change the return address so that it is not in range. We will not range
// check the stack[9] FP value because we have finished unwinding and the
// contents of rbp do not necessarily have to be the FP in this case.
- stack[10] = 202;
+ StorePc(stack, 10, 202);
unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
stack_base);
CHECK(unwound);
@@ -549,6 +555,76 @@ TEST(PCIsInV8_LargeCodeObject) {
CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
}
+#ifdef USE_SIMULATOR
+// TODO(v8:10026): Make this also work without the simulator. The part that
+// needs modifications is getting the RegisterState.
+class UnwinderTestHelper {
+ public:
+ explicit UnwinderTestHelper(const std::string& test_function)
+ : isolate_(CcTest::isolate()) {
+ CHECK(!instance_);
+ instance_ = this;
+ v8::HandleScope scope(isolate_);
+ v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate_);
+ global->Set(v8_str("TryUnwind"),
+ v8::FunctionTemplate::New(isolate_, TryUnwind));
+ LocalContext env(isolate_, nullptr, global);
+ CompileRun(v8_str(test_function.c_str()));
+ }
+
+ ~UnwinderTestHelper() { instance_ = nullptr; }
+
+ private:
+ static void TryUnwind(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ instance_->DoTryUnwind();
+ }
+
+ void DoTryUnwind() {
+ // Set up RegisterState.
+ v8::RegisterState register_state;
+ SimulatorHelper simulator_helper;
+ if (!simulator_helper.Init(isolate_)) return;
+ simulator_helper.FillRegisters(&register_state);
+ // At this point, the PC will point to a Redirection object, which is not
+ // in V8 as far as the unwinder is concerned. To make this work, point to
+ // the return address, which is in V8, instead.
+ register_state.pc = register_state.lr;
+
+ UnwindState unwind_state = isolate_->GetUnwindState();
+ void* stack_base = reinterpret_cast<void*>(0xffffffffffffffffL);
+ bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state,
+ &register_state, stack_base);
+ // Check that we have successfully unwound past js_entry_sp.
+ CHECK(unwound);
+ CHECK_GT(register_state.sp,
+ reinterpret_cast<void*>(CcTest::i_isolate()->js_entry_sp()));
+ }
+
+ v8::Isolate* isolate_;
+
+ static UnwinderTestHelper* instance_;
+};
+
+UnwinderTestHelper* UnwinderTestHelper::instance_;
+
+TEST(Unwind_TwoNestedFunctions) {
+ i::FLAG_allow_natives_syntax = true;
+ const char* test_script =
+ "function test_unwinder_api_inner() {"
+ " TryUnwind();"
+ " return 0;"
+ "}"
+ "function test_unwinder_api_outer() {"
+ " return test_unwinder_api_inner();"
+ "}"
+ "%NeverOptimizeFunction(test_unwinder_api_inner);"
+ "%NeverOptimizeFunction(test_unwinder_api_outer);"
+ "test_unwinder_api_outer();";
+
+ UnwinderTestHelper helper(test_script);
+}
+#endif
+
#if __clang__
#pragma clang diagnostic pop
#endif
diff --git a/deps/v8/test/cctest/test-usecounters.cc b/deps/v8/test/cctest/test-usecounters.cc
index 2c4d007c4b..589a90c245 100644
--- a/deps/v8/test/cctest/test-usecounters.cc
+++ b/deps/v8/test/cctest/test-usecounters.cc
@@ -115,6 +115,42 @@ TEST(RegExpMatchIsFalseishOnJSRegExp) {
CHECK_EQ(1, use_counts[v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp]);
}
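+// The use counter must fire only when an element is added to
+// Object.prototype itself, not to other objects used as prototypes.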
+TEST(ObjectPrototypeHasElements) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+ global_use_counts = use_counts;
+ CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+
+ CompileRun("var o = {}; o[1] = 2;");
+ CHECK_EQ(0, use_counts[v8::Isolate::kObjectPrototypeHasElements]);
+
+ CompileRun("var o = {}; var p = {}; o.__proto__ = p; p[1] = 2;");
+ CHECK_EQ(0, use_counts[v8::Isolate::kObjectPrototypeHasElements]);
+
+ CompileRun("Object.prototype[1] = 2;");
+ CHECK_EQ(1, use_counts[v8::Isolate::kObjectPrototypeHasElements]);
+}
+
+TEST(ArrayPrototypeHasElements) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+ global_use_counts = use_counts;
+ CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+
+ CompileRun("var a = []; a[1] = 2;");
+ CHECK_EQ(0, use_counts[v8::Isolate::kArrayPrototypeHasElements]);
+
+ CompileRun("var a = []; var p = []; a.__proto__ = p; p[1] = 2;");
+ CHECK_EQ(0, use_counts[v8::Isolate::kArrayPrototypeHasElements]);
+
+ CompileRun("Array.prototype[1] = 2;");
+ CHECK_EQ(1, use_counts[v8::Isolate::kArrayPrototypeHasElements]);
+}
+
} // namespace test_usecounters
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-v8windbg.cc b/deps/v8/test/cctest/test-v8windbg.cc
new file mode 100644
index 0000000000..57f259a377
--- /dev/null
+++ b/deps/v8/test/cctest/test-v8windbg.cc
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/cctest.h"
+#include "tools/v8windbg/test/v8windbg-test.h"
+
+namespace v8 {
+namespace internal {
+
+UNINITIALIZED_TEST(V8windbg) { v8windbg_test::RunTests(); }
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 4e94fe21f0..9442dab6f5 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -736,6 +736,35 @@ TEST(TestTestParentFrameArguments) {
asm_tester.GenerateCode();
}
+TEST(TestFullyGeneratedClassFromCpp) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ CodeAssemblerTester asm_tester(isolate, 1);
+ TestTorqueAssembler m(asm_tester.state());
+ { m.Return(m.TestFullyGeneratedClassFromCpp()); }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ Handle<ExportedSubClass> result =
+ Handle<ExportedSubClass>::cast(ft.Call().ToHandleChecked());
+ CHECK_EQ(result->c_field(), 7);
+ CHECK_EQ(result->d_field(), 8);
+ CHECK_EQ(result->e_field(), 9);
+}
+
+TEST(TestFullyGeneratedClassWithElements) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ CodeAssemblerTester asm_tester(isolate, 1);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestFullyGeneratedClassWithElements();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index df24e83f39..9aa2f380f6 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -45,24 +45,16 @@ const char* TraceExtension::kSource =
v8::Local<v8::FunctionTemplate> TraceExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> name) {
- if (name->StrictEquals(
- v8::String::NewFromUtf8(isolate, "trace", v8::NewStringType::kNormal)
- .ToLocalChecked())) {
+ if (name->StrictEquals(v8::String::NewFromUtf8Literal(isolate, "trace"))) {
return v8::FunctionTemplate::New(isolate, TraceExtension::Trace);
} else if (name->StrictEquals(
- v8::String::NewFromUtf8(isolate, "js_trace",
- v8::NewStringType::kNormal)
- .ToLocalChecked())) {
+ v8::String::NewFromUtf8Literal(isolate, "js_trace"))) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSTrace);
} else if (name->StrictEquals(
- v8::String::NewFromUtf8(isolate, "js_entry_sp",
- v8::NewStringType::kNormal)
- .ToLocalChecked())) {
+ v8::String::NewFromUtf8Literal(isolate, "js_entry_sp"))) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySP);
- } else if (name->StrictEquals(
- v8::String::NewFromUtf8(isolate, "js_entry_sp_level2",
- v8::NewStringType::kNormal)
- .ToLocalChecked())) {
+ } else if (name->StrictEquals(v8::String::NewFromUtf8Literal(
+ isolate, "js_entry_sp_level2"))) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySPLevel2);
}
UNREACHABLE();
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index ab8545bf5d..2ac9151684 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -81,7 +81,7 @@ class CWasmEntryArgTester {
WasmRunner<ReturnType, Args...> runner_;
Isolate* isolate_;
std::function<ReturnType(Args...)> expected_fn_;
- FunctionSig* sig_;
+ const FunctionSig* sig_;
Handle<Code> c_wasm_entry_;
WasmCode* wasm_code_;
};
diff --git a/deps/v8/test/cctest/wasm/test-compilation-cache.cc b/deps/v8/test/cctest/wasm/test-compilation-cache.cc
index 416b4e9df2..f4831fa8d1 100644
--- a/deps/v8/test/cctest/wasm/test-compilation-cache.cc
+++ b/deps/v8/test/cctest/wasm/test-compilation-cache.cc
@@ -5,6 +5,7 @@
#include "src/api/api-inl.h"
#include "src/init/v8.h"
+#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
@@ -43,6 +44,35 @@ class TestResolver : public CompilationResultResolver {
std::atomic<int>* pending_;
};
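+// Wraps the engine's streaming compilation interface so tests can feed in
+// module bytes incrementally and finish the stream on demand.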
+class StreamTester {
+ public:
+ explicit StreamTester(std::shared_ptr<TestResolver> test_resolver)
+ : internal_scope_(CcTest::i_isolate()), test_resolver_(test_resolver) {
+ i::Isolate* i_isolate = CcTest::i_isolate();
+
+ Handle<Context> context = i_isolate->native_context();
+
+ stream_ = i_isolate->wasm_engine()->StartStreamingCompilation(
+ i_isolate, WasmFeatures::All(), context,
+ "WebAssembly.compileStreaming()", test_resolver_);
+ }
+
+ void OnBytesReceived(const uint8_t* start, size_t length) {
+ stream_->OnBytesReceived(Vector<const uint8_t>(start, length));
+ }
+
+ void FinishStream() { stream_->Finish(); }
+
+ void SetCompiledModuleBytes(const uint8_t* start, size_t length) {
+ stream_->SetCompiledModuleBytes(Vector<const uint8_t>(start, length));
+ }
+
+ private:
+ i::HandleScope internal_scope_;
+ std::shared_ptr<StreamingDecoder> stream_;
+ std::shared_ptr<TestResolver> test_resolver_;
+};
+
// Create a valid module such that the bytes depend on {n}.
ZoneBuffer GetValidModuleBytes(Zone* zone, int n) {
ZoneBuffer buffer(zone);
@@ -57,11 +87,51 @@ ZoneBuffer GetValidModuleBytes(Zone* zone, int n) {
return buffer;
}
+std::shared_ptr<NativeModule> SyncCompile(Vector<const uint8_t> bytes) {
+ ErrorThrower thrower(CcTest::i_isolate(), "Test");
+ auto enabled_features = WasmFeatures::FromIsolate(CcTest::i_isolate());
+ auto wire_bytes = ModuleWireBytes(bytes.begin(), bytes.end());
+ Handle<WasmModuleObject> module =
+ CcTest::i_isolate()
+ ->wasm_engine()
+ ->SyncCompile(CcTest::i_isolate(), enabled_features, &thrower,
+ wire_bytes)
+ .ToHandleChecked();
+ return module->shared_native_module();
+}
+
+// Module prefix shared by all test modules: header, type and function
+// sections, and the code section header; only the function bodies differ.
+constexpr uint8_t kPrefix[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_v_v), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_v_v, // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(2), // section size
+ U32V_1(1), // functions count
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(7), // section size
+ U32V_1(1), // functions count
+ 5, // body size
+};
+
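+// Two function bodies of the same size that differ in a single constant, so
+// the resulting modules have distinct wire bytes but identical layout.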
+constexpr uint8_t kFunctionA[] = {
+ U32V_1(0), kExprI32Const, U32V_1(0), kExprDrop, kExprEnd,
+};
+constexpr uint8_t kFunctionB[] = {
+ U32V_1(0), kExprI32Const, U32V_1(1), kExprDrop, kExprEnd,
+};
+
+constexpr size_t kPrefixSize = arraysize(kPrefix);
+constexpr size_t kFunctionSize = arraysize(kFunctionA);
+
} // namespace
TEST(TestAsyncCache) {
CcTest::InitializeVM();
- i::HandleScope internal_scope_(CcTest::i_isolate());
+ i::HandleScope internal_scope(CcTest::i_isolate());
AccountingAllocator allocator;
Zone zone(&allocator, "CompilationCacheTester");
@@ -95,6 +165,74 @@ TEST(TestAsyncCache) {
CHECK_NE(resolverA1->native_module(), resolverB->native_module());
}
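+// Streaming the same wire bytes concurrently must yield one shared
+// NativeModule; different bytes must yield a different one.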
+TEST(TestStreamingCache) {
+ CcTest::InitializeVM();
+
+ std::atomic<int> pending(3);
+ auto resolverA1 = std::make_shared<TestResolver>(&pending);
+ auto resolverA2 = std::make_shared<TestResolver>(&pending);
+ auto resolverB = std::make_shared<TestResolver>(&pending);
+
+ StreamTester testerA1(resolverA1);
+ StreamTester testerA2(resolverA2);
+ StreamTester testerB(resolverB);
+
+ // Start receiving kPrefix bytes.
+ testerA1.OnBytesReceived(kPrefix, kPrefixSize);
+ testerA2.OnBytesReceived(kPrefix, kPrefixSize);
+ testerB.OnBytesReceived(kPrefix, kPrefixSize);
+
+ // Receive function bytes and start streaming compilation.
+ testerA1.OnBytesReceived(kFunctionA, kFunctionSize);
+ testerA1.FinishStream();
+ testerA2.OnBytesReceived(kFunctionA, kFunctionSize);
+ testerA2.FinishStream();
+ testerB.OnBytesReceived(kFunctionB, kFunctionSize);
+ testerB.FinishStream();
+
+ while (pending > 0) {
+ v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
+ CcTest::isolate());
+ }
+
+ std::shared_ptr<NativeModule> native_module_A1 = resolverA1->native_module();
+ std::shared_ptr<NativeModule> native_module_A2 = resolverA2->native_module();
+ std::shared_ptr<NativeModule> native_module_B = resolverB->native_module();
+ CHECK_EQ(native_module_A1, native_module_A2);
+ CHECK_NE(native_module_A1, native_module_B);
+}
+
+TEST(TestStreamingAndSyncCache) {
+ CcTest::InitializeVM();
+
+ std::atomic<int> pending(1);
+ auto resolver = std::make_shared<TestResolver>(&pending);
+ StreamTester tester(resolver);
+
+ tester.OnBytesReceived(kPrefix, kPrefixSize);
+
+ // Compile the same module synchronously to make sure we don't deadlock
+ // waiting for streaming compilation to finish.
+ auto full_bytes = OwnedVector<uint8_t>::New(kPrefixSize + kFunctionSize);
+ memcpy(full_bytes.begin(), kPrefix, kPrefixSize);
+ memcpy(full_bytes.begin() + kPrefixSize, kFunctionA, kFunctionSize);
+ auto native_module_sync = SyncCompile(full_bytes.as_vector());
+
+ // Streaming compilation should just discard its native module now and use the
+ // one inserted in the cache by sync compilation.
+ tester.OnBytesReceived(kFunctionA, kFunctionSize);
+ tester.FinishStream();
+
+ while (pending > 0) {
+ v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
+ CcTest::isolate());
+ }
+
+ std::shared_ptr<NativeModule> native_module_streaming =
+ resolver->native_module();
+ CHECK_EQ(native_module_streaming, native_module_sync);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-grow-memory.cc b/deps/v8/test/cctest/wasm/test-grow-memory.cc
index a188707cae..0bf85c2b40 100644
--- a/deps/v8/test/cctest/wasm/test-grow-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-grow-memory.cc
@@ -113,7 +113,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
ManuallyExternalizedBuffer external2(
handle(memory_object->array_buffer(), isolate));
- // Grow using an internal WASM bytecode.
+ // Grow using an internal Wasm bytecode.
result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
CHECK_EQ(26, result);
CHECK(external2.buffer_->was_detached()); // growing always detaches
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
index 3bbc639ca5..4984bf4524 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
@@ -61,18 +61,69 @@ class LiftoffCompileEnvironment {
CHECK_EQ(detected1, detected2);
}
- DebugSideTable GenerateDebugSideTable(
+ std::unique_ptr<DebugSideTable> GenerateDebugSideTable(
std::initializer_list<ValueType> return_types,
std::initializer_list<ValueType> param_types,
- std::initializer_list<uint8_t> raw_function_bytes) {
+ std::initializer_list<uint8_t> raw_function_bytes,
+ std::vector<int> breakpoints = {}) {
auto test_func = AddFunction(return_types, param_types, raw_function_bytes);
- CompilationEnv env = module_builder_.CreateCompilationEnv();
- return GenerateLiftoffDebugSideTable(CcTest::i_isolate()->allocator(), &env,
- test_func.body);
+ CompilationEnv env = module_builder_.CreateCompilationEnv(
+ breakpoints.empty() ? TestingModuleBuilder::kNoDebug
+ : TestingModuleBuilder::kDebug);
+ WasmFeatures detected;
+ std::unique_ptr<DebugSideTable> debug_side_table_via_compilation;
+ ExecuteLiftoffCompilation(
+ CcTest::i_isolate()->allocator(), &env, test_func.body, 0, nullptr,
+ &detected, VectorOf(breakpoints), &debug_side_table_via_compilation);
+
+ // If there are no breakpoints, then {ExecuteLiftoffCompilation} should
+ // provide the same debug side table.
+ if (breakpoints.empty()) {
+ std::unique_ptr<DebugSideTable> debug_side_table =
+ GenerateLiftoffDebugSideTable(CcTest::i_isolate()->allocator(), &env,
+ test_func.body);
+ CheckTableEquals(*debug_side_table, *debug_side_table_via_compilation);
+ }
+
+ return debug_side_table_via_compilation;
}
private:
+ static void CheckTableEquals(const DebugSideTable& a,
+ const DebugSideTable& b) {
+ CHECK_EQ(a.num_locals(), b.num_locals());
+ CHECK(std::equal(a.entries().begin(), a.entries().end(),
+ b.entries().begin(), b.entries().end(),
+ &CheckEntryEquals));
+ }
+
+ static bool CheckEntryEquals(const DebugSideTable::Entry& a,
+ const DebugSideTable::Entry& b) {
+ CHECK_EQ(a.pc_offset(), b.pc_offset());
+ CHECK(std::equal(a.values().begin(), a.values().end(), b.values().begin(),
+ b.values().end(), &CheckValueEquals));
+ return true;
+ }
+
+ static bool CheckValueEquals(const DebugSideTable::Entry::Value& a,
+ const DebugSideTable::Entry::Value& b) {
+ CHECK_EQ(a.type, b.type);
+ CHECK_EQ(a.kind, b.kind);
+ switch (a.kind) {
+ case DebugSideTable::Entry::kConstant:
+ CHECK_EQ(a.i32_const, b.i32_const);
+ break;
+ case DebugSideTable::Entry::kRegister:
+ CHECK_EQ(a.reg_code, b.reg_code);
+ break;
+ case DebugSideTable::Entry::kStack:
+ CHECK_EQ(a.stack_offset, b.stack_offset);
+ break;
+ }
+ return true;
+ }
+
OwnedVector<uint8_t> GenerateFunctionBody(
std::initializer_list<uint8_t> raw_function_bytes) {
// Build the function bytes by prepending the locals decl and appending an
@@ -122,30 +173,52 @@ class LiftoffCompileEnvironment {
};
struct DebugSideTableEntry {
- std::vector<ValueType> stack_types;
- std::vector<std::pair<int, int>> constants;
+ std::vector<DebugSideTable::Entry::Value> values;
+
+ // Construct via vector or implicitly via initializer list.
+ explicit DebugSideTableEntry(std::vector<DebugSideTable::Entry::Value> values)
+ : values(std::move(values)) {}
+ DebugSideTableEntry(
+ std::initializer_list<DebugSideTable::Entry::Value> values)
+ : values(values) {}
bool operator==(const DebugSideTableEntry& other) const {
- return stack_types == other.stack_types && constants == other.constants;
+ if (values.size() != other.values.size()) return false;
+ for (size_t i = 0; i < values.size(); ++i) {
+ if (values[i].type != other.values[i].type) return false;
+ if (values[i].kind != other.values[i].kind) return false;
+ // Stack offsets and register codes are platform dependent, so only check
+ // constants here.
+ if (values[i].kind == DebugSideTable::Entry::kConstant &&
+ values[i].i32_const != other.values[i].i32_const) {
+ return false;
+ }
+ }
+ return true;
}
};
// Debug builds will print the vector of DebugSideTableEntry.
#ifdef DEBUG
std::ostream& operator<<(std::ostream& out, const DebugSideTableEntry& entry) {
- out << "{stack types [";
+ out << "{";
const char* comma = "";
- for (ValueType type : entry.stack_types) {
- out << comma << ValueTypes::TypeName(type);
- comma = ", ";
- }
- comma = "";
- out << "], constants: [";
- for (auto& c : entry.constants) {
- out << comma << "<" << c.first << ", " << c.second << ">";
+ for (auto& v : entry.values) {
+ out << comma << v.type.type_name() << " ";
+ switch (v.kind) {
+ case DebugSideTable::Entry::kConstant:
+ out << "const:" << v.i32_const;
+ break;
+ case DebugSideTable::Entry::kRegister:
+ out << "reg";
+ break;
+ case DebugSideTable::Entry::kStack:
+ out << "stack";
+ break;
+ }
comma = ", ";
}
- return out << "]}";
+ return out << "}";
}
std::ostream& operator<<(std::ostream& out,
@@ -154,28 +227,36 @@ std::ostream& operator<<(std::ostream& out,
}
#endif // DEBUG
-void CheckDebugSideTable(std::vector<ValueType> expected_local_types,
- std::vector<DebugSideTableEntry> expected_entries,
- const wasm::DebugSideTable& debug_side_table) {
- std::vector<ValueType> local_types;
- for (int i = 0; i < debug_side_table.num_locals(); ++i) {
- local_types.push_back(debug_side_table.local_type(i));
- }
+// Named constructors to make the tests more readable.
+DebugSideTable::Entry::Value Constant(ValueType type, int32_t constant) {
+ DebugSideTable::Entry::Value value;
+ value.type = type;
+ value.kind = DebugSideTable::Entry::kConstant;
+ value.i32_const = constant;
+ return value;
+}
+DebugSideTable::Entry::Value Register(ValueType type) {
+ DebugSideTable::Entry::Value value;
+ value.type = type;
+ value.kind = DebugSideTable::Entry::kRegister;
+ return value;
+}
+DebugSideTable::Entry::Value Stack(ValueType type) {
+ DebugSideTable::Entry::Value value;
+ value.type = type;
+ value.kind = DebugSideTable::Entry::kStack;
+ return value;
+}
+
+void CheckDebugSideTable(std::vector<DebugSideTableEntry> expected_entries,
+ const wasm::DebugSideTable* debug_side_table) {
std::vector<DebugSideTableEntry> entries;
- for (auto& entry : debug_side_table.entries()) {
- std::vector<ValueType> stack_types;
- for (int i = 0; i < entry.stack_height(); ++i) {
- stack_types.push_back(entry.stack_type(i));
- }
- std::vector<std::pair<int, int>> constants;
- int locals_plus_stack =
- debug_side_table.num_locals() + entry.stack_height();
- for (int i = 0; i < locals_plus_stack; ++i) {
- if (entry.IsConstant(i)) constants.emplace_back(i, entry.GetConstant(i));
- }
- entries.push_back({std::move(stack_types), std::move(constants)});
+ for (auto& entry : debug_side_table->entries()) {
+ auto values = entry.values();
+ entries.push_back(
+ DebugSideTableEntry{std::vector<DebugSideTable::Entry::Value>{
+ values.begin(), values.end()}});
}
- CHECK_EQ(expected_local_types, local_types);
CHECK_EQ(expected_entries, entries);
}
@@ -223,12 +304,12 @@ TEST(Liftoff_debug_side_table_simple) {
auto debug_side_table = env.GenerateDebugSideTable(
{kWasmI32}, {kWasmI32, kWasmI32},
{WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))});
- CheckDebugSideTable({kWasmI32, kWasmI32},
- {
- // OOL stack check, stack: {}
- {{}, {}},
- },
- debug_side_table);
+ CheckDebugSideTable(
+ {
+ // OOL stack check, locals spilled, stack empty.
+ {Stack(kWasmI32), Stack(kWasmI32)},
+ },
+ debug_side_table.get());
}
TEST(Liftoff_debug_side_table_call) {
@@ -237,14 +318,14 @@ TEST(Liftoff_debug_side_table_call) {
{kWasmI32}, {kWasmI32},
{WASM_I32_ADD(WASM_CALL_FUNCTION(0, WASM_GET_LOCAL(0)),
WASM_GET_LOCAL(0))});
- CheckDebugSideTable({kWasmI32},
- {
- // call, stack: {}
- {{}, {}},
- // OOL stack check, stack: {}
- {{}, {}},
- },
- debug_side_table);
+ CheckDebugSideTable(
+ {
+ // call, local spilled, stack empty.
+ {Stack(kWasmI32)},
+ // OOL stack check, local spilled, stack empty.
+ {Stack(kWasmI32)},
+ },
+ debug_side_table.get());
}
TEST(Liftoff_debug_side_table_call_const) {
@@ -255,14 +336,14 @@ TEST(Liftoff_debug_side_table_call_const) {
{WASM_SET_LOCAL(0, WASM_I32V_1(kConst)),
WASM_I32_ADD(WASM_CALL_FUNCTION(0, WASM_GET_LOCAL(0)),
WASM_GET_LOCAL(0))});
- CheckDebugSideTable({kWasmI32},
- {
- // call, stack: {}, local0 is kConst
- {{}, {{0, kConst}}},
- // OOL stack check, stack: {}
- {{}, {}},
- },
- debug_side_table);
+ CheckDebugSideTable(
+ {
+ // call, local is kConst.
+ {Constant(kWasmI32, kConst)},
+ // OOL stack check, local spilled.
+ {Stack(kWasmI32)},
+ },
+ debug_side_table.get());
}
TEST(Liftoff_debug_side_table_indirect_call) {
@@ -272,18 +353,18 @@ TEST(Liftoff_debug_side_table_indirect_call) {
{kWasmI32}, {kWasmI32},
{WASM_I32_ADD(WASM_CALL_INDIRECT(0, WASM_I32V_1(47), WASM_GET_LOCAL(0)),
WASM_GET_LOCAL(0))});
- CheckDebugSideTable({kWasmI32},
- {
- // indirect call, stack: {}
- {{}, {}},
- // OOL stack check, stack: {}
- {{}, {}},
- // OOL trap (invalid index), stack: {kConst}
- {{kWasmI32}, {{1, kConst}}},
- // OOL trap (sig mismatch), stack: {kConst}
- {{kWasmI32}, {{1, kConst}}},
- },
- debug_side_table);
+ CheckDebugSideTable(
+ {
+ // indirect call, local spilled, stack empty.
+ {Stack(kWasmI32)},
+ // OOL stack check, local spilled, stack empty.
+ {Stack(kWasmI32)},
+ // OOL trap (invalid index), local spilled, stack has {kConst}.
+ {Stack(kWasmI32), Constant(kWasmI32, kConst)},
+ // OOL trap (sig mismatch), local spilled, stack has {kConst}.
+ {Stack(kWasmI32), Constant(kWasmI32, kConst)},
+ },
+ debug_side_table.get());
}
TEST(Liftoff_debug_side_table_loop) {
@@ -292,14 +373,14 @@ TEST(Liftoff_debug_side_table_loop) {
auto debug_side_table = env.GenerateDebugSideTable(
{kWasmI32}, {kWasmI32},
{WASM_I32V_1(kConst), WASM_LOOP(WASM_BR_IF(0, WASM_GET_LOCAL(0)))});
- CheckDebugSideTable({kWasmI32},
- {
- // OOL stack check, stack: {}
- {{}, {}},
- // OOL loop stack check, stack: {kConst}
- {{kWasmI32}, {{1, kConst}}},
- },
- debug_side_table);
+ CheckDebugSideTable(
+ {
+ // OOL stack check, local spilled, stack empty.
+ {Stack(kWasmI32)},
+ // OOL loop stack check, local spilled, stack has {kConst}.
+ {Stack(kWasmI32), Constant(kWasmI32, kConst)},
+ },
+ debug_side_table.get());
}
TEST(Liftoff_debug_side_table_trap) {
@@ -307,16 +388,39 @@ TEST(Liftoff_debug_side_table_trap) {
auto debug_side_table = env.GenerateDebugSideTable(
{kWasmI32}, {kWasmI32, kWasmI32},
{WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))});
- CheckDebugSideTable({kWasmI32, kWasmI32},
- {
- // OOL stack check, stack: {}
- {{}, {}},
- // OOL trap (div by zero), stack: {}
- {{}, {}},
- // OOL trap (result unrepresentable), stack: {}
- {{}, {}},
- },
- debug_side_table);
+ CheckDebugSideTable(
+ {
+ // OOL stack check, locals spilled, stack empty.
+ {Stack(kWasmI32), Stack(kWasmI32)},
+ // OOL trap (div by zero), locals spilled, stack empty.
+ {Stack(kWasmI32), Stack(kWasmI32)},
+ // OOL trap (result unrepresentable), locals spilled, stack empty.
+ {Stack(kWasmI32), Stack(kWasmI32)},
+ },
+ debug_side_table.get());
+}
+
+TEST(Liftoff_breakpoint_simple) {
+ LiftoffCompileEnvironment env;
+ // Set two breakpoints. At both locations, values are live in registers.
+ auto debug_side_table = env.GenerateDebugSideTable(
+ {kWasmI32}, {kWasmI32, kWasmI32},
+ {WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))},
+ {
+ 1, // break at beginning of function (first local.get)
+ 5 // break at i32.add
+ });
+ CheckDebugSideTable(
+ {
+ // First breakpoint, locals in registers.
+ {Register(kWasmI32), Register(kWasmI32)},
+ // Second breakpoint, locals and the two stack values in registers.
+ {Register(kWasmI32), Register(kWasmI32), Register(kWasmI32),
+ Register(kWasmI32)},
+ // OOL stack check, locals spilled, stack empty.
+ {Stack(kWasmI32), Stack(kWasmI32)},
+ },
+ debug_side_table.get());
}
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 94fa0a31b2..f6cdff61ba 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -1419,7 +1419,7 @@ WASM_EXEC_TEST(StoreMem_offset_oob_i64) {
WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
WASM_ZERO);
- byte memsize = ValueTypes::MemSize(machineTypes[m]);
+ byte memsize = machineTypes[m].MemSize();
uint32_t boundary = num_bytes - 8 - memsize;
CHECK_EQ(0, r.Call(boundary)); // in bounds.
CHECK_EQ(0, memcmp(&memory[0], &memory[8 + boundary], memsize));
@@ -1536,9 +1536,9 @@ static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
// Build the selector function.
// =========================================================================
FunctionSig::Builder b(&zone, 1, num_params);
- b.AddReturn(ValueTypes::ValueTypeFor(result));
+ b.AddReturn(ValueType::For(result));
for (int i = 0; i < num_params; i++) {
- b.AddParam(ValueTypes::ValueTypeFor(memtypes[i]));
+ b.AddParam(ValueType::For(memtypes[i]));
}
WasmFunctionCompiler& t = r.NewFunction(b.Build());
BUILD(t, WASM_GET_LOCAL(which));
@@ -1558,7 +1558,7 @@ static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
ADD_CODE(code, WASM_CALL_FUNCTION0(t.function_index()));
// Store the result in a local.
- byte local_index = r.AllocateLocal(ValueTypes::ValueTypeFor(result));
+ byte local_index = r.AllocateLocal(ValueType::For(result));
ADD_CODE(code, kExprLocalSet, local_index);
// Store the result in memory.
@@ -1575,7 +1575,7 @@ static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
r.builder().RandomizeMemory();
CHECK_EQ(kExpected, r.Call());
- int size = ValueTypes::MemSize(result);
+ int size = result.MemSize();
for (int i = 0; i < size; i++) {
int base = (which + 1) * kElemSize;
byte expected = r.builder().raw_mem_at<byte>(base + i);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 082c5d07c9..178f86bac9 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -752,6 +752,52 @@ WASM_EXEC_TEST(I64AtomicAddUseOnlyHighWord) {
CHECK_EQ(0x12345678, r.Call());
}
+WASM_EXEC_TEST(I64AtomicCompareExchangeUseOnlyLowWord) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ memory[1] = 0x1234567890abcdeful;
+ r.builder().SetHasSharedMemory();
+ // Test that we can use just the low word of an I64AtomicCompareExchange.
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V(8), WASM_I64V(1),
+ WASM_I64V(memory[1]), MachineRepresentation::kWord64)));
+ CHECK_EQ(0x90abcdef, r.Call());
+}
+
+WASM_EXEC_TEST(I64AtomicCompareExchangeUseOnlyHighWord) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ memory[1] = 0x1234567890abcdeful;
+ r.builder().SetHasSharedMemory();
+ // Test that we can use just the high word of an I64AtomicCompareExchange.
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_I64_ROR(
+ WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange, WASM_I32V(8), WASM_I64V(1),
+ WASM_I64V(memory[1]), MachineRepresentation::kWord64),
+ WASM_I64V(32))));
+ CHECK_EQ(0x12345678, r.Call());
+}
+
+WASM_EXEC_TEST(I64AtomicCompareExchange32UZeroExtended) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ memory[1] = 0;
+ r.builder().SetHasSharedMemory();
+ // Test that the high word of the expected value is cleared in the return
+ // value.
+ BUILD(r, WASM_I64_EQZ(WASM_ATOMICS_TERNARY_OP(
+ kExprI64AtomicCompareExchange32U, WASM_I32V(8),
+ WASM_I64V(0x1234567800000000), WASM_I64V(0),
+ MachineRepresentation::kWord32)));
+ CHECK_EQ(1, r.Call());
+}
+
} // namespace test_run_wasm_atomics_64
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index 745196210a..cddc6f7468 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -160,17 +160,15 @@ WASM_EXEC_TEST(TryCatchTrapTypeError) {
namespace {
-// TODO(8729): The semantics of this are not yet specified and might change,
-// this test aims at keeping semantics of various execution tiers consistent.
-void TestTryCatchTrap(byte* code, size_t code_size,
- ExecutionTier execution_tier) {
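+// Call a trapping function inside a try/catch block and check that the trap
+// is not caught but propagates to the caller.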
+void TestTrapNotCaught(byte* code, size_t code_size,
+ ExecutionTier execution_tier) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
- WasmRunner<uint32_t, uint32_t> r(execution_tier, nullptr, "main",
- kRuntimeExceptionSupport);
+ WasmRunner<uint32_t> r(execution_tier, nullptr, "main",
+ kRuntimeExceptionSupport);
r.builder().AddMemory(kWasmPageSize);
- constexpr uint32_t kResult0 = 23;
- constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kResultSuccess = 23;
+ constexpr uint32_t kResultCaught = 47;
// Build a trapping helper function.
WasmFunctionCompiler& trap_func = r.NewFunction(sigs.i_ii());
@@ -179,39 +177,36 @@ void TestTryCatchTrap(byte* code, size_t code_size,
// Build the main test function.
BUILD(r, WASM_TRY_CATCH_T(
kWasmI32,
- WASM_STMTS(WASM_I32V(kResult1),
- WASM_IF(WASM_I32_EQZ(WASM_GET_LOCAL(0)),
- WASM_STMTS(WASM_CALL_FUNCTION(
- trap_func.function_index(),
- WASM_I32V(7), WASM_I32V(9)),
- WASM_DROP))),
- WASM_STMTS(WASM_DROP, WASM_I32V(kResult0))));
+ WASM_STMTS(WASM_I32V(kResultSuccess),
+ WASM_CALL_FUNCTION(trap_func.function_index(),
+ WASM_I32V(7), WASM_I32V(9)),
+ WASM_DROP),
+ WASM_STMTS(WASM_DROP, WASM_I32V(kResultCaught))));
// Need to call through JS to allow for creation of stack traces.
- r.CheckCallViaJS(kResult0, 0);
- r.CheckCallViaJS(kResult1, 1);
+ r.CheckCallViaJSTraps();
}
} // namespace
WASM_EXEC_TEST(TryCatchTrapUnreachable) {
byte code[] = {WASM_UNREACHABLE};
- TestTryCatchTrap(code, arraysize(code), execution_tier);
+ TestTrapNotCaught(code, arraysize(code), execution_tier);
}
WASM_EXEC_TEST(TryCatchTrapMemOutOfBounds) {
byte code[] = {WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(-1))};
- TestTryCatchTrap(code, arraysize(code), execution_tier);
+ TestTrapNotCaught(code, arraysize(code), execution_tier);
}
WASM_EXEC_TEST(TryCatchTrapDivByZero) {
byte code[] = {WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(0))};
- TestTryCatchTrap(code, arraysize(code), execution_tier);
+ TestTrapNotCaught(code, arraysize(code), execution_tier);
}
WASM_EXEC_TEST(TryCatchTrapRemByZero) {
byte code[] = {WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(0))};
- TestTryCatchTrap(code, arraysize(code), execution_tier);
+ TestTrapNotCaught(code, arraysize(code), execution_tier);
}
} // namespace test_run_wasm_exceptions
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index c8cc836b87..23ab076e4a 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -48,23 +48,6 @@ using Int8ShiftOp = int8_t (*)(int8_t, int);
EXPERIMENTAL_FLAG_SCOPE(simd); \
RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan); \
} \
- TEST(RunWasm_##name##_interpreter) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kInterpreter); \
- } \
- TEST(RunWasm_##name##_simd_lowered) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kTurbofan); \
- } \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
-
-#define WASM_SIMD_TEST_WITH_LIFTOFF(name) \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- ExecutionTier execution_tier); \
- TEST(RunWasm_##name##_turbofan) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan); \
- } \
TEST(RunWasm_##name##_liftoff) { \
EXPERIMENTAL_FLAG_SCOPE(simd); \
RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kLiftoff); \
@@ -305,6 +288,11 @@ T AndNot(T a, T b) {
return a & ~b;
}
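+// Generic absolute value, used by the I8x16Abs/I16x8Abs/I32x4Abs tests below.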
+template <typename T>
+T Abs(T a) {
+ return std::abs(a);
+}
+
// only used for F64x2 tests below
int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
@@ -318,7 +306,7 @@ int64_t Less(double a, double b) { return a < b ? -1 : 0; }
int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
// Only used for qfma and qfms tests below.
// FMOperation holds the params (a, b, c) for a Multiply-Add or
@@ -393,15 +381,17 @@ static constexpr Vector<const FMOperation<T>> qfms_vector() {
return ArrayVector(qfms_array<T>);
}
-// Fused results only when fma3 feature is enabled, and running on TurboFan.
+// Fused results only when fma3 feature is enabled, and running on TurboFan or
+// Liftoff (which can fall back to TurboFan if FMA is not implemented).
bool ExpectFused(ExecutionTier tier) {
#ifdef V8_TARGET_ARCH_X64
- return CpuFeatures::IsSupported(FMA3) && (tier == ExecutionTier::kTurbofan);
+ return CpuFeatures::IsSupported(FMA3) &&
+ (tier == ExecutionTier::kTurbofan || tier == ExecutionTier::kLiftoff);
#else
- return (tier == ExecutionTier::kTurbofan);
+ return (tier == ExecutionTier::kTurbofan || tier == ExecutionTier::kLiftoff);
#endif
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
} // namespace
@@ -419,6 +409,9 @@ bool ExpectFused(ExecutionTier tier) {
WASM_RETURN1(WASM_ZERO))
#define TO_BYTE(val) static_cast<byte>(val)
+// TODO(v8:10258): We now have support for emitting multi-byte opcodes, so
+// this can change to simply {op} once the decoder is fixed to decode
+// multi-byte opcodes.
#define WASM_SIMD_OP(op) kSimdPrefix, TO_BYTE(op)
#define WASM_SIMD_SPLAT(Type, ...) __VA_ARGS__, WASM_SIMD_OP(kExpr##Type##Splat)
#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
@@ -519,6 +512,11 @@ bool ExpectFused(ExecutionTier tier) {
EXPERIMENTAL_FLAG_SCOPE(simd); \
RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kTurbofan); \
} \
+ TEST(RunWasm_##name##_liftoff) { \
+ if (!CpuFeatures::SupportsWasmSimd128()) return; \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kLiftoff); \
+ } \
TEST(RunWasm_##name##_interpreter) { \
EXPERIMENTAL_FLAG_SCOPE(simd); \
RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kInterpreter); \
@@ -550,7 +548,7 @@ WASM_SIMD_TEST(S128Globals) {
// Set up a global to hold input and output vectors.
int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
- BUILD(r, WASM_SET_GLOBAL(1, WASM_GET_GLOBAL(0)), WASM_ONE);
+ BUILD_V(r, WASM_SET_GLOBAL(1, WASM_GET_GLOBAL(0)), WASM_ONE);
FOR_INT32_INPUTS(x) {
for (int i = 0; i < 4; i++) {
@@ -565,7 +563,7 @@ WASM_SIMD_TEST(S128Globals) {
}
}
-WASM_SIMD_TEST_WITH_LIFTOFF(F32x4Splat) {
+WASM_SIMD_TEST(F32x4Splat) {
WasmRunner<int32_t, float> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
float* g = r.builder().AddGlobal<float>(kWasmS128);
@@ -753,11 +751,13 @@ WASM_SIMD_TEST(F32x4Sqrt) {
}
WASM_SIMD_TEST(F32x4RecipApprox) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
base::Recip, false /* !exact */);
}
WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
base::RecipSqrt, false /* !exact */);
}
@@ -883,8 +883,9 @@ WASM_SIMD_TEST(F32x4Le) {
RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
// Set up global to hold mask output.
float* g = r.builder().AddGlobal<float>(kWasmS128);
@@ -909,6 +910,7 @@ WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
}
WASM_SIMD_TEST_NO_LOWERING(F32x4Qfms) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
// Set up global to hold mask output.
float* g = r.builder().AddGlobal<float>(kWasmS128);
@@ -931,15 +933,15 @@ WASM_SIMD_TEST_NO_LOWERING(F32x4Qfms) {
}
}
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
byte param1 = 0;
- BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
- WASM_ONE);
+ BUILD_V(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
+ WASM_ONE);
FOR_INT64_INPUTS(x) {
r.Call(x);
@@ -1014,22 +1016,28 @@ void RunI64x2ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
// Intentionally shift by 64, should be no-op.
for (int shift = 1; shift <= 64; shift++) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int64_t* g_imm = r.builder().AddGlobal<int64_t>(kWasmS128);
+ int64_t* g_mem = r.builder().AddGlobal<int64_t>(kWasmS128);
byte value = 0;
- byte shift_index = r.AllocateLocal(kWasmI32);
- byte simd1 = r.AllocateLocal(kWasmS128);
- BUILD(r,
- WASM_SET_LOCAL(simd1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
- WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
- WASM_GET_LOCAL(shift_index))),
- WASM_ONE);
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_SET_LOCAL(simd, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd),
+ WASM_I32V(shift))),
+ WASM_SET_GLOBAL(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_GET_LOCAL(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+ r.builder().WriteMemory(&memory[0], shift);
FOR_INT64_INPUTS(x) {
r.Call(x);
int64_t expected = expected_op(x, shift);
for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_mem[i]));
}
}
}
@@ -1052,6 +1060,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int64BinOp expected_op) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
// Global to hold output.
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
@@ -1086,7 +1095,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2Sub) {
base::SubWithWraparound);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
}
@@ -1128,7 +1137,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeU,
UnsignedGreaterEqual);
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(F64x2Splat) {
WasmRunner<int32_t, double> r(execution_tier, lower_simd);
@@ -1197,7 +1206,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
CHECK_EQ(1., ReadLittleEndianValue<double>(&g1[1]));
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLaneWithI64x2) {
WasmRunner<int64_t> r(execution_tier, lower_simd);
BUILD(r, WASM_IF_ELSE_L(
@@ -1217,7 +1226,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
WASM_I64V(1), WASM_I64V(0)));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
bool IsExtreme(double x) {
double abs_x = std::fabs(x);
@@ -1480,7 +1489,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2Mul) {
base::MulWithWraparound);
}
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(I64x2MinS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MinS, Minimum);
}
@@ -1498,10 +1507,11 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2MaxU) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MaxU,
UnsignedMaximum);
}
-#endif // V8_TARGET_ARCH_X64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
// Set up global to hold mask output.
double* g = r.builder().AddGlobal<double>(kWasmS128);
@@ -1526,6 +1536,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
}
WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
// Set up global to hold mask output.
double* g = r.builder().AddGlobal<double>(kWasmS128);
@@ -1548,7 +1559,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) {
}
}
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST(I32x4Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -1650,6 +1661,68 @@ WASM_SIMD_TEST(I16x8ReplaceLane) {
}
}
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+WASM_SIMD_TEST_NO_LOWERING(I8x16BitMask) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ byte value1 = r.AllocateLocal(kWasmS128);
+
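+ // Splat the input to all lanes, then overwrite lane 0 with 0 and lane 1
+ // with -1, so the two lowest mask bits are fixed.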
+ BUILD(r, WASM_SET_LOCAL(value1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(0))),
+ WASM_SET_LOCAL(value1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 0, WASM_GET_LOCAL(value1), WASM_I32V(0))),
+ WASM_SET_LOCAL(value1, WASM_SIMD_I8x16_REPLACE_LANE(
+ 1, WASM_GET_LOCAL(value1), WASM_I32V(-1))),
+ WASM_SIMD_UNOP(kExprI8x16BitMask, WASM_GET_LOCAL(value1)));
+
+ FOR_INT8_INPUTS(x) {
+ int32_t actual = r.Call(x);
+ // Lane 0 is always 0 (positive), lane 1 is always -1.
+ int32_t expected = std::signbit(static_cast<double>(x)) ? 0xFFFE : 0x0002;
+ CHECK_EQ(actual, expected);
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I16x8BitMask) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ byte value1 = r.AllocateLocal(kWasmS128);
+
+ BUILD(r, WASM_SET_LOCAL(value1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(0))),
+ WASM_SET_LOCAL(value1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 0, WASM_GET_LOCAL(value1), WASM_I32V(0))),
+ WASM_SET_LOCAL(value1, WASM_SIMD_I16x8_REPLACE_LANE(
+ 1, WASM_GET_LOCAL(value1), WASM_I32V(-1))),
+ WASM_SIMD_UNOP(kExprI16x8BitMask, WASM_GET_LOCAL(value1)));
+
+ FOR_INT16_INPUTS(x) {
+ int32_t actual = r.Call(x);
+ // Lane 0 is always 0 (positive), lane 1 is always -1.
+ int32_t expected = std::signbit(static_cast<double>(x)) ? 0xFE : 2;
+ CHECK_EQ(actual, expected);
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4BitMask) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ byte value1 = r.AllocateLocal(kWasmS128);
+
+ BUILD(r, WASM_SET_LOCAL(value1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0))),
+ WASM_SET_LOCAL(value1, WASM_SIMD_I32x4_REPLACE_LANE(
+ 0, WASM_GET_LOCAL(value1), WASM_I32V(0))),
+ WASM_SET_LOCAL(value1, WASM_SIMD_I32x4_REPLACE_LANE(
+ 1, WASM_GET_LOCAL(value1), WASM_I32V(-1))),
+ WASM_SIMD_UNOP(kExprI32x4BitMask, WASM_GET_LOCAL(value1)));
+
+ FOR_INT32_INPUTS(x) {
+ int32_t actual = r.Call(x);
+ // Lane 0 is always 0 (positive), lane 1 is always -1.
+ int32_t expected = std::signbit(static_cast<double>(x)) ? 0xE : 2;
+ CHECK_EQ(actual, expected);
+ }
+}
+#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+
WASM_SIMD_TEST(I8x16Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -1828,6 +1901,10 @@ WASM_SIMD_TEST(I32x4Neg) {
base::NegateWithWraparound);
}
+WASM_SIMD_TEST(I32x4Abs) {
+ RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Abs, Abs);
+}
+
WASM_SIMD_TEST(S128Not) {
RunI32x4UnOpTest(execution_tier, lower_simd, kExprS128Not, Not);
}
@@ -1955,21 +2032,28 @@ void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
// Intentionally shift by 32, should be no-op.
for (int shift = 1; shift <= 32; shift++) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int32_t* g_imm = r.builder().AddGlobal<int32_t>(kWasmS128);
+ int32_t* g_mem = r.builder().AddGlobal<int32_t>(kWasmS128);
byte value = 0;
- byte shift_index = r.AllocateLocal(kWasmI32);
- byte simd1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
- WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
- WASM_GET_LOCAL(shift_index))),
- WASM_ONE);
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_SET_LOCAL(simd, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd),
+ WASM_I32V(shift))),
+ WASM_SET_GLOBAL(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_GET_LOCAL(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+ r.builder().WriteMemory(&memory[0], shift);
FOR_INT32_INPUTS(x) {
r.Call(x);
int32_t expected = expected_op(x, shift);
for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_mem[i]));
}
}
}
@@ -2080,6 +2164,10 @@ WASM_SIMD_TEST(I16x8Neg) {
base::NegateWithWraparound);
}
+WASM_SIMD_TEST(I16x8Abs) {
+ RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Abs, Abs);
+}
+
template <typename T = int16_t, typename OpType = T (*)(T, T)>
void RunI16x8BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, OpType expected_op) {
@@ -2202,7 +2290,7 @@ WASM_SIMD_TEST(I16x8LeU) {
UnsignedLessEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8RoundingAverageU) {
+WASM_SIMD_TEST(I16x8RoundingAverageU) {
RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd,
kExprI16x8RoundingAverageU,
base::RoundingAverageUnsigned);
@@ -2213,22 +2301,28 @@ void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
// Intentionally shift by 16, should be no-op.
for (int shift = 1; shift <= 16; shift++) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int16_t* g_imm = r.builder().AddGlobal<int16_t>(kWasmS128);
+ int16_t* g_mem = r.builder().AddGlobal<int16_t>(kWasmS128);
byte value = 0;
- byte simd1 = r.AllocateLocal(kWasmS128);
- byte shift_index = r.AllocateLocal(kWasmI32);
- BUILD(r,
- WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
- WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
- WASM_GET_LOCAL(shift_index))),
- WASM_ONE);
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_SET_LOCAL(simd, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd),
+ WASM_I32V(shift))),
+ WASM_SET_GLOBAL(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_GET_LOCAL(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+ r.builder().WriteMemory(&memory[0], shift);
FOR_INT16_INPUTS(x) {
r.Call(x);
int16_t expected = expected_op(x, shift);
for (int i = 0; i < 8; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_mem[i]));
}
}
}
@@ -2275,6 +2369,10 @@ WASM_SIMD_TEST(I8x16Neg) {
base::NegateWithWraparound);
}
+WASM_SIMD_TEST(I8x16Abs) {
+ RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Abs, Abs);
+}
+
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -2426,7 +2524,7 @@ WASM_SIMD_TEST(I8x16Mul) {
base::MulWithWraparound);
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16RoundingAverageU) {
+WASM_SIMD_TEST(I8x16RoundingAverageU) {
RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd,
kExprI8x16RoundingAverageU,
base::RoundingAverageUnsigned);
@@ -2437,22 +2535,28 @@ void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
// Intentionally shift by 8, should be no-op.
for (int shift = 1; shift <= 8; shift++) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int8_t* g_imm = r.builder().AddGlobal<int8_t>(kWasmS128);
+ int8_t* g_mem = r.builder().AddGlobal<int8_t>(kWasmS128);
byte value = 0;
- byte simd1 = r.AllocateLocal(kWasmS128);
- byte shift_index = r.AllocateLocal(kWasmI32);
- BUILD(r,
- WASM_SET_LOCAL(simd1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
- WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
- WASM_GET_LOCAL(shift_index))),
- WASM_ONE);
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_SET_LOCAL(simd, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd),
+ WASM_I32V(shift))),
+ WASM_SET_GLOBAL(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_GET_LOCAL(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+ r.builder().WriteMemory(&memory[0], shift);
FOR_INT8_INPUTS(x) {
r.Call(x);
int8_t expected = expected_op(x, shift);
for (int i = 0; i < 16; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_mem[i]));
}
}
}
@@ -2585,18 +2689,21 @@ void RunBinaryLaneOpTest(
}
WASM_SIMD_TEST(I32x4AddHoriz) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
// Inputs are [0 1 2 3] and [4 5 6 7].
RunBinaryLaneOpTest<int32_t>(execution_tier, lower_simd, kExprI32x4AddHoriz,
{{1, 5, 9, 13}});
}
WASM_SIMD_TEST(I16x8AddHoriz) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
// Inputs are [0 1 2 3 4 5 6 7] and [8 9 10 11 12 13 14 15].
RunBinaryLaneOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8AddHoriz,
{{1, 5, 9, 13, 17, 21, 25, 29}});
}
WASM_SIMD_TEST(F32x4AddHoriz) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
// Inputs are [0.0f 1.0f 2.0f 3.0f] and [4.0f 5.0f 6.0f 7.0f].
RunBinaryLaneOpTest<float>(execution_tier, lower_simd, kExprF32x4AddHoriz,
{{1.0f, 5.0f, 9.0f, 13.0f}});
@@ -2932,6 +3039,7 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
// test inputs. Test inputs with all true, all false, one true, and one false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type) \
WASM_SIMD_TEST(ReductionTest##lanes) { \
+ FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte zero = r.AllocateLocal(kWasmS128); \
@@ -3320,15 +3428,15 @@ void RunLoadSplatTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(S8x16LoadSplat) {
+WASM_SIMD_TEST(S8x16LoadSplat) {
RunLoadSplatTest<int8_t>(execution_tier, lower_simd, kExprS8x16LoadSplat);
}
-WASM_SIMD_TEST_NO_LOWERING(S16x8LoadSplat) {
+WASM_SIMD_TEST(S16x8LoadSplat) {
RunLoadSplatTest<int16_t>(execution_tier, lower_simd, kExprS16x8LoadSplat);
}
-WASM_SIMD_TEST_NO_LOWERING(S32x4LoadSplat) {
+WASM_SIMD_TEST(S32x4LoadSplat) {
RunLoadSplatTest<int32_t>(execution_tier, lower_simd, kExprS32x4LoadSplat);
}
@@ -3360,21 +3468,21 @@ void RunLoadExtendTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8Load8x8U) {
+WASM_SIMD_TEST(I16x8Load8x8U) {
RunLoadExtendTest<uint8_t, uint16_t>(execution_tier, lower_simd,
kExprI16x8Load8x8U);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8Load8x8S) {
+WASM_SIMD_TEST(I16x8Load8x8S) {
RunLoadExtendTest<int8_t, int16_t>(execution_tier, lower_simd,
kExprI16x8Load8x8S);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4Load16x4U) {
+WASM_SIMD_TEST(I32x4Load16x4U) {
RunLoadExtendTest<uint16_t, uint32_t>(execution_tier, lower_simd,
kExprI32x4Load16x4U);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4Load16x4S) {
+WASM_SIMD_TEST(I32x4Load16x4S) {
RunLoadExtendTest<int16_t, int32_t>(execution_tier, lower_simd,
kExprI32x4Load16x4S);
}
@@ -3390,9 +3498,10 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2Load32x2S) {
}
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AnyTrue) { \
+ FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
@@ -3413,6 +3522,7 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AllTrue) { \
+ FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
@@ -3431,7 +3541,7 @@ WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
- // V8_TARGET_ARCH_ARM
+ // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST(BitSelect) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -3564,7 +3674,6 @@ WASM_EXTRACT_I16x8_TEST(S, UINT16) WASM_EXTRACT_I16x8_TEST(I, INT16)
#undef WASM_EXTRACT_I8x16_TEST
#undef WASM_SIMD_TEST
-#undef WASM_SIMD_TEST_WITH_LIFTOFF
#undef WASM_SIMD_CHECK_LANE_S
#undef WASM_SIMD_CHECK_LANE_U
#undef TO_BYTE
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index b84322f190..1aebac3c76 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -1571,8 +1571,7 @@ WASM_EXEC_TEST(LoadMem_offset_oob) {
r.builder().RandomizeMemory(1116 + static_cast<int>(m));
constexpr byte offset = 8;
- uint32_t boundary =
- num_bytes - offset - ValueTypes::MemSize(machineTypes[m]);
+ uint32_t boundary = num_bytes - offset - machineTypes[m].MemSize();
BUILD(r, WASM_LOAD_MEM_OFFSET(machineTypes[m], offset, WASM_GET_LOCAL(0)),
WASM_DROP, WASM_ZERO);
@@ -1718,7 +1717,7 @@ WASM_EXEC_TEST(StoreMem_offset_oob) {
WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
WASM_ZERO);
- byte memsize = ValueTypes::MemSize(machineTypes[m]);
+ byte memsize = machineTypes[m].MemSize();
uint32_t boundary = num_bytes - 8 - memsize;
CHECK_EQ(0, r.Call(boundary)); // in bounds.
CHECK_EQ(0, memcmp(&memory[0], &memory[8 + boundary], memsize));
@@ -2009,7 +2008,7 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
compiler::Graph graph(&zone);
compiler::JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr,
&machine);
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig->parameter_count() == 1) {
byte code[] = {WASM_NO_LOCALS, kExprLocalGet, 0, static_cast<byte>(opcode),
@@ -2643,9 +2642,9 @@ static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
// Build the selector function.
// =========================================================================
FunctionSig::Builder b(&zone, 1, num_params);
- b.AddReturn(ValueTypes::ValueTypeFor(result));
+ b.AddReturn(ValueType::For(result));
for (int i = 0; i < num_params; ++i) {
- b.AddParam(ValueTypes::ValueTypeFor(memtypes[i]));
+ b.AddParam(ValueType::For(memtypes[i]));
}
WasmFunctionCompiler& t = r.NewFunction(b.Build());
BUILD(t, WASM_GET_LOCAL(which));
@@ -2665,7 +2664,7 @@ static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
ADD_CODE(code, WASM_CALL_FUNCTION0(t.function_index()));
// Store the result in a local.
- byte local_index = r.AllocateLocal(ValueTypes::ValueTypeFor(result));
+ byte local_index = r.AllocateLocal(ValueType::For(result));
ADD_CODE(code, kExprLocalSet, local_index);
// Store the result in memory.
@@ -2682,7 +2681,7 @@ static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
r.builder().RandomizeMemory();
CHECK_EQ(kExpected, r.Call());
- int size = ValueTypes::MemSize(result);
+ int size = result.MemSize();
for (int i = 0; i < size; ++i) {
int base = (which + 1) * kElemSize;
byte expected = r.builder().raw_mem_at<byte>(base + i);
@@ -2740,7 +2739,7 @@ WASM_EXEC_TEST(MultiReturnSub) {
template <typename T>
void RunMultiReturnSelect(ExecutionTier execution_tier, const T* inputs) {
EXPERIMENTAL_FLAG_SCOPE(mv);
- ValueType type = ValueTypes::ValueTypeFor(MachineTypeForC<T>());
+ ValueType type = ValueType::For(MachineTypeForC<T>());
ValueType storage[] = {type, type, type, type, type, type};
const size_t kNumReturns = 2;
const size_t kNumParams = arraysize(storage) - kNumReturns;
@@ -3498,7 +3497,7 @@ void BinOpOnDifferentRegisters(
for (int i = 0; i < num_locals; ++i) {
ADD_CODE(
init_locals_code,
- WASM_SET_LOCAL(i, WASM_LOAD_MEM(ValueTypes::MachineTypeFor(type),
+ WASM_SET_LOCAL(i, WASM_LOAD_MEM(type.machine_type(),
WASM_I32V_2(sizeof(ctype) * i))));
}
// {write_locals_code} is shared by all code generated in the loop below.
@@ -3506,7 +3505,7 @@ void BinOpOnDifferentRegisters(
// Write locals back into memory, shifted by one element to the right.
for (int i = 0; i < num_locals; ++i) {
ADD_CODE(write_locals_code,
- WASM_STORE_MEM(ValueTypes::MachineTypeFor(type),
+ WASM_STORE_MEM(type.machine_type(),
WASM_I32V_2(sizeof(ctype) * (i + 1)),
WASM_GET_LOCAL(i)));
}
@@ -3521,7 +3520,7 @@ void BinOpOnDifferentRegisters(
std::vector<byte> code(init_locals_code);
ADD_CODE(code,
// Store the result of the binary operation at memory[0].
- WASM_STORE_MEM(ValueTypes::MachineTypeFor(type), WASM_ZERO,
+ WASM_STORE_MEM(type.machine_type(), WASM_ZERO,
WASM_BINOP(opcode, WASM_GET_LOCAL(lhs),
WASM_GET_LOCAL(rhs))),
// Return 0.
@@ -3753,9 +3752,9 @@ TEST(Liftoff_tier_up) {
memcpy(buffer.get(), sub_code->instructions().begin(), sub_size);
desc.buffer = buffer.get();
desc.instr_size = static_cast<int>(sub_size);
- std::unique_ptr<WasmCode> new_code = native_module->AddCode(
- add.function_index(), desc, 0, 0, {}, OwnedVector<byte>(),
- WasmCode::kFunction, ExecutionTier::kTurbofan);
+ std::unique_ptr<WasmCode> new_code =
+ native_module->AddCode(add.function_index(), desc, 0, 0, {}, {},
+ WasmCode::kFunction, ExecutionTier::kTurbofan);
native_module->PublishCode(std::move(new_code));
// Second run should now execute {sub}.
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 08c0c1dd46..4d3f83daff 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -1215,6 +1215,39 @@ STREAM_TEST(TestCompileErrorFunctionName) {
}
}
+STREAM_TEST(TestSetModuleCodeSection) {
+ StreamTester tester;
+
+ uint8_t code[] = {
+ U32V_1(1), // functions count
+ U32V_1(4), // body size
+ U32V_1(0), // locals count
+ kExprLocalGet, 0, kExprEnd // body
+ };
+
+ const uint8_t bytes[] = {
+ WASM_MODULE_HEADER, // module header
+ kTypeSectionCode, // section code
+ U32V_1(1 + SIZEOF_SIG_ENTRY_x_x), // section size
+ U32V_1(1), // type count
+ SIG_ENTRY_x_x(kLocalI32, kLocalI32), // signature entry
+ kFunctionSectionCode, // section code
+ U32V_1(1 + 1), // section size
+ U32V_1(1), // functions count
+ 0, // signature index
+ kCodeSectionCode, // section code
+ U32V_1(arraysize(code)), // section size
+ };
+
+ tester.OnBytesReceived(bytes, arraysize(bytes));
+ tester.OnBytesReceived(code, arraysize(code));
+ tester.FinishStream();
+ tester.RunCompilerTasks();
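+ // The code section payload starts right after the header in {bytes} and
+ // spans exactly the function bodies in {code}.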
+ CHECK_EQ(tester.native_module()->module()->code.offset(), arraysize(bytes));
+ CHECK_EQ(tester.native_module()->module()->code.length(), arraysize(code));
+ CHECK(tester.IsPromiseFulfilled());
+}
+
#undef STREAM_TEST
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 79c4c8eaec..9f011ecf38 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -139,6 +139,7 @@ class BreakHandler : public debug::DebugDelegate {
Handle<BreakPoint> SetBreakpoint(WasmRunnerBase* runner, int function_index,
int byte_offset,
int expected_set_byte_offset = -1) {
+ runner->TierDown();
int func_offset =
runner->builder().GetFunctionAt(function_index)->code.offset();
int code_offset = func_offset + byte_offset;
@@ -190,17 +191,17 @@ struct WasmValWrapper {
// Only needed in debug builds. Avoid unused warning otherwise.
#ifdef DEBUG
std::ostream& operator<<(std::ostream& out, const WasmValWrapper& wrapper) {
- switch (wrapper.val.type()) {
- case kWasmI32:
+ switch (wrapper.val.type().kind()) {
+ case ValueType::kI32:
out << "i32: " << wrapper.val.to<int32_t>();
break;
- case kWasmI64:
+ case ValueType::kI64:
out << "i64: " << wrapper.val.to<int64_t>();
break;
- case kWasmF32:
+ case ValueType::kF32:
out << "f32: " << wrapper.val.to<float>();
break;
- case kWasmF64:
+ case ValueType::kF64:
out << "f64: " << wrapper.val.to<double>();
break;
default:
diff --git a/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc b/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc
new file mode 100644
index 0000000000..5a7b2bc201
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc
@@ -0,0 +1,315 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <initializer_list>
+
+#include "src/api/api-inl.h"
+#include "src/base/macros.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/compiler/heap-refs.h"
+#include "src/debug/debug-evaluate.h"
+#include "src/debug/debug-interface.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/frames.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/property-descriptor.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
+#include "src/wasm/compilation-environment.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-debug-evaluate.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-tier.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+template <typename... FunctionArgsT>
+class TestCode {
+ public:
+ TestCode(WasmRunnerBase* runner, std::initializer_list<byte> code)
+ : compiler_(&runner->NewFunction<FunctionArgsT...>()), code_(code) {
+ compiler_->Build(code.begin(), code.end());
+ }
+
+ Handle<BreakPoint> BreakOnReturn(WasmRunnerBase* runner) {
+ runner->TierDown();
+ uint32_t return_offset_in_function = FindReturn();
+
+ int function_index = compiler_->function_index();
+ int function_offset =
+ runner->builder().GetFunctionAt(function_index)->code.offset();
+ int return_offset_in_module = function_offset + return_offset_in_function;
+
+ Handle<WasmInstanceObject> instance = runner->builder().instance_object();
+ Handle<Script> script(instance->module_object().script(),
+ runner->main_isolate());
+ static int break_index = 0;
+ Handle<BreakPoint> break_point =
+ runner->main_isolate()->factory()->NewBreakPoint(
+ break_index++, runner->main_isolate()->factory()->empty_string());
+ CHECK(WasmScript::SetBreakPoint(script, &return_offset_in_module,
+ break_point));
+ int set_breakpoint_offset = return_offset_in_module - function_offset;
+ // Also set a breakpoint on the debug info of the instance directly, since
+ // the instance chain is not set up properly in tests.
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ WasmDebugInfo::SetBreakpoint(debug_info, function_index,
+ set_breakpoint_offset);
+
+ return break_point;
+ }
+
+ MaybeHandle<Object> Run(WasmRunnerBase* runner) {
+ Isolate* isolate = runner->main_isolate();
+ Handle<JSFunction> fun_wrapper =
+ runner->builder().WrapCode(compiler_->function_index());
+ Handle<Object> global(isolate->context().global_object(), isolate);
+ return Execution::Call(isolate, fun_wrapper, global, 0, nullptr);
+ }
+
+ private:
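+ // Returns the offset of the first kExprReturn opcode in the function body.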
+ uint32_t FindReturn() const {
+ for (auto i = code_.begin(); i != code_.end();
+ i += OpcodeLength(&*i, &*code_.end())) {
+ if (*i == kExprReturn) {
+ return static_cast<uint32_t>(std::distance(code_.begin(), i));
+ }
+ }
+ UNREACHABLE();
+ }
+
+ WasmFunctionCompiler* compiler_;
+ std::vector<byte> code_;
+};
+
+class WasmEvaluatorBuilder {
+ public:
+ explicit WasmEvaluatorBuilder(ExecutionTier execution_tier,
+ uint32_t min_memory = 1,
+ uint32_t max_memory = 1)
+ : zone_(&allocator_, ZONE_NAME), builder_(&zone_) {
+ get_memory_function_index = AddImport<void, uint32_t, uint32_t, uint32_t>(
+ CStrVector("__getMemory"));
+ wasm_format_function =
+ builder_.AddFunction(WasmRunnerBase::CreateSig<uint32_t>(&zone_));
+ wasm_format_function->SetName(CStrVector("wasm_format"));
+ builder_.AddExport(CStrVector("wasm_format"), wasm_format_function);
+ builder_.SetMinMemorySize(min_memory);
+ builder_.SetMaxMemorySize(max_memory);
+ }
+
+ template <typename ReturnT, typename... ArgTs>
+ uint32_t AddImport(Vector<const char> name) {
+ return builder_.AddImport(
+ name, WasmRunnerBase::CreateSig<ReturnT, ArgTs...>(&zone_));
+ }
+
+ void push_back(std::initializer_list<byte> code) {
+ wasm_format_function->EmitCode(code.begin(),
+ static_cast<uint32_t>(code.size()));
+ }
+
+ void CallGetMemory(std::initializer_list<byte> args) {
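+ // {args} encode (source offset, length, destination offset), as expected
+ // by the __getMemory import.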
+ push_back(args);
+ push_back({WASM_CALL_FUNCTION0(get_memory_function_index)});
+ }
+
+ ZoneBuffer bytes() {
+ ZoneBuffer bytes(&zone_);
+ builder_.WriteTo(&bytes);
+ return bytes;
+ }
+
+ private:
+ v8::internal::AccountingAllocator allocator_;
+ Zone zone_;
+ WasmModuleBuilder builder_;
+ uint32_t get_memory_function_index = 0;
+ WasmFunctionBuilder* wasm_format_function = nullptr;
+};
+
+class WasmBreakHandler : public debug::DebugDelegate {
+ public:
+ struct EvaluationResult {
+ Maybe<std::string> result = Nothing<std::string>();
+ Maybe<std::string> error = Nothing<std::string>();
+ };
+
+ WasmBreakHandler(Isolate* isolate, ZoneBuffer evaluator_bytes)
+ : isolate_(isolate),
+ evaluator_bytes_(std::move(evaluator_bytes)),
+ result_(Nothing<EvaluationResult>()) {
+ v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_), this);
+ }
+
+ ~WasmBreakHandler() override {
+ v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_),
+ nullptr);
+ }
+
+ const Maybe<EvaluationResult>& result() const { return result_; }
+
+ private:
+ Isolate* isolate_;
+ ZoneBuffer evaluator_bytes_;
+ Maybe<EvaluationResult> result_;
+
+ Maybe<std::string> GetPendingExceptionAsString() {
+ if (!isolate_->has_pending_exception()) return Nothing<std::string>();
+ Handle<Object> exception(isolate_->pending_exception(), isolate_);
+ isolate_->clear_pending_exception();
+
+ Handle<String> exception_string;
+ if (!Object::ToString(isolate_, exception).ToHandle(&exception_string)) {
+ return Just<std::string>("");
+ }
+ return Just<std::string>(exception_string->ToCString().get());
+ }
+
+ void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+ const std::vector<int>&) override {
+ // Check the current position.
+ StackTraceFrameIterator frame_it(isolate_);
+
+ FrameSummary::WasmInterpretedFrameSummary summary =
+ FrameSummary::GetTop(frame_it.frame()).AsWasmInterpreted();
+ Handle<WasmInstanceObject> instance = summary.wasm_instance();
+ WasmInterpreter::FramePtr frame =
+ instance->debug_info().GetInterpretedFrame(frame_it.frame()->fp(), 0);
+
+ MaybeHandle<String> result_handle = v8::internal::wasm::DebugEvaluate(
+ {evaluator_bytes_.begin(), evaluator_bytes_.size()}, instance,
+ std::move(frame));
+
+ Maybe<std::string> error_message = GetPendingExceptionAsString();
+ Maybe<std::string> result_message =
+ result_handle.is_null()
+ ? Nothing<std::string>()
+ : Just<std::string>(
+ result_handle.ToHandleChecked()->ToCString().get());
+
+ isolate_->clear_pending_exception();
+ result_ = Just<EvaluationResult>({result_message, error_message});
+ }
+};
+
+WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_CompileFailed) {
+ WasmRunner<int> runner(execution_tier);
+
+ TestCode<int> code(&runner, {WASM_RETURN1(WASM_I32V_1(32))});
+ code.BreakOnReturn(&runner);
+
+ WasmEvaluatorBuilder evaluator(execution_tier);
+ // Create a module that doesn't compile because the END bytecode is missing.
+ evaluator.push_back({WASM_RETURN1(WASM_I32V_1(33))});
+
+ Isolate* isolate = runner.main_isolate();
+ WasmBreakHandler break_handler(isolate, evaluator.bytes());
+ CHECK(!code.Run(&runner).is_null());
+
+ WasmBreakHandler::EvaluationResult result =
+ break_handler.result().ToChecked();
+ CHECK(result.result.IsNothing());
+ CHECK_NE(result.error.ToChecked().find(
+ "function body must end with \"end\" opcode"),
+ std::string::npos);
+}
+
+WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_MissingEntrypoint) {
+ WasmRunner<int> runner(execution_tier);
+
+ TestCode<int> code(&runner, {WASM_RETURN1(WASM_I32V_1(32))});
+ code.BreakOnReturn(&runner);
+
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ WasmModuleBuilder evaluator(&zone);
+ ZoneBuffer evaluator_bytes(&zone);
+ evaluator.WriteTo(&evaluator_bytes);
+
+ Isolate* isolate = runner.main_isolate();
+ WasmBreakHandler break_handler(isolate, std::move(evaluator_bytes));
+ CHECK(!code.Run(&runner).is_null());
+
+ WasmBreakHandler::EvaluationResult result =
+ break_handler.result().ToChecked();
+ CHECK(result.result.IsNothing());
+ CHECK_NE(result.error.ToChecked().find("Missing export: \"wasm_format\""),
+ std::string::npos);
+}
+
+WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_ExecuteFailed_SEGV) {
+ WasmRunner<int> runner(execution_tier);
+ runner.builder().AddMemoryElems<int32_t>(64);
+
+ TestCode<int> code(&runner, {WASM_RETURN1(WASM_I32V_1(32))});
+
+ // Build an evaluator module whose memory access is out of bounds at runtime.
+ WasmEvaluatorBuilder evaluator(execution_tier);
+ code.BreakOnReturn(&runner);
+
+ // Copy 1 byte from debuggee address 32 to an out-of-bounds destination.
+ evaluator.CallGetMemory(
+ {WASM_I32V_1(32), WASM_I32V_1(1), WASM_I32V_3((1 << 16) + 1)});
+ evaluator.push_back({WASM_RETURN1(WASM_I32V_1(33)), WASM_END});
+
+ Isolate* isolate = runner.main_isolate();
+ WasmBreakHandler break_handler(isolate, evaluator.bytes());
+ CHECK(!code.Run(&runner).is_null());
+
+ WasmBreakHandler::EvaluationResult result =
+ break_handler.result().ToChecked();
+ CHECK(result.result.IsNothing());
+ CHECK_NE(
+ result.error.ToChecked().find("Illegal access to out-of-bounds memory"),
+ std::string::npos);
+}
+
+WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_LinearMemory) {
+ WasmRunner<int> runner(execution_tier);
+ runner.builder().AddMemoryElems<int32_t>(64);
+
+ TestCode<int> code(
+ &runner,
+ {WASM_STORE_MEM(MachineType::Int32(), WASM_I32V_1(32), WASM_I32V_2('A')),
+ WASM_RETURN1(WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(32)))});
+ code.BreakOnReturn(&runner);
+
+ WasmEvaluatorBuilder evaluator(execution_tier);
+ // Load 4 bytes from debuggee memory at address 32, and store them at
+ // offset 33 of the evaluator's linear memory.
+ evaluator.CallGetMemory({WASM_I32V_1(32), WASM_I32V_1(4), WASM_I32V_1(33)});
+ evaluator.push_back({WASM_RETURN1(WASM_I32V_1(33)), WASM_END});
+
+ Isolate* isolate = runner.main_isolate();
+ WasmBreakHandler break_handler(isolate, evaluator.bytes());
+ CHECK(!code.Run(&runner).is_null());
+
+ WasmBreakHandler::EvaluationResult result =
+ break_handler.result().ToChecked();
+ CHECK(result.error.IsNothing());
+ CHECK_EQ(result.result.ToChecked(), "A");
+}
+
+} // namespace
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 1ee1b3dfc6..864b8885a2 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -4,6 +4,7 @@
#include "src/api/api-inl.h"
#include "src/codegen/assembler-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
@@ -151,6 +152,63 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
expected_exceptions);
}
+// Trigger a trap in wasm; the stack trace should contain the source URL.
+WASM_EXEC_TEST(CollectDetailedWasmStack_WasmUrl) {
+ // Create a WasmRunner with stack checks and traps enabled.
+ WasmRunner<int> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
+
+ std::vector<byte> code(1, kExprUnreachable);
+ r.Build(code.data(), code.data() + code.size());
+
+ WasmFunctionCompiler& f = r.NewFunction<int>("call_main");
+ BUILD(f, WASM_CALL_FUNCTION0(0));
+ uint32_t wasm_index = f.function_index();
+
+ Handle<JSFunction> js_wasm_wrapper = r.builder().WrapCode(wasm_index);
+
+ Handle<JSFunction> js_trampoline = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CompileRun("(function callFn(fn) { fn(); })"))));
+
+ Isolate* isolate = js_wasm_wrapper->GetIsolate();
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
+ v8::StackTrace::kOverview);
+
+ // Set the wasm script source url.
+ const char* url = "http://example.com/example.wasm";
+ const Handle<String> source_url =
+ isolate->factory()->InternalizeUtf8String(url);
+ r.builder().instance_object()->module_object().script().set_source_url(
+ *source_url);
+
+ // Run the js wrapper.
+ Handle<Object> global(isolate->context().global_object(), isolate);
+ MaybeHandle<Object> maybe_exc;
+ Handle<Object> args[] = {js_wasm_wrapper};
+ MaybeHandle<Object> maybe_return_obj =
+ Execution::TryCall(isolate, js_trampoline, global, 1, args,
+ Execution::MessageHandling::kReport, &maybe_exc);
+
+ CHECK(maybe_return_obj.is_null());
+ Handle<Object> exception = maybe_exc.ToHandleChecked();
+
+ // Extract stack trace from the exception.
+ Handle<FixedArray> stack_trace_object =
+ isolate->GetDetailedStackTrace(Handle<JSObject>::cast(exception));
+ CHECK(!stack_trace_object.is_null());
+ Handle<StackTraceFrame> stack_frame = Handle<StackTraceFrame>::cast(
+ handle(stack_trace_object->get(0), isolate));
+
+ MaybeHandle<String> maybe_stack_trace_str =
+ SerializeStackTraceFrame(isolate, stack_frame);
+ CHECK(!maybe_stack_trace_str.is_null());
+ Handle<String> stack_trace_str = maybe_stack_trace_str.ToHandleChecked();
+
+ // Check if the source_url is part of the stack trace.
+ CHECK_NE(std::string(stack_trace_str->ToCString().get()).find(url),
+ std::string::npos);
+}
+
// Trigger a trap in wasm, stack should be JS -> wasm -> wasm.
WASM_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
for (int pos_shift = 0; pos_shift < 3; ++pos_shift) {
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index e2e6458da8..96980e6df7 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -8,14 +8,45 @@
#include "src/diagnostics/code-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/wasm/graph-builder-interface.h"
+#include "src/wasm/leb-helper.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
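+// Emits {op} into {code}: unprefixed opcodes as a single byte, prefixed
+// opcodes as the prefix byte followed by the LEB-encoded opcode.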
+template <>
+void AppendSingle(std::vector<byte>* code, WasmOpcode op) {
+ // We do not yet have opcodes that take up more than 2 bytes (decoded). But if
+ // that changes, this will need to be updated.
+ DCHECK_EQ(0, op >> 16);
+ byte prefix = (op >> 8) & 0xff;
+ byte opcode = op & 0xff;
+
+ if (!prefix) {
+ code->push_back(opcode);
+ return;
+ }
+
+ // Ensure the prefix is really one of the supported prefixed opcodes.
+ DCHECK(WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(prefix)));
+ code->push_back(prefix);
+
+ // Decoded opcodes fit in a byte (0x00-0xff).
+ DCHECK_LE(LEBHelper::sizeof_u32v(opcode), 2);
+ // Therefore, the encoding needs max 2 bytes.
+ uint8_t encoded[2];
+ uint8_t* d = encoded;
+ // write_u32v advances d past the last byte written.
+ LEBHelper::write_u32v(&d, opcode);
+ for (uint8_t* p = encoded; p < d; p++) {
+ code->push_back(*p);
+ }
+}
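+// Worked example (illustrative): a prefixed opcode with decoded value 0xfd85
+// would be emitted as the prefix byte 0xfd followed by the LEB128 encoding of
+// the opcode byte 0x85, i.e. the two bytes 0x85 0x01.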
+
TestingModuleBuilder::TestingModuleBuilder(
Zone* zone, ManuallyImportedJSFunction* maybe_import, ExecutionTier tier,
RuntimeExceptionSupport exception_support, LowerSimd lower_simd)
@@ -69,6 +100,12 @@ TestingModuleBuilder::TestingModuleBuilder(
}
}
+TestingModuleBuilder::~TestingModuleBuilder() {
+ // When the native module dies and is erased from the cache, it is expected to
+ // have either valid bytes or no bytes at all.
+ native_module_->SetWireBytes({});
+}
+
byte* TestingModuleBuilder::AddMemory(uint32_t size, SharedFlag shared) {
CHECK(!test_module_->has_memory);
CHECK_NULL(mem_start_);
@@ -99,7 +136,8 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size, SharedFlag shared) {
return mem_start_;
}
-uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name,
+uint32_t TestingModuleBuilder::AddFunction(const FunctionSig* sig,
+ const char* name,
FunctionType type) {
if (test_module_->functions.size() == 0) {
// TODO(titzer): Reserving space here to avoid the underlying WasmFunction
@@ -107,7 +145,13 @@ uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name,
test_module_->functions.reserve(kMaxFunctions);
}
uint32_t index = static_cast<uint32_t>(test_module_->functions.size());
- test_module_->functions.push_back({sig, index, 0, {0, 0}, false, false});
+ test_module_->functions.push_back({sig, // sig
+ index, // func_index
+ 0, // sig_index
+ {0, 0}, // code
+ false, // imported
+ false, // exported
+ false}); // declared
if (type == kImport) {
DCHECK_EQ(0, test_module_->num_declared_functions);
++test_module_->num_imported_functions;
@@ -131,8 +175,9 @@ uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name,
sig);
std::unique_ptr<wasm::WasmCode> code = native_module_->AddCode(
index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kInterpreterEntry,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), wasm::WasmCode::kInterpreterEntry,
wasm::ExecutionTier::kInterpreter);
native_module_->PublishCode(std::move(code));
}
@@ -228,7 +273,7 @@ uint32_t TestingModuleBuilder::AddBytes(Vector<const byte> bytes) {
return bytes_offset;
}
-uint32_t TestingModuleBuilder::AddException(FunctionSig* sig) {
+uint32_t TestingModuleBuilder::AddException(const FunctionSig* sig) {
DCHECK_EQ(0, sig->return_count());
uint32_t index = static_cast<uint32_t>(test_module_->exceptions.size());
test_module_->exceptions.push_back(WasmException{sig});
@@ -283,7 +328,7 @@ uint32_t TestingModuleBuilder::AddPassiveElementSegment(
uint32_t index = static_cast<uint32_t>(test_module_->elem_segments.size());
DCHECK_EQ(index, dropped_elem_segments_.size());
- test_module_->elem_segments.emplace_back();
+ test_module_->elem_segments.emplace_back(false);
auto& elem_segment = test_module_->elem_segments.back();
elem_segment.entries = entries;
@@ -293,18 +338,22 @@ uint32_t TestingModuleBuilder::AddPassiveElementSegment(
return index;
}
-CompilationEnv TestingModuleBuilder::CreateCompilationEnv() {
+CompilationEnv TestingModuleBuilder::CreateCompilationEnv(
+ AssumeDebugging debug) {
// This is a hack so we don't need to call
// trap_handler::IsTrapHandlerEnabled().
const bool is_trap_handler_enabled =
V8_TRAP_HANDLER_SUPPORTED && i::FLAG_wasm_trap_handler;
return {test_module_ptr_,
is_trap_handler_enabled ? kUseTrapHandler : kNoTrapHandler,
- runtime_exception_support_, enabled_features_, lower_simd()};
+ runtime_exception_support_,
+ enabled_features_,
+ lower_simd(),
+ debug};
}
const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
- byte size = ValueTypes::MemSize(ValueTypes::MachineTypeFor(type));
+ byte size = type.element_size_bytes();
global_offset = (global_offset + size - 1) & ~(size - 1); // align
test_module_->globals.push_back(
{type, true, WasmInitExpr(), {global_offset}, false, false});
@@ -342,7 +391,7 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
}
void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
- Zone* zone, FunctionSig* sig,
+ Zone* zone, const FunctionSig* sig,
const byte* start, const byte* end) {
WasmFeatures unused_detected_features;
FunctionBody body(sig, 0, start, end);
@@ -369,7 +418,7 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
}
void TestBuildingGraph(Zone* zone, compiler::JSGraph* jsgraph,
- CompilationEnv* module, FunctionSig* sig,
+ CompilationEnv* module, const FunctionSig* sig,
compiler::SourcePositionTable* source_position_table,
const byte* start, const byte* end) {
compiler::WasmGraphBuilder builder(module, zone, jsgraph, sig,
@@ -542,7 +591,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate());
}
-WasmFunctionCompiler::WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
+WasmFunctionCompiler::WasmFunctionCompiler(Zone* zone, const FunctionSig* sig,
TestingModuleBuilder* builder,
const char* name)
: GraphAndBuilders(zone),
@@ -561,23 +610,24 @@ WasmFunctionCompiler::WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
WasmFunctionCompiler::~WasmFunctionCompiler() = default;
-FunctionSig* WasmRunnerBase::CreateSig(MachineType return_type,
+/* static */
+FunctionSig* WasmRunnerBase::CreateSig(Zone* zone, MachineType return_type,
Vector<MachineType> param_types) {
int return_count = return_type.IsNone() ? 0 : 1;
int param_count = param_types.length();
// Allocate storage array in zone.
- ValueType* sig_types = zone_.NewArray<ValueType>(return_count + param_count);
+ ValueType* sig_types = zone->NewArray<ValueType>(return_count + param_count);
// Convert machine types to local types, and check that there are no
// MachineType::None()'s in the parameters.
int idx = 0;
- if (return_count) sig_types[idx++] = ValueTypes::ValueTypeFor(return_type);
+ if (return_count) sig_types[idx++] = ValueType::For(return_type);
for (MachineType param : param_types) {
CHECK_NE(MachineType::None(), param);
- sig_types[idx++] = ValueTypes::ValueTypeFor(param);
+ sig_types[idx++] = ValueType::For(param);
}
- return new (&zone_) FunctionSig(return_count, param_count, sig_types);
+ return new (zone) FunctionSig(return_count, param_count, sig_types);
}
// static
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index b66f6ffa36..d1bc9293b6 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -75,9 +75,45 @@ using compiler::Node;
r.Build(code, code + arraysize(code)); \
} while (false)
+template <typename T>
+void AppendSingle(std::vector<byte>* code, T t) {
+ static_assert(std::is_integral<T>::value,
+ "Special types need specializations");
+ code->push_back(t);
+}
+
+// Specialized for WasmOpcode.
+template <>
+void AppendSingle<WasmOpcode>(std::vector<byte>* code, WasmOpcode op);
+
+template <typename... T>
+void Append(std::vector<byte>* code, T... ts) {
+ static_assert(sizeof...(ts) == 0, "Base case for appending bytes to code.");
+}
+
+template <typename First, typename... Rest>
+void Append(std::vector<byte>* code, First first, Rest... rest) {
+ AppendSingle(code, first);
+ Append(code, rest...);
+}
+
+// Like BUILD but pushes code bytes into a std::vector instead of an array
+// initializer. This is useful for opcodes (like SIMD opcodes) that are
+// LEB128-encoded and hence variable-sized. We use recursive template
+// instantiations with variadic template arguments, so that the Append calls
+// can handle either bytes or opcodes. AppendSingle is specialized for
+// WasmOpcode, and appends multiple bytes. This allows existing callers to
+// swap out the BUILD macro for the BUILD_V macro without changes. Also see
+// https://crbug.com/v8/10258.
+#define BUILD_V(r, ...) \
+ do { \
+ std::vector<byte> code; \
+ Append(&code, __VA_ARGS__); \
+ r.Build(code.data(), code.data() + code.size()); \
+ } while (false)
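+// A minimal usage sketch (illustrative, assuming a void signature and a
+// module with memory):
+//   BUILD_V(r, kExprI32Const, 0,     // address
+//           kExprS128LoadMem, 0, 0,  // prefixed opcode, expands to 2+ bytes
+//           kExprDrop);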
+
// For tests that must manually import a JSFunction with source code.
struct ManuallyImportedJSFunction {
- FunctionSig* sig;
+ const FunctionSig* sig;
Handle<JSFunction> js_function;
};
@@ -89,6 +125,7 @@ class TestingModuleBuilder {
public:
TestingModuleBuilder(Zone*, ManuallyImportedJSFunction*, ExecutionTier,
RuntimeExceptionSupport, LowerSimd);
+ ~TestingModuleBuilder();
void ChangeOriginToAsmjs() { test_module_->origin = kAsmJsSloppyOrigin; }
@@ -103,13 +140,12 @@ class TestingModuleBuilder {
}
template <typename T>
- T* AddGlobal(
- ValueType type = ValueTypes::ValueTypeFor(MachineTypeForC<T>())) {
+ T* AddGlobal(ValueType type = ValueType::For(MachineTypeForC<T>())) {
const WasmGlobal* global = AddGlobal(type);
return reinterpret_cast<T*>(globals_data_ + global->offset);
}
- byte AddSignature(FunctionSig* sig) {
+ byte AddSignature(const FunctionSig* sig) {
DCHECK_EQ(test_module_->signatures.size(),
test_module_->signature_ids.size());
test_module_->signatures.push_back(sig);
@@ -180,7 +216,8 @@ class TestingModuleBuilder {
void SetHasSharedMemory() { test_module_->has_shared_memory = true; }
enum FunctionType { kImport, kWasm };
- uint32_t AddFunction(FunctionSig* sig, const char* name, FunctionType type);
+ uint32_t AddFunction(const FunctionSig* sig, const char* name,
+ FunctionType type);
// Freezes the signature map of the module and allocates the storage for
// export wrappers.
@@ -196,7 +233,7 @@ class TestingModuleBuilder {
uint32_t AddBytes(Vector<const byte> bytes);
- uint32_t AddException(FunctionSig* sig);
+ uint32_t AddException(const FunctionSig* sig);
uint32_t AddPassiveDataSegment(Vector<const byte> bytes);
uint32_t AddPassiveElementSegment(const std::vector<uint32_t>& entries);
@@ -221,7 +258,10 @@ class TestingModuleBuilder {
void SetExecutable() { native_module_->SetExecutable(true); }
- CompilationEnv CreateCompilationEnv();
+ void TierDown() { native_module_->TierDown(isolate_); }
+
+ enum AssumeDebugging : bool { kDebug = true, kNoDebug = false };
+ CompilationEnv CreateCompilationEnv(AssumeDebugging = kNoDebug);
ExecutionTier execution_tier() const { return execution_tier_; }
@@ -257,7 +297,7 @@ class TestingModuleBuilder {
};
void TestBuildingGraph(Zone* zone, compiler::JSGraph* jsgraph,
- CompilationEnv* module, FunctionSig* sig,
+ CompilationEnv* module, const FunctionSig* sig,
compiler::SourcePositionTable* source_position_table,
const byte* start, const byte* end);
@@ -336,11 +376,11 @@ class WasmFunctionCompiler : public compiler::GraphAndBuilders {
private:
friend class WasmRunnerBase;
- WasmFunctionCompiler(Zone* zone, FunctionSig* sig,
+ WasmFunctionCompiler(Zone* zone, const FunctionSig* sig,
TestingModuleBuilder* builder, const char* name);
compiler::JSGraph jsgraph;
- FunctionSig* sig;
+ const FunctionSig* sig;
// The call descriptor is initialized when the function is compiled.
CallDescriptor* descriptor_;
TestingModuleBuilder* builder_;
@@ -382,7 +422,7 @@ class WasmRunnerBase : public HandleAndZoneScope {
// Resets the state for building the next function.
// The main function called will be the last generated function.
// Returns the index of the previously built function.
- WasmFunctionCompiler& NewFunction(FunctionSig* sig,
+ WasmFunctionCompiler& NewFunction(const FunctionSig* sig,
const char* name = nullptr) {
functions_.emplace_back(
new WasmFunctionCompiler(&zone_, sig, &builder_, name));
@@ -406,18 +446,25 @@ class WasmRunnerBase : public HandleAndZoneScope {
bool interpret() { return builder_.interpret(); }
+ void TierDown() { builder_.TierDown(); }
+
template <typename ReturnType, typename... ParamTypes>
FunctionSig* CreateSig() {
+ return WasmRunnerBase::CreateSig<ReturnType, ParamTypes...>(&zone_);
+ }
+
+ template <typename ReturnType, typename... ParamTypes>
+ static FunctionSig* CreateSig(Zone* zone) {
std::array<MachineType, sizeof...(ParamTypes)> param_machine_types{
{MachineTypeForC<ParamTypes>()...}};
Vector<MachineType> param_vec(param_machine_types.data(),
param_machine_types.size());
- return CreateSig(MachineTypeForC<ReturnType>(), param_vec);
+ return CreateSig(zone, MachineTypeForC<ReturnType>(), param_vec);
}
private:
- FunctionSig* CreateSig(MachineType return_type,
- Vector<MachineType> param_types);
+ static FunctionSig* CreateSig(Zone* zone, MachineType return_type,
+ Vector<MachineType> param_types);
protected:
v8::internal::AccountingAllocator allocator_;
@@ -559,6 +606,10 @@ class WasmRunner : public WasmRunnerBase {
CheckCallApplyViaJS(expected, function()->func_index, buffer, sizeof...(p));
}
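+ // CheckCallViaJS treats an expected value of 0xDEADBEEF as a sentinel
+ // meaning "this call is expected to trap" rather than as a real return
+ // value (explanatory note, assuming the runner's existing convention).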
+ void CheckCallViaJSTraps(ParamTypes... p) {
+ CheckCallViaJS(static_cast<double>(0xDEADBEEF), p...);
+ }
+
void CheckUsedExecutionTier(ExecutionTier expected_tier) {
// Liftoff can fail and fallback to Turbofan, so check that the function
// gets compiled by the tier requested, to guard against accidental success.
diff --git a/deps/v8/test/common/types-fuzz.h b/deps/v8/test/common/types-fuzz.h
index 21a7b7cafc..eacedd376a 100644
--- a/deps/v8/test/common/types-fuzz.h
+++ b/deps/v8/test/common/types-fuzz.h
@@ -53,29 +53,34 @@ class Types {
SignedSmall = Type::SignedSmall();
UnsignedSmall = Type::UnsignedSmall();
- object_map =
+ Handle<i::Map> object_map =
isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-
- smi = handle(Smi::FromInt(666), isolate);
- boxed_smi = isolate->factory()->NewHeapNumber(666);
- signed32 = isolate->factory()->NewHeapNumber(0x40000000);
- float1 = isolate->factory()->NewHeapNumber(1.53);
- float2 = isolate->factory()->NewHeapNumber(0.53);
+ Handle<i::Smi> smi = handle(Smi::FromInt(666), isolate);
+ Handle<i::HeapNumber> boxed_smi = isolate->factory()->NewHeapNumber(666);
+ Handle<i::HeapNumber> signed32 =
+ isolate->factory()->NewHeapNumber(0x40000000);
+ Handle<i::HeapNumber> float1 = isolate->factory()->NewHeapNumber(1.53);
+ Handle<i::HeapNumber> float2 = isolate->factory()->NewHeapNumber(0.53);
// float3 is identical to float1 in order to test that OtherNumberConstant
// types are equal by double value and not by handle pointer value.
- float3 = isolate->factory()->NewHeapNumber(1.53);
- object1 = isolate->factory()->NewJSObjectFromMap(object_map);
- object2 = isolate->factory()->NewJSObjectFromMap(object_map);
- array = isolate->factory()->NewJSArray(20);
- uninitialized = isolate->factory()->uninitialized_value();
- SmiConstant = Type::NewConstant(js_heap_broker(), smi, zone);
- Signed32Constant = Type::NewConstant(js_heap_broker(), signed32, zone);
-
- ObjectConstant1 = Type::HeapConstant(js_heap_broker(), object1, zone);
- ObjectConstant2 = Type::HeapConstant(js_heap_broker(), object2, zone);
- ArrayConstant = Type::HeapConstant(js_heap_broker(), array, zone);
+ Handle<i::HeapNumber> float3 = isolate->factory()->NewHeapNumber(1.53);
+ Handle<i::JSObject> object1 =
+ isolate->factory()->NewJSObjectFromMap(object_map);
+ Handle<i::JSObject> object2 =
+ isolate->factory()->NewJSObjectFromMap(object_map);
+ Handle<i::JSArray> array = isolate->factory()->NewJSArray(20);
+ Handle<i::Oddball> uninitialized =
+ isolate->factory()->uninitialized_value();
+ Handle<i::Oddball> undefined = isolate->factory()->undefined_value();
+ Handle<i::HeapNumber> nan = isolate->factory()->nan_value();
+
+ SmiConstant = Type::Constant(js_heap_broker(), smi, zone);
+ Signed32Constant = Type::Constant(js_heap_broker(), signed32, zone);
+ ObjectConstant1 = Type::Constant(js_heap_broker(), object1, zone);
+ ObjectConstant2 = Type::Constant(js_heap_broker(), object2, zone);
+ ArrayConstant = Type::Constant(js_heap_broker(), array, zone);
UninitializedConstant =
- Type::HeapConstant(js_heap_broker(), uninitialized, zone);
+ Type::Constant(js_heap_broker(), uninitialized, zone);
values.push_back(smi);
values.push_back(boxed_smi);
@@ -84,11 +89,13 @@ class Types {
values.push_back(object2);
values.push_back(array);
values.push_back(uninitialized);
+ values.push_back(undefined);
+ values.push_back(nan);
values.push_back(float1);
values.push_back(float2);
values.push_back(float3);
for (ValueVector::iterator it = values.begin(); it != values.end(); ++it) {
- types.push_back(Type::NewConstant(js_heap_broker(), *it, zone));
+ types.push_back(Type::Constant(js_heap_broker(), *it, zone));
}
integers.push_back(isolate->factory()->NewNumber(-V8_INFINITY));
@@ -109,19 +116,6 @@ class Types {
}
}
- Handle<i::Map> object_map;
-
- Handle<i::Smi> smi;
- Handle<i::HeapNumber> boxed_smi;
- Handle<i::HeapNumber> signed32;
- Handle<i::HeapNumber> float1;
- Handle<i::HeapNumber> float2;
- Handle<i::HeapNumber> float3;
- Handle<i::JSObject> object1;
- Handle<i::JSObject> object2;
- Handle<i::JSArray> array;
- Handle<i::Oddball> uninitialized;
-
#define DECLARE_TYPE(name, value) Type name;
PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
@@ -145,12 +139,12 @@ class Types {
ValueVector values;
ValueVector integers; // "Integer" values used for range limits.
- Type NewConstant(Handle<i::Object> value) {
- return Type::NewConstant(js_heap_broker(), value, zone_);
+ Type Constant(Handle<i::Object> value) {
+ return Type::Constant(js_heap_broker(), value, zone_);
}
Type HeapConstant(Handle<i::HeapObject> value) {
- return Type::HeapConstant(js_heap_broker(), value, zone_);
+ return Type::Constant(js_heap_broker(), value, zone_);
}
Type Range(double min, double max) { return Type::Range(min, max, zone_); }
@@ -188,7 +182,7 @@ class Types {
}
case 1: { // constant
int i = rng_->NextInt(static_cast<int>(values.size()));
- return Type::NewConstant(js_heap_broker(), values[i], zone_);
+ return Type::Constant(js_heap_broker(), values[i], zone_);
}
case 2: { // range
int i = rng_->NextInt(static_cast<int>(integers.size()));
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 5a914c79a2..1da88a406a 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -27,8 +27,10 @@
#define ACTIVE_NO_INDEX 0
#define PASSIVE 1
#define ACTIVE_WITH_INDEX 2
+#define DECLARATIVE 3
#define PASSIVE_WITH_ELEMENTS 5
#define ACTIVE_WITH_ELEMENTS 6
+#define DECLARATIVE_WITH_ELEMENTS 7
// The table index field in an element segment was repurposed as a flags field.
// To specify a table index, we have to set the flag value to 2, followed by
@@ -87,9 +89,8 @@
#define WASM_BLOCK_F(...) kExprBlock, kLocalF32, __VA_ARGS__, kExprEnd
#define WASM_BLOCK_D(...) kExprBlock, kLocalF64, __VA_ARGS__, kExprEnd
-#define WASM_BLOCK_T(t, ...) \
- kExprBlock, static_cast<byte>(ValueTypes::ValueTypeCodeFor(t)), __VA_ARGS__, \
- kExprEnd
+#define WASM_BLOCK_T(t, ...) \
+ kExprBlock, static_cast<byte>((t).value_type_code()), __VA_ARGS__, kExprEnd
#define WASM_BLOCK_X(index, ...) \
kExprBlock, static_cast<byte>(index), __VA_ARGS__, kExprEnd
@@ -102,18 +103,16 @@
#define WASM_LOOP_F(...) kExprLoop, kLocalF32, __VA_ARGS__, kExprEnd
#define WASM_LOOP_D(...) kExprLoop, kLocalF64, __VA_ARGS__, kExprEnd
-#define WASM_LOOP_T(t, ...) \
- kExprLoop, static_cast<byte>(ValueTypes::ValueTypeCodeFor(t)), __VA_ARGS__, \
- kExprEnd
+#define WASM_LOOP_T(t, ...) \
+ kExprLoop, static_cast<byte>((t).value_type_code()), __VA_ARGS__, kExprEnd
#define WASM_LOOP_X(index, ...) \
kExprLoop, static_cast<byte>(index), __VA_ARGS__, kExprEnd
#define WASM_IF(cond, ...) cond, kExprIf, kLocalVoid, __VA_ARGS__, kExprEnd
-#define WASM_IF_T(t, cond, ...) \
- cond, kExprIf, static_cast<byte>(ValueTypes::ValueTypeCodeFor(t)), \
- __VA_ARGS__, kExprEnd
+#define WASM_IF_T(t, cond, ...) \
+ cond, kExprIf, static_cast<byte>((t).value_type_code()), __VA_ARGS__, kExprEnd
#define WASM_IF_X(index, cond, ...) \
cond, kExprIf, static_cast<byte>(index), __VA_ARGS__, kExprEnd
@@ -130,16 +129,16 @@
#define WASM_IF_ELSE_D(cond, tstmt, fstmt) \
cond, kExprIf, kLocalF64, tstmt, kExprElse, fstmt, kExprEnd
-#define WASM_IF_ELSE_T(t, cond, tstmt, fstmt) \
- cond, kExprIf, static_cast<byte>(ValueTypes::ValueTypeCodeFor(t)), tstmt, \
- kExprElse, fstmt, kExprEnd
+#define WASM_IF_ELSE_T(t, cond, tstmt, fstmt) \
+ cond, kExprIf, static_cast<byte>((t).value_type_code()), tstmt, kExprElse, \
+ fstmt, kExprEnd
#define WASM_IF_ELSE_X(index, cond, tstmt, fstmt) \
cond, kExprIf, static_cast<byte>(index), tstmt, kExprElse, fstmt, kExprEnd
-#define WASM_TRY_CATCH_T(t, trystmt, catchstmt) \
- kExprTry, static_cast<byte>(ValueTypes::ValueTypeCodeFor(t)), trystmt, \
- kExprCatch, catchstmt, kExprEnd
+#define WASM_TRY_CATCH_T(t, trystmt, catchstmt) \
+ kExprTry, static_cast<byte>((t).value_type_code()), trystmt, kExprCatch, \
+ catchstmt, kExprEnd
#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
#define WASM_SELECT_I(tval, fval, cond) \
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 5495f9330a..067188bba0 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -77,7 +77,8 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
return false;
}
int function_index = function->function_index();
- FunctionSig* signature = instance->module()->functions[function_index].sig;
+ const FunctionSig* signature =
+ instance->module()->functions[function_index].sig;
size_t param_count = signature->parameter_count();
std::unique_ptr<WasmValue[]> arguments(new WasmValue[param_count]);
@@ -88,29 +89,29 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
// Fill the parameters up with default values.
for (size_t i = argc; i < param_count; ++i) {
- switch (signature->GetParam(i)) {
- case kWasmI32:
+ switch (signature->GetParam(i).kind()) {
+ case ValueType::kI32:
arguments[i] = WasmValue(int32_t{0});
break;
- case kWasmI64:
+ case ValueType::kI64:
arguments[i] = WasmValue(int64_t{0});
break;
- case kWasmF32:
+ case ValueType::kF32:
arguments[i] = WasmValue(0.0f);
break;
- case kWasmF64:
+ case ValueType::kF64:
arguments[i] = WasmValue(0.0);
break;
- case kWasmAnyRef:
- case kWasmFuncRef:
- case kWasmNullRef:
- case kWasmExnRef:
+ case ValueType::kAnyRef:
+ case ValueType::kFuncRef:
+ case ValueType::kNullRef:
+ case ValueType::kExnRef:
arguments[i] =
WasmValue(Handle<Object>::cast(isolate->factory()->null_value()));
break;
- case kWasmStmt:
- case kWasmBottom:
- case kWasmS128:
+ case ValueType::kStmt:
+ case ValueType::kBottom:
+ case ValueType::kS128:
UNREACHABLE();
}
}
diff --git a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js
new file mode 100644
index 0000000000..e7ba10349c
--- /dev/null
+++ b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-anyref
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// Create a simple Wasm module.
+function create_builder(i) {
+  const builder = new WasmModuleBuilder();
+  builder.addFunction('main', kSig_i_r)
+      .addBody([
+        kExprLocalGet, 0, kExprRefIsNull,
+        ...wasmI32Const(i),
+        kExprI32Add])
+      .exportFunc();
+  return builder;
+}
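+// main(x) computes ref.is_null(x) + i, e.g. main(null) returns i + 1.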
+
+const instance = create_builder(0).instantiate();
+
+// Test recompilation.
+const Debug = new DebugWrapper();
+Debug.enable();
+assertFalse(%IsLiftoffFunction(instance.exports.main));
+const newInstance = create_builder(1).instantiate();
+assertFalse(%IsLiftoffFunction(newInstance.exports.main));
+
+// Async.
+async function testTierDownToLiftoffAsync() {
+  Debug.disable();
+  const asyncInstance = await create_builder(2).asyncInstantiate();
+
+  // Test recompilation.
+  Debug.enable();
+  assertFalse(%IsLiftoffFunction(asyncInstance.exports.main));
+  const newAsyncInstance = await create_builder(3).asyncInstantiate();
+  assertFalse(%IsLiftoffFunction(newAsyncInstance.exports.main));
+}
+
+assertPromiseResult(testTierDownToLiftoffAsync());
diff --git a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js
new file mode 100644
index 0000000000..20dc1e1c5e
--- /dev/null
+++ b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js
@@ -0,0 +1,62 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+const num_functions = 200;
+
+// Create a simple Wasm script.
+function create_builder(delta = 0) {
+  const builder = new WasmModuleBuilder();
+  for (let i = 0; i < num_functions; ++i) {
+    builder.addFunction('f' + i, kSig_i_v)
+        .addBody(wasmI32Const(i + delta))
+        .exportFunc();
+  }
+  return builder;
+}
+
+function checkTieredDown(instance) {
+  for (let i = 0; i < num_functions; ++i) {
+    assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
+  }
+}
+
+function checkTieredUp(instance) {
+  // Busy-wait until all functions are tiered up, i.e. until no exported
+  // function runs on Liftoff any more.
+  let num_liftoff_functions = 0;
+  while (true) {
+    num_liftoff_functions = 0;
+    for (let i = 0; i < num_functions; ++i) {
+      if (%IsLiftoffFunction(instance.exports['f' + i])) {
+        num_liftoff_functions++;
+      }
+    }
+    if (num_liftoff_functions == 0) return;
+  }
+}
+
+const instance = create_builder().instantiate();
+const Debug = new DebugWrapper();
+Debug.enable();
+checkTieredDown(instance);
+const newInstance = create_builder(num_functions * 2).instantiate();
+checkTieredDown(newInstance);
+Debug.disable();
+checkTieredUp(instance);
+checkTieredUp(newInstance);
+
+// Async.
+async function testTierDownToLiftoffAsync() {
+  const asyncInstance = await create_builder(num_functions).asyncInstantiate();
+  Debug.enable();
+  checkTieredDown(asyncInstance);
+  const newAsyncInstance =
+      await create_builder(num_functions * 3).asyncInstantiate();
+  checkTieredDown(newAsyncInstance);
+  Debug.disable();
+  checkTieredUp(asyncInstance);
+  checkTieredUp(newAsyncInstance);
+}
+
+assertPromiseResult(testTierDownToLiftoffAsync());
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index d74b55dc6d..a10c503d74 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -143,9 +143,21 @@
['arch in (s390, s390x, ppc, ppc64)', {
'regress/regress-crbug-1032042': [SKIP],
'regress/regress-crbug-840288': [SKIP],
- 'debug/wasm/debug-step-into-wasm': [SKIP],
'debug/wasm/asm-debug': [SKIP],
+ 'debug/wasm/debug-enabled-tier-down-wasm': [SKIP],
+ 'debug/wasm/debug-step-into-wasm': [SKIP],
'debug/wasm/frame-inspection': [SKIP],
}],
+##############################################################################
+['isolates', {
+ # WebAssembly debugging does not work reliably when multiple isolates are
+ # involved (https://crbug.com/v8/10359).
+ # (This list might need to be extended with more debugging tests if they
+ # start flaking.)
+ 'debug/wasm/debug-enabled-tier-down-wasm': [SKIP],
+ 'debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff': [SKIP],
+ 'regress/regress-crbug-1032042': [SKIP],
+}], # 'isolates'
+
]
diff --git a/deps/v8/test/debugging/debugging.status b/deps/v8/test/debugging/debugging.status
new file mode 100644
index 0000000000..b5ebc84474
--- /dev/null
+++ b/deps/v8/test/debugging/debugging.status
@@ -0,0 +1,5 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[]
diff --git a/deps/v8/test/debugging/testcfg.py b/deps/v8/test/debugging/testcfg.py
new file mode 100644
index 0000000000..6f7fedb25b
--- /dev/null
+++ b/deps/v8/test/debugging/testcfg.py
@@ -0,0 +1,99 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import shlex
+import sys
+
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.objects import testcase
+from testrunner.outproc import message
+
+PY_FLAGS_PATTERN = re.compile(r"#\s+Flags:(.*)")
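+# Matches flag comments such as the "# Flags: ..." line at the top of
+# connect.py; the captured flags end up on the d8 command line.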
+
+class PYTestCase(testcase.TestCase):
+
+  def get_shell(self):
+    return os.path.splitext(sys.executable)[0]
+
+  def get_command(self):
+    return super(PYTestCase, self).get_command()
+
+  def _get_cmd_params(self):
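+    # Resulting invocation (sketch): <test>.py -- <shell_dir>/d8 <flags parsed
+    # from the test's "# Flags:" comment>.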
+    return (self._get_files_params() +
+            ['--', os.path.join(self._test_config.shell_dir, 'd8')] +
+            self._get_source_flags())
+
+  def _get_shell_flags(self):
+    return []
+
+
+class TestCase(PYTestCase):
+
+  def __init__(self, *args, **kwargs):
+    super(TestCase, self).__init__(*args, **kwargs)
+
+    source = self.get_source()
+    self._source_files = self._parse_source_files(source)
+    self._source_flags = self._parse_source_flags(source)
+
+  def _parse_source_files(self, source):
+    files = []
+    files.append(self._get_source_path())
+    return files
+
+  def _parse_source_flags(self, source=None):
+    source = source or self.get_source()
+    flags = []
+    for match in re.findall(PY_FLAGS_PATTERN, source):
+      flags += shlex.split(match.strip())
+    return flags
+
+  def _expected_fail(self):
+    path = self.path
+    while path:
+      head, tail = os.path.split(path)
+      if tail == 'fail':
+        return True
+      path = head
+    return False
+
+  def _get_files_params(self):
+    return self._source_files
+
+  def _get_source_flags(self):
+    return self._source_flags
+
+  def _get_source_path(self):
+    base_path = os.path.join(self.suite.root, self.path)
+    if os.path.exists(base_path + self._get_suffix()):
+      return base_path + self._get_suffix()
+    return base_path + '.py'
+
+  def skip_predictable(self):
+    return super(TestCase, self).skip_predictable() or self._expected_fail()
+
+
+class PYTestLoader(testsuite.GenericTestLoader):
+
+  @property
+  def excluded_files(self):
+    return {'gdb_rsp.py', 'testcfg.py', '__init__.py'}
+
+  @property
+  def extensions(self):
+    return ['.py']
+
+
+class TestSuite(testsuite.TestSuite):
+
+  def _test_loader_class(self):
+    return PYTestLoader
+
+  def _test_class(self):
+    return TestCase
+
+
+def GetSuite(*args, **kwargs):
+  return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/debugging/wasm/gdb-server/connect.py b/deps/v8/test/debugging/wasm/gdb-server/connect.py
new file mode 100644
index 0000000000..f2bac29425
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/connect.py
@@ -0,0 +1,41 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Flags: -expose-wasm --wasm_gdb_remote --wasm-pause-waiting-for-debugger --wasm-interpret-all test/debugging/wasm/gdb-server/test_files/test.js
+
+import os
+import subprocess
+import unittest
+import sys
+import gdb_rsp
+
+# These are set up by Main().
+COMMAND = None
+
+
+class Tests(unittest.TestCase):
+  def test_disconnect(self):
+    process = gdb_rsp.PopenDebugStub(COMMAND)
+    try:
+      # Connect and disconnect once.
+      connection = gdb_rsp.GdbRspConnection()
+      connection.Close()
+      # Reconnect 3 times.
+      for _ in range(3):
+        connection = gdb_rsp.GdbRspConnection()
+        connection.Close()
+    finally:
+      gdb_rsp.KillProcess(process)
+
+
+def Main():
+  index = sys.argv.index('--')
+  args = sys.argv[index + 1:]
+  # The arguments before '--' go to unittest.main().
+  global COMMAND
+  COMMAND = args
+  unittest.main(argv=sys.argv[:index])
+
+if __name__ == '__main__':
+  Main()
diff --git a/deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py b/deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py
new file mode 100644
index 0000000000..131725f83d
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py
@@ -0,0 +1,73 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import socket
+import subprocess
+import sys
+import time
+
+SOCKET_ADDR = ('localhost', 8765)
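+# Port 8765 is assumed to be the default port the --wasm_gdb_remote stub
+# listens on (see the flags in connect.py).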
+
+
+def EnsurePortIsAvailable(addr=SOCKET_ADDR):
+  # As a sanity check, check that the TCP port is available by binding to it
+  # ourselves (and then unbinding). Otherwise, we could end up talking to an
+  # old instance of the GDB stub that is still hanging around, or to some
+  # unrelated service that uses the same port number. Of course, there is
+  # still a race condition because an unrelated process could bind the port
+  # after we unbind.
+  sock = socket.socket()
+  sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
+  sock.bind(addr)
+  sock.close()
+
+
+class GdbRspConnection(object):
+
+  def __init__(self, addr=SOCKET_ADDR):
+    self._socket = self._Connect(addr)
+
+  def _Connect(self, addr):
+    # We have to poll because we do not know when the GDB stub has
+    # successfully done bind() on the TCP port. This is inherently unreliable.
+    timeout_in_seconds = 10
+    poll_time_in_seconds = 0.1
+    for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
+      # On Mac OS X, we have to create a new socket FD for each retry.
+      sock = socket.socket()
+      # Do not delay sending small packets. This significantly speeds up debug
+      # stub tests.
+      sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
+      try:
+        sock.connect(addr)
+      except socket.error:
+        # Retry after a delay.
+        time.sleep(poll_time_in_seconds)
+      else:
+        return sock
+    raise Exception('Could not connect to the debug stub in %i seconds'
+                    % timeout_in_seconds)
+
+  def Close(self):
+    self._socket.close()
+
+
+def PopenDebugStub(command):
+  EnsurePortIsAvailable()
+  return subprocess.Popen(command)
+
+
+def KillProcess(process):
+  if process.returncode is not None:
+    # kill() won't work if we've already wait()'ed on the process.
+    return
+  try:
+    process.kill()
+  except OSError:
+    if sys.platform == 'win32':
+      # If the process is already terminated, kill() throws
+      # "WindowsError: [Error 5] Access is denied" on Windows.
+      pass
+    else:
+      raise
+  process.wait()
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test.js b/deps/v8/test/debugging/wasm/gdb-server/test_files/test.js
new file mode 100644
index 0000000000..0959edca30
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test.js
@@ -0,0 +1,33 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+builder.addFunction('mul', kSig_i_ii)
+    // Takes two i32 arguments and returns their i32 product.
+    .addBody([
+      kExprLocalGet, 0,  // local.get i0
+      kExprLocalGet, 1,  // local.get i1
+      kExprI32Mul])      // i32.mul i0 i1
+    .exportFunc();
+
+const instance = builder.instantiate();
+const wasm_f = instance.exports.mul;
+
+function f() {
+  var result = wasm_f(21, 2);
+  return result;
+}
+
+try {
+  let val = 0;
+  while (true) {
+    val += f();
+  }
+}
+catch (e) {
+  print('*exception:* ' + e);
+}
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index da151e7115..6e4b099fcd 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -121,14 +121,14 @@ CallDescriptor* CreateRandomCallDescriptor(Zone* zone, size_t return_count,
wasm::FunctionSig::Builder builder(zone, return_count, param_count);
for (size_t i = 0; i < param_count; i++) {
MachineType type = RandomType(input);
- builder.AddParam(wasm::ValueTypes::ValueTypeFor(type));
+ builder.AddParam(wasm::ValueType::For(type));
}
// Read the end byte of the parameters.
input->NextInt8(1);
for (size_t i = 0; i < return_count; i++) {
MachineType type = RandomType(input);
- builder.AddReturn(wasm::ValueTypes::ValueTypeFor(type));
+ builder.AddReturn(wasm::ValueType::For(type));
}
return compiler::GetWasmCallDescriptor(zone, builder.Build());
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index 6dafe950fb..61149f134e 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -62,8 +62,7 @@ REGEXP_BUILTINS(CASE)
#undef CASE
v8::Local<v8::String> v8_str(v8::Isolate* isolate, const char* s) {
- return v8::String::NewFromUtf8(isolate, s, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ return v8::String::NewFromUtf8(isolate, s).ToLocalChecked();
}
v8::MaybeLocal<v8::Value> CompileRun(v8::Local<v8::Context> context,
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index e52b418784..3e121298e0 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -78,7 +78,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// Wait for the promise to resolve.
while (!done) {
support->PumpMessageLoop(platform::MessageLoopBehavior::kWaitForWork);
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
}
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 3ffaa89203..1241061a7b 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -16,6 +16,7 @@
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-module-runner.h"
@@ -79,6 +80,9 @@ class DataRange {
};
ValueType GetValueType(DataRange* data) {
+ // TODO(v8:8460): We do not add kWasmS128 here yet because this method is used
+ // to generate globals, and since we do not have v128.const yet, there is no
+ // way to specify an initial value for a global of this type.
switch (data->get<uint8_t>() % 4) {
case 0:
return kWasmI32;
@@ -93,7 +97,7 @@ ValueType GetValueType(DataRange* data) {
}
class WasmGenerator {
- template <WasmOpcode Op, ValueType... Args>
+ template <WasmOpcode Op, ValueType::Kind... Args>
void op(DataRange* data) {
Generate<Args...>(data);
builder_->Emit(Op);
@@ -105,8 +109,7 @@ class WasmGenerator {
ValueType br_type)
: gen_(gen) {
gen->blocks_.push_back(br_type);
- gen->builder_->EmitWithU8(block_type,
- ValueTypes::ValueTypeCodeFor(result_type));
+ gen->builder_->EmitWithU8(block_type, result_type.value_type_code());
}
~BlockScope() {
@@ -118,28 +121,28 @@ class WasmGenerator {
WasmGenerator* const gen_;
};
- template <ValueType T>
+ template <ValueType::Kind T>
void block(DataRange* data) {
- BlockScope block_scope(this, kExprBlock, T, T);
+ BlockScope block_scope(this, kExprBlock, ValueType(T), ValueType(T));
Generate<T>(data);
}
- template <ValueType T>
+ template <ValueType::Kind T>
void loop(DataRange* data) {
// When breaking to a loop header, don't provide any input value (hence
// kWasmStmt).
- BlockScope block_scope(this, kExprLoop, T, kWasmStmt);
+ BlockScope block_scope(this, kExprLoop, ValueType(T), kWasmStmt);
Generate<T>(data);
}
enum IfType { kIf, kIfElse };
- template <ValueType T, IfType type>
+ template <ValueType::Kind T, IfType type>
void if_(DataRange* data) {
- static_assert(T == kWasmStmt || type == kIfElse,
+ static_assert(T == ValueType::kStmt || type == kIfElse,
"if without else cannot produce a value");
- Generate<kWasmI32>(data);
- BlockScope block_scope(this, kExprIf, T, T);
+ Generate<ValueType::kI32>(data);
+ BlockScope block_scope(this, kExprIf, ValueType(T), ValueType(T));
Generate<T>(data);
if (type == kIfElse) {
builder_->Emit(kExprElse);
@@ -158,7 +161,7 @@ class WasmGenerator {
kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
}
- template <ValueType wanted_type>
+ template <ValueType::Kind wanted_type>
void br_if(DataRange* data) {
// There is always at least the block representing the function body.
DCHECK(!blocks_.empty());
@@ -169,12 +172,15 @@ class WasmGenerator {
Generate(kWasmI32, data);
builder_->EmitWithI32V(
kExprBrIf, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
- ConvertOrGenerate(break_type, wanted_type, data);
+ ConvertOrGenerate(break_type, ValueType(wanted_type), data);
}
// TODO(eholk): make this function constexpr once gcc supports it
static uint8_t max_alignment(WasmOpcode memop) {
switch (memop) {
+ case kExprS128LoadMem:
+ case kExprS128StoreMem:
+ return 4;
case kExprI64LoadMem:
case kExprF64LoadMem:
case kExprI64StoreMem:
@@ -186,6 +192,8 @@ class WasmGenerator {
case kExprI64AtomicAnd:
case kExprI64AtomicOr:
case kExprI64AtomicXor:
+ case kExprI64AtomicExchange:
+ case kExprI64AtomicCompareExchange:
return 3;
case kExprI32LoadMem:
case kExprI64LoadMem32S:
@@ -203,11 +211,15 @@ class WasmGenerator {
case kExprI32AtomicAnd:
case kExprI32AtomicOr:
case kExprI32AtomicXor:
+ case kExprI32AtomicExchange:
+ case kExprI32AtomicCompareExchange:
case kExprI64AtomicAdd32U:
case kExprI64AtomicSub32U:
case kExprI64AtomicAnd32U:
case kExprI64AtomicOr32U:
case kExprI64AtomicXor32U:
+ case kExprI64AtomicExchange32U:
+ case kExprI64AtomicCompareExchange32U:
return 2;
case kExprI32LoadMem16S:
case kExprI32LoadMem16U:
@@ -224,11 +236,15 @@ class WasmGenerator {
case kExprI32AtomicAnd16U:
case kExprI32AtomicOr16U:
case kExprI32AtomicXor16U:
+ case kExprI32AtomicExchange16U:
+ case kExprI32AtomicCompareExchange16U:
case kExprI64AtomicAdd16U:
case kExprI64AtomicSub16U:
case kExprI64AtomicAnd16U:
case kExprI64AtomicOr16U:
case kExprI64AtomicXor16U:
+ case kExprI64AtomicExchange16U:
+ case kExprI64AtomicCompareExchange16U:
return 1;
case kExprI32LoadMem8S:
case kExprI32LoadMem8U:
@@ -245,27 +261,31 @@ class WasmGenerator {
case kExprI32AtomicAnd8U:
case kExprI32AtomicOr8U:
case kExprI32AtomicXor8U:
+ case kExprI32AtomicExchange8U:
+ case kExprI32AtomicCompareExchange8U:
case kExprI64AtomicAdd8U:
case kExprI64AtomicSub8U:
case kExprI64AtomicAnd8U:
case kExprI64AtomicOr8U:
case kExprI64AtomicXor8U:
+ case kExprI64AtomicExchange8U:
+ case kExprI64AtomicCompareExchange8U:
return 0;
default:
return 0;
}
}
- template <WasmOpcode memory_op, ValueType... arg_types>
+ template <WasmOpcode memory_op, ValueType::Kind... arg_types>
void memop(DataRange* data) {
const uint8_t align = data->get<uint8_t>() % (max_alignment(memory_op) + 1);
const uint32_t offset = data->get<uint32_t>();
// Generate the index and the arguments, if any.
- Generate<kWasmI32, arg_types...>(data);
+ Generate<ValueType::kI32, arg_types...>(data);
- if ((memory_op & 0xfe00) == 0xfe00) {
- // This is an atomic-load or atomic-store.
+ if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(memory_op >> 8))) {
+ DCHECK(memory_op >> 8 == kAtomicPrefix || memory_op >> 8 == kSimdPrefix);
builder_->EmitWithPrefix(memory_op);
} else {
builder_->Emit(memory_op);
@@ -274,7 +294,7 @@ class WasmGenerator {
builder_->EmitU32V(offset);
}
- template <WasmOpcode Op, ValueType... Args>
+ template <WasmOpcode Op, ValueType::Kind... Args>
void atomic_op(DataRange* data) {
const uint8_t align = data->get<uint8_t>() % (max_alignment(Op) + 1);
const uint32_t offset = data->get<uint32_t>();
@@ -286,26 +306,32 @@ class WasmGenerator {
builder_->EmitU32V(offset);
}
+ template <WasmOpcode Op, ValueType::Kind... Args>
+ void simd_op(DataRange* data) {
+ Generate<Args...>(data);
+ builder_->EmitWithPrefix(Op);
+ }
+
void drop(DataRange* data) {
Generate(GetValueType(data), data);
builder_->Emit(kExprDrop);
}
- template <ValueType wanted_type>
+ template <ValueType::Kind wanted_type>
void call(DataRange* data) {
- call(data, wanted_type);
+ call(data, ValueType(wanted_type));
}
void Convert(ValueType src, ValueType dst) {
auto idx = [](ValueType t) -> int {
- switch (t) {
- case kWasmI32:
+ switch (t.kind()) {
+ case ValueType::kI32:
return 0;
- case kWasmI64:
+ case ValueType::kI64:
return 1;
- case kWasmF32:
+ case ValueType::kF32:
return 2;
- case kWasmF64:
+ case ValueType::kF64:
return 3;
default:
UNREACHABLE();
@@ -378,32 +404,34 @@ class WasmGenerator {
return {index, type};
}
- template <ValueType wanted_type>
+ template <ValueType::Kind wanted_type>
void local_op(DataRange* data, WasmOpcode opcode) {
Var local = GetRandomLocal(data);
// If there are no locals and no parameters, just generate any value (if a
// value is needed), or do nothing.
if (!local.is_valid()) {
- if (wanted_type == kWasmStmt) return;
+ if (wanted_type == ValueType::kStmt) return;
return Generate<wanted_type>(data);
}
if (opcode != kExprLocalGet) Generate(local.type, data);
builder_->EmitWithU32V(opcode, local.index);
- if (wanted_type != kWasmStmt && local.type != wanted_type) {
- Convert(local.type, wanted_type);
+ if (wanted_type != ValueType::kStmt && local.type.kind() != wanted_type) {
+ Convert(local.type, ValueType(wanted_type));
}
}
- template <ValueType wanted_type>
+ template <ValueType::Kind wanted_type>
void get_local(DataRange* data) {
- static_assert(wanted_type != kWasmStmt, "illegal type");
+ static_assert(wanted_type != ValueType::kStmt, "illegal type");
local_op<wanted_type>(data, kExprLocalGet);
}
- void set_local(DataRange* data) { local_op<kWasmStmt>(data, kExprLocalSet); }
+ void set_local(DataRange* data) {
+ local_op<ValueType::kStmt>(data, kExprLocalSet);
+ }
- template <ValueType wanted_type>
+ template <ValueType::Kind wanted_type>
void tee_local(DataRange* data) {
local_op<wanted_type>(data, kExprLocalTee);
}
@@ -431,44 +459,44 @@ class WasmGenerator {
return {index, type};
}
- template <ValueType wanted_type>
+ template <ValueType::Kind wanted_type>
void global_op(DataRange* data) {
- constexpr bool is_set = wanted_type == kWasmStmt;
+ constexpr bool is_set = wanted_type == ValueType::kStmt;
Var global = GetRandomGlobal(data, is_set);
// If there are no globals, just generate any value (if a value is needed),
// or do nothing.
if (!global.is_valid()) {
- if (wanted_type == kWasmStmt) return;
+ if (wanted_type == ValueType::kStmt) return;
return Generate<wanted_type>(data);
}
if (is_set) Generate(global.type, data);
builder_->EmitWithU32V(is_set ? kExprGlobalSet : kExprGlobalGet,
global.index);
- if (!is_set && global.type != wanted_type) {
- Convert(global.type, wanted_type);
+ if (!is_set && global.type.kind() != wanted_type) {
+ Convert(global.type, ValueType(wanted_type));
}
}
- template <ValueType wanted_type>
+ template <ValueType::Kind wanted_type>
void get_global(DataRange* data) {
- static_assert(wanted_type != kWasmStmt, "illegal type");
+ static_assert(wanted_type != ValueType::kStmt, "illegal type");
global_op<wanted_type>(data);
}
- template <ValueType select_type>
+ template <ValueType::Kind select_type>
void select_with_type(DataRange* data) {
- static_assert(select_type != kWasmStmt, "illegal type for select");
- Generate<select_type, select_type, kWasmI32>(data);
+ static_assert(select_type != ValueType::kStmt, "illegal type for select");
+ Generate<select_type, select_type, ValueType::kI32>(data);
// num_types is always 1.
uint8_t num_types = 1;
builder_->EmitWithU8U8(kExprSelectWithType, num_types,
- ValueTypes::ValueTypeCodeFor(select_type));
+ ValueType(select_type).value_type_code());
}
- void set_global(DataRange* data) { global_op<kWasmStmt>(data); }
+ void set_global(DataRange* data) { global_op<ValueType::kStmt>(data); }
- template <ValueType... Types>
+ template <ValueType::Kind... Types>
void sequence(DataRange* data) {
Generate<Types...>(data);
}
@@ -526,10 +554,10 @@ class WasmGenerator {
void Generate(ValueType type, DataRange* data);
- template <ValueType T>
+ template <ValueType::Kind T>
void Generate(DataRange* data);
- template <ValueType T1, ValueType T2, ValueType... Ts>
+ template <ValueType::Kind T1, ValueType::Kind T2, ValueType::Kind... Ts>
void Generate(DataRange* data) {
// TODO(clemensb): Implement a more even split.
auto first_data = data->split();
@@ -554,42 +582,46 @@ class WasmGenerator {
};
template <>
-void WasmGenerator::Generate<kWasmStmt>(DataRange* data) {
+void WasmGenerator::Generate<ValueType::kStmt>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() == 0) return;
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kWasmStmt, kWasmStmt>,
- &WasmGenerator::sequence<kWasmStmt, kWasmStmt, kWasmStmt, kWasmStmt>,
- &WasmGenerator::sequence<kWasmStmt, kWasmStmt, kWasmStmt, kWasmStmt,
- kWasmStmt, kWasmStmt, kWasmStmt, kWasmStmt>,
- &WasmGenerator::block<kWasmStmt>,
- &WasmGenerator::loop<kWasmStmt>,
- &WasmGenerator::if_<kWasmStmt, kIf>,
- &WasmGenerator::if_<kWasmStmt, kIfElse>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt,
+ ValueType::kStmt, ValueType::kStmt>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt,
+ ValueType::kStmt, ValueType::kStmt,
+ ValueType::kStmt, ValueType::kStmt,
+ ValueType::kStmt, ValueType::kStmt>,
+ &WasmGenerator::block<ValueType::kStmt>,
+ &WasmGenerator::loop<ValueType::kStmt>,
+ &WasmGenerator::if_<ValueType::kStmt, kIf>,
+ &WasmGenerator::if_<ValueType::kStmt, kIfElse>,
&WasmGenerator::br,
- &WasmGenerator::br_if<kWasmStmt>,
-
- &WasmGenerator::memop<kExprI32StoreMem, kWasmI32>,
- &WasmGenerator::memop<kExprI32StoreMem8, kWasmI32>,
- &WasmGenerator::memop<kExprI32StoreMem16, kWasmI32>,
- &WasmGenerator::memop<kExprI64StoreMem, kWasmI64>,
- &WasmGenerator::memop<kExprI64StoreMem8, kWasmI64>,
- &WasmGenerator::memop<kExprI64StoreMem16, kWasmI64>,
- &WasmGenerator::memop<kExprI64StoreMem32, kWasmI64>,
- &WasmGenerator::memop<kExprF32StoreMem, kWasmF32>,
- &WasmGenerator::memop<kExprF64StoreMem, kWasmF64>,
- &WasmGenerator::memop<kExprI32AtomicStore, kWasmI32>,
- &WasmGenerator::memop<kExprI32AtomicStore8U, kWasmI32>,
- &WasmGenerator::memop<kExprI32AtomicStore16U, kWasmI32>,
- &WasmGenerator::memop<kExprI64AtomicStore, kWasmI64>,
- &WasmGenerator::memop<kExprI64AtomicStore8U, kWasmI64>,
- &WasmGenerator::memop<kExprI64AtomicStore16U, kWasmI64>,
- &WasmGenerator::memop<kExprI64AtomicStore32U, kWasmI64>,
+ &WasmGenerator::br_if<ValueType::kStmt>,
+
+ &WasmGenerator::memop<kExprI32StoreMem, ValueType::kI32>,
+ &WasmGenerator::memop<kExprI32StoreMem8, ValueType::kI32>,
+ &WasmGenerator::memop<kExprI32StoreMem16, ValueType::kI32>,
+ &WasmGenerator::memop<kExprI64StoreMem, ValueType::kI64>,
+ &WasmGenerator::memop<kExprI64StoreMem8, ValueType::kI64>,
+ &WasmGenerator::memop<kExprI64StoreMem16, ValueType::kI64>,
+ &WasmGenerator::memop<kExprI64StoreMem32, ValueType::kI64>,
+ &WasmGenerator::memop<kExprF32StoreMem, ValueType::kF32>,
+ &WasmGenerator::memop<kExprF64StoreMem, ValueType::kF64>,
+ &WasmGenerator::memop<kExprI32AtomicStore, ValueType::kI32>,
+ &WasmGenerator::memop<kExprI32AtomicStore8U, ValueType::kI32>,
+ &WasmGenerator::memop<kExprI32AtomicStore16U, ValueType::kI32>,
+ &WasmGenerator::memop<kExprI64AtomicStore, ValueType::kI64>,
+ &WasmGenerator::memop<kExprI64AtomicStore8U, ValueType::kI64>,
+ &WasmGenerator::memop<kExprI64AtomicStore16U, ValueType::kI64>,
+ &WasmGenerator::memop<kExprI64AtomicStore32U, ValueType::kI64>,
+ &WasmGenerator::memop<kExprS128StoreMem, ValueType::kS128>,
&WasmGenerator::drop,
- &WasmGenerator::call<kWasmStmt>,
+ &WasmGenerator::call<ValueType::kStmt>,
&WasmGenerator::set_local,
&WasmGenerator::set_global};
@@ -598,7 +630,7 @@ void WasmGenerator::Generate<kWasmStmt>(DataRange* data) {
}
template <>
-void WasmGenerator::Generate<kWasmI32>(DataRange* data) {
+void WasmGenerator::Generate<ValueType::kI32>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() <= 1) {
builder_->EmitI32Const(data->get<uint32_t>());
@@ -611,69 +643,70 @@ void WasmGenerator::Generate<kWasmI32>(DataRange* data) {
&WasmGenerator::i32_const<3>,
&WasmGenerator::i32_const<4>,
- &WasmGenerator::sequence<kWasmI32, kWasmStmt>,
- &WasmGenerator::sequence<kWasmStmt, kWasmI32>,
- &WasmGenerator::sequence<kWasmStmt, kWasmI32, kWasmStmt>,
-
- &WasmGenerator::op<kExprI32Eqz, kWasmI32>,
- &WasmGenerator::op<kExprI32Eq, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32Ne, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32LtS, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32LtU, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32GeS, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32GeU, kWasmI32, kWasmI32>,
-
- &WasmGenerator::op<kExprI64Eqz, kWasmI64>,
- &WasmGenerator::op<kExprI64Eq, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64Ne, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64LtS, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64LtU, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64GeS, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64GeU, kWasmI64, kWasmI64>,
-
- &WasmGenerator::op<kExprF32Eq, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32Ne, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32Lt, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32Ge, kWasmF32, kWasmF32>,
-
- &WasmGenerator::op<kExprF64Eq, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64Ne, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64Lt, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64Ge, kWasmF64, kWasmF64>,
-
- &WasmGenerator::op<kExprI32Add, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32Sub, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32Mul, kWasmI32, kWasmI32>,
-
- &WasmGenerator::op<kExprI32DivS, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32DivU, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32RemS, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32RemU, kWasmI32, kWasmI32>,
-
- &WasmGenerator::op<kExprI32And, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32Ior, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32Xor, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32Shl, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32ShrU, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32ShrS, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32Ror, kWasmI32, kWasmI32>,
- &WasmGenerator::op<kExprI32Rol, kWasmI32, kWasmI32>,
-
- &WasmGenerator::op<kExprI32Clz, kWasmI32>,
- &WasmGenerator::op<kExprI32Ctz, kWasmI32>,
- &WasmGenerator::op<kExprI32Popcnt, kWasmI32>,
-
- &WasmGenerator::op<kExprI32ConvertI64, kWasmI64>,
- &WasmGenerator::op<kExprI32SConvertF32, kWasmF32>,
- &WasmGenerator::op<kExprI32UConvertF32, kWasmF32>,
- &WasmGenerator::op<kExprI32SConvertF64, kWasmF64>,
- &WasmGenerator::op<kExprI32UConvertF64, kWasmF64>,
- &WasmGenerator::op<kExprI32ReinterpretF32, kWasmF32>,
-
- &WasmGenerator::block<kWasmI32>,
- &WasmGenerator::loop<kWasmI32>,
- &WasmGenerator::if_<kWasmI32, kIfElse>,
- &WasmGenerator::br_if<kWasmI32>,
+ &WasmGenerator::sequence<ValueType::kI32, ValueType::kStmt>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI32>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI32,
+ ValueType::kStmt>,
+
+ &WasmGenerator::op<kExprI32Eqz, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Eq, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Ne, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32LtS, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32LtU, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32GeS, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32GeU, ValueType::kI32, ValueType::kI32>,
+
+ &WasmGenerator::op<kExprI64Eqz, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Eq, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Ne, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64LtS, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64LtU, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64GeS, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64GeU, ValueType::kI64, ValueType::kI64>,
+
+ &WasmGenerator::op<kExprF32Eq, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Ne, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Lt, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Ge, ValueType::kF32, ValueType::kF32>,
+
+ &WasmGenerator::op<kExprF64Eq, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Ne, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Lt, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Ge, ValueType::kF64, ValueType::kF64>,
+
+ &WasmGenerator::op<kExprI32Add, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Sub, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Mul, ValueType::kI32, ValueType::kI32>,
+
+ &WasmGenerator::op<kExprI32DivS, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32DivU, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32RemS, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32RemU, ValueType::kI32, ValueType::kI32>,
+
+ &WasmGenerator::op<kExprI32And, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Ior, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Xor, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Shl, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32ShrU, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32ShrS, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Ror, ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Rol, ValueType::kI32, ValueType::kI32>,
+
+ &WasmGenerator::op<kExprI32Clz, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Ctz, ValueType::kI32>,
+ &WasmGenerator::op<kExprI32Popcnt, ValueType::kI32>,
+
+ &WasmGenerator::op<kExprI32ConvertI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI32SConvertF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprI32UConvertF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprI32SConvertF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprI32UConvertF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprI32ReinterpretF32, ValueType::kF32>,
+
+ &WasmGenerator::block<ValueType::kI32>,
+ &WasmGenerator::loop<ValueType::kI32>,
+ &WasmGenerator::if_<ValueType::kI32, kIfElse>,
+ &WasmGenerator::br_if<ValueType::kI32>,
&WasmGenerator::memop<kExprI32LoadMem>,
&WasmGenerator::memop<kExprI32LoadMem8S>,
@@ -684,38 +717,72 @@ void WasmGenerator::Generate<kWasmI32>(DataRange* data) {
&WasmGenerator::memop<kExprI32AtomicLoad8U>,
&WasmGenerator::memop<kExprI32AtomicLoad16U>,
- &WasmGenerator::atomic_op<kExprI32AtomicAdd, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicSub, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAnd, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicOr, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicXor, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAdd8U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicSub8U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAnd8U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicOr8U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicXor8U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAdd16U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicSub16U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAnd16U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicOr16U, kWasmI32, kWasmI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicXor16U, kWasmI32, kWasmI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAdd, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicSub, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAnd, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicOr, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicXor, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicExchange, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange, ValueType::kI32,
+ ValueType::kI32, ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAdd8U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicSub8U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAnd8U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicOr8U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicXor8U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicExchange8U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange8U,
+ ValueType::kI32, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAdd16U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicSub16U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAnd16U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicOr16U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicXor16U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicExchange16U, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange16U,
+ ValueType::kI32, ValueType::kI32,
+ ValueType::kI32>,
+
+ &WasmGenerator::simd_op<kExprS1x16AnyTrue, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprS1x8AnyTrue, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprS1x4AnyTrue, ValueType::kS128>,
&WasmGenerator::current_memory,
&WasmGenerator::grow_memory,
- &WasmGenerator::get_local<kWasmI32>,
- &WasmGenerator::tee_local<kWasmI32>,
- &WasmGenerator::get_global<kWasmI32>,
- &WasmGenerator::op<kExprSelect, kWasmI32, kWasmI32, kWasmI32>,
- &WasmGenerator::select_with_type<kWasmI32>,
- &WasmGenerator::call<kWasmI32>};
+ &WasmGenerator::get_local<ValueType::kI32>,
+ &WasmGenerator::tee_local<ValueType::kI32>,
+ &WasmGenerator::get_global<ValueType::kI32>,
+ &WasmGenerator::op<kExprSelect, ValueType::kI32, ValueType::kI32,
+ ValueType::kI32>,
+ &WasmGenerator::select_with_type<ValueType::kI32>,
+ &WasmGenerator::call<ValueType::kI32>};
GenerateOneOf(alternatives, data);
}
template <>
-void WasmGenerator::Generate<kWasmI64>(DataRange* data) {
+void WasmGenerator::Generate<ValueType::kI64>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() <= 1) {
builder_->EmitI64Const(data->get<int64_t>());
@@ -732,36 +799,37 @@ void WasmGenerator::Generate<kWasmI64>(DataRange* data) {
&WasmGenerator::i64_const<7>,
&WasmGenerator::i64_const<8>,
- &WasmGenerator::sequence<kWasmI64, kWasmStmt>,
- &WasmGenerator::sequence<kWasmStmt, kWasmI64>,
- &WasmGenerator::sequence<kWasmStmt, kWasmI64, kWasmStmt>,
-
- &WasmGenerator::op<kExprI64Add, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64Sub, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64Mul, kWasmI64, kWasmI64>,
-
- &WasmGenerator::op<kExprI64DivS, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64DivU, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64RemS, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64RemU, kWasmI64, kWasmI64>,
-
- &WasmGenerator::op<kExprI64And, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64Ior, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64Xor, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64Shl, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64ShrU, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64ShrS, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64Ror, kWasmI64, kWasmI64>,
- &WasmGenerator::op<kExprI64Rol, kWasmI64, kWasmI64>,
-
- &WasmGenerator::op<kExprI64Clz, kWasmI64>,
- &WasmGenerator::op<kExprI64Ctz, kWasmI64>,
- &WasmGenerator::op<kExprI64Popcnt, kWasmI64>,
-
- &WasmGenerator::block<kWasmI64>,
- &WasmGenerator::loop<kWasmI64>,
- &WasmGenerator::if_<kWasmI64, kIfElse>,
- &WasmGenerator::br_if<kWasmI64>,
+ &WasmGenerator::sequence<ValueType::kI64, ValueType::kStmt>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI64>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI64,
+ ValueType::kStmt>,
+
+ &WasmGenerator::op<kExprI64Add, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Sub, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Mul, ValueType::kI64, ValueType::kI64>,
+
+ &WasmGenerator::op<kExprI64DivS, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64DivU, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64RemS, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64RemU, ValueType::kI64, ValueType::kI64>,
+
+ &WasmGenerator::op<kExprI64And, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Ior, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Xor, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Shl, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64ShrU, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64ShrS, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Ror, ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Rol, ValueType::kI64, ValueType::kI64>,
+
+ &WasmGenerator::op<kExprI64Clz, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Ctz, ValueType::kI64>,
+ &WasmGenerator::op<kExprI64Popcnt, ValueType::kI64>,
+
+ &WasmGenerator::block<ValueType::kI64>,
+ &WasmGenerator::loop<ValueType::kI64>,
+ &WasmGenerator::if_<ValueType::kI64, kIfElse>,
+ &WasmGenerator::br_if<ValueType::kI64>,
&WasmGenerator::memop<kExprI64LoadMem>,
&WasmGenerator::memop<kExprI64LoadMem8S>,
@@ -775,40 +843,80 @@ void WasmGenerator::Generate<kWasmI64>(DataRange* data) {
&WasmGenerator::memop<kExprI64AtomicLoad16U>,
&WasmGenerator::memop<kExprI64AtomicLoad32U>,
- &WasmGenerator::atomic_op<kExprI64AtomicAdd, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicSub, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAnd, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicOr, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicXor, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAdd8U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicSub8U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAnd8U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicOr8U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicXor8U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAdd16U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicSub16U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAnd16U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicOr16U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicXor16U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAdd32U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicSub32U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAnd32U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicOr32U, kWasmI32, kWasmI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicXor32U, kWasmI32, kWasmI64>,
-
- &WasmGenerator::get_local<kWasmI64>,
- &WasmGenerator::tee_local<kWasmI64>,
- &WasmGenerator::get_global<kWasmI64>,
- &WasmGenerator::op<kExprSelect, kWasmI64, kWasmI64, kWasmI32>,
- &WasmGenerator::select_with_type<kWasmI64>,
-
- &WasmGenerator::call<kWasmI64>};
+ &WasmGenerator::atomic_op<kExprI64AtomicAdd, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicSub, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAnd, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicOr, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicXor, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicExchange, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange, ValueType::kI32,
+ ValueType::kI64, ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAdd8U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicSub8U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAnd8U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicOr8U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicXor8U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicExchange8U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange8U,
+ ValueType::kI32, ValueType::kI64,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAdd16U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicSub16U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAnd16U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicOr16U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicXor16U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicExchange16U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange16U,
+ ValueType::kI32, ValueType::kI64,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAdd32U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicSub32U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAnd32U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicOr32U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicXor32U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicExchange32U, ValueType::kI32,
+ ValueType::kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange32U,
+ ValueType::kI32, ValueType::kI64,
+ ValueType::kI64>,
+
+ &WasmGenerator::get_local<ValueType::kI64>,
+ &WasmGenerator::tee_local<ValueType::kI64>,
+ &WasmGenerator::get_global<ValueType::kI64>,
+ &WasmGenerator::op<kExprSelect, ValueType::kI64, ValueType::kI64,
+ ValueType::kI32>,
+ &WasmGenerator::select_with_type<ValueType::kI64>,
+
+ &WasmGenerator::call<ValueType::kI64>};
GenerateOneOf(alternatives, data);
}
template <>
-void WasmGenerator::Generate<kWasmF32>(DataRange* data) {
+void WasmGenerator::Generate<ValueType::kF32>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() <= sizeof(float)) {
builder_->EmitF32Const(data->get<float>());
@@ -816,52 +924,54 @@ void WasmGenerator::Generate<kWasmF32>(DataRange* data) {
}
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kWasmF32, kWasmStmt>,
- &WasmGenerator::sequence<kWasmStmt, kWasmF32>,
- &WasmGenerator::sequence<kWasmStmt, kWasmF32, kWasmStmt>,
-
- &WasmGenerator::op<kExprF32Abs, kWasmF32>,
- &WasmGenerator::op<kExprF32Neg, kWasmF32>,
- &WasmGenerator::op<kExprF32Ceil, kWasmF32>,
- &WasmGenerator::op<kExprF32Floor, kWasmF32>,
- &WasmGenerator::op<kExprF32Trunc, kWasmF32>,
- &WasmGenerator::op<kExprF32NearestInt, kWasmF32>,
- &WasmGenerator::op<kExprF32Sqrt, kWasmF32>,
- &WasmGenerator::op<kExprF32Add, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32Sub, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32Mul, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32Div, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32Min, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32Max, kWasmF32, kWasmF32>,
- &WasmGenerator::op<kExprF32CopySign, kWasmF32, kWasmF32>,
-
- &WasmGenerator::op<kExprF32SConvertI32, kWasmI32>,
- &WasmGenerator::op<kExprF32UConvertI32, kWasmI32>,
- &WasmGenerator::op<kExprF32SConvertI64, kWasmI64>,
- &WasmGenerator::op<kExprF32UConvertI64, kWasmI64>,
- &WasmGenerator::op<kExprF32ConvertF64, kWasmF64>,
- &WasmGenerator::op<kExprF32ReinterpretI32, kWasmI32>,
-
- &WasmGenerator::block<kWasmF32>,
- &WasmGenerator::loop<kWasmF32>,
- &WasmGenerator::if_<kWasmF32, kIfElse>,
- &WasmGenerator::br_if<kWasmF32>,
+ &WasmGenerator::sequence<ValueType::kF32, ValueType::kStmt>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF32>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF32,
+ ValueType::kStmt>,
+
+ &WasmGenerator::op<kExprF32Abs, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Neg, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Ceil, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Floor, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Trunc, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32NearestInt, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Sqrt, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Add, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Sub, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Mul, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Div, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Min, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32Max, ValueType::kF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF32CopySign, ValueType::kF32, ValueType::kF32>,
+
+ &WasmGenerator::op<kExprF32SConvertI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprF32UConvertI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprF32SConvertI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprF32UConvertI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprF32ConvertF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF32ReinterpretI32, ValueType::kI32>,
+
+ &WasmGenerator::block<ValueType::kF32>,
+ &WasmGenerator::loop<ValueType::kF32>,
+ &WasmGenerator::if_<ValueType::kF32, kIfElse>,
+ &WasmGenerator::br_if<ValueType::kF32>,
&WasmGenerator::memop<kExprF32LoadMem>,
- &WasmGenerator::get_local<kWasmF32>,
- &WasmGenerator::tee_local<kWasmF32>,
- &WasmGenerator::get_global<kWasmF32>,
- &WasmGenerator::op<kExprSelect, kWasmF32, kWasmF32, kWasmI32>,
- &WasmGenerator::select_with_type<kWasmF32>,
- &WasmGenerator::call<kWasmF32>};
+ &WasmGenerator::get_local<ValueType::kF32>,
+ &WasmGenerator::tee_local<ValueType::kF32>,
+ &WasmGenerator::get_global<ValueType::kF32>,
+ &WasmGenerator::op<kExprSelect, ValueType::kF32, ValueType::kF32,
+ ValueType::kI32>,
+ &WasmGenerator::select_with_type<ValueType::kF32>,
+ &WasmGenerator::call<ValueType::kF32>};
GenerateOneOf(alternatives, data);
}
template <>
-void WasmGenerator::Generate<kWasmF64>(DataRange* data) {
+void WasmGenerator::Generate<ValueType::kF64>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() <= sizeof(double)) {
builder_->EmitF64Const(data->get<double>());
@@ -869,67 +979,216 @@ void WasmGenerator::Generate<kWasmF64>(DataRange* data) {
}
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kWasmF64, kWasmStmt>,
- &WasmGenerator::sequence<kWasmStmt, kWasmF64>,
- &WasmGenerator::sequence<kWasmStmt, kWasmF64, kWasmStmt>,
-
- &WasmGenerator::op<kExprF64Abs, kWasmF64>,
- &WasmGenerator::op<kExprF64Neg, kWasmF64>,
- &WasmGenerator::op<kExprF64Ceil, kWasmF64>,
- &WasmGenerator::op<kExprF64Floor, kWasmF64>,
- &WasmGenerator::op<kExprF64Trunc, kWasmF64>,
- &WasmGenerator::op<kExprF64NearestInt, kWasmF64>,
- &WasmGenerator::op<kExprF64Sqrt, kWasmF64>,
- &WasmGenerator::op<kExprF64Add, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64Sub, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64Mul, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64Div, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64Min, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64Max, kWasmF64, kWasmF64>,
- &WasmGenerator::op<kExprF64CopySign, kWasmF64, kWasmF64>,
-
- &WasmGenerator::op<kExprF64SConvertI32, kWasmI32>,
- &WasmGenerator::op<kExprF64UConvertI32, kWasmI32>,
- &WasmGenerator::op<kExprF64SConvertI64, kWasmI64>,
- &WasmGenerator::op<kExprF64UConvertI64, kWasmI64>,
- &WasmGenerator::op<kExprF64ConvertF32, kWasmF32>,
- &WasmGenerator::op<kExprF64ReinterpretI64, kWasmI64>,
-
- &WasmGenerator::block<kWasmF64>,
- &WasmGenerator::loop<kWasmF64>,
- &WasmGenerator::if_<kWasmF64, kIfElse>,
- &WasmGenerator::br_if<kWasmF64>,
+ &WasmGenerator::sequence<ValueType::kF64, ValueType::kStmt>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF64>,
+ &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF64,
+ ValueType::kStmt>,
+
+ &WasmGenerator::op<kExprF64Abs, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Neg, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Ceil, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Floor, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Trunc, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64NearestInt, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Sqrt, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Add, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Sub, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Mul, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Div, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Min, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64Max, ValueType::kF64, ValueType::kF64>,
+ &WasmGenerator::op<kExprF64CopySign, ValueType::kF64, ValueType::kF64>,
+
+ &WasmGenerator::op<kExprF64SConvertI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprF64UConvertI32, ValueType::kI32>,
+ &WasmGenerator::op<kExprF64SConvertI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprF64UConvertI64, ValueType::kI64>,
+ &WasmGenerator::op<kExprF64ConvertF32, ValueType::kF32>,
+ &WasmGenerator::op<kExprF64ReinterpretI64, ValueType::kI64>,
+
+ &WasmGenerator::block<ValueType::kF64>,
+ &WasmGenerator::loop<ValueType::kF64>,
+ &WasmGenerator::if_<ValueType::kF64, kIfElse>,
+ &WasmGenerator::br_if<ValueType::kF64>,
&WasmGenerator::memop<kExprF64LoadMem>,
- &WasmGenerator::get_local<kWasmF64>,
- &WasmGenerator::tee_local<kWasmF64>,
- &WasmGenerator::get_global<kWasmF64>,
- &WasmGenerator::op<kExprSelect, kWasmF64, kWasmF64, kWasmI32>,
- &WasmGenerator::select_with_type<kWasmF64>,
- &WasmGenerator::call<kWasmF64>};
+ &WasmGenerator::get_local<ValueType::kF64>,
+ &WasmGenerator::tee_local<ValueType::kF64>,
+ &WasmGenerator::get_global<ValueType::kF64>,
+ &WasmGenerator::op<kExprSelect, ValueType::kF64, ValueType::kF64,
+ ValueType::kI32>,
+ &WasmGenerator::select_with_type<ValueType::kF64>,
+
+ &WasmGenerator::call<ValueType::kF64>};
+ GenerateOneOf(alternatives, data);
+}
+
+template <>
+void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
+ GeneratorRecursionScope rec_scope(this);
+ if (recursion_limit_reached() || data->size() <= sizeof(int32_t)) {
+ // TODO(v8:8460): v128.const is not implemented yet, and we need a way to
+ // "bottom-out", so use a splat to generate this.
+ builder_->EmitI32Const(data->get<int32_t>());
+ builder_->EmitWithPrefix(kExprI8x16Splat);
+ return;
+ }
+
+ constexpr GenerateFn alternatives[] = {
+ &WasmGenerator::simd_op<kExprI8x16Splat, ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI8x16Eq, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16Ne, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16LtS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16LtU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16GtS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16GtU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16LeS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16LeU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16GeS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16GeU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16Neg, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16Shl, ValueType::kS128, ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI8x16ShrS, ValueType::kS128,
+ ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI8x16ShrU, ValueType::kS128,
+ ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI8x16Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16AddSaturateS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16AddSaturateU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16Sub, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16SubSaturateS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16SubSaturateU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16MinS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16MinU, ValueType::kS128,
+ ValueType::kS128>,
+ // I8x16Mul is prototyped but not in the proposal, thus omitted here.
+ &WasmGenerator::simd_op<kExprI8x16MaxS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI8x16MaxU, ValueType::kS128,
+ ValueType::kS128>,
+
+ &WasmGenerator::simd_op<kExprI16x8Splat, ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI16x8Eq, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8Ne, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8LtS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8LtU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8GtS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8GtU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8LeS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8LeU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8GeS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8GeU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8Neg, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8Shl, ValueType::kS128, ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI16x8ShrS, ValueType::kS128,
+ ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI16x8ShrU, ValueType::kS128,
+ ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI16x8Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8AddSaturateS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8AddSaturateU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8Sub, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8SubSaturateS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8SubSaturateU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8Mul, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8MinS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8MinU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8MaxS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8MaxU, ValueType::kS128,
+ ValueType::kS128>,
+
+ &WasmGenerator::simd_op<kExprI32x4Splat, ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI32x4Eq, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4Ne, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4LtS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4LtU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4GtS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4GtU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4LeS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4LeU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4GeS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4GeU, ValueType::kS128,
+ ValueType::kS128>,
+
+ &WasmGenerator::simd_op<kExprI64x2Splat, ValueType::kI64>,
+ &WasmGenerator::simd_op<kExprF32x4Splat, ValueType::kF32>,
+ &WasmGenerator::simd_op<kExprF64x2Splat, ValueType::kF64>,
+
+ &WasmGenerator::simd_op<kExprI32x4Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI64x2Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Add, ValueType::kS128,
+ ValueType::kS128>,
+
+ &WasmGenerator::memop<kExprS128LoadMem>};
GenerateOneOf(alternatives, data);
}
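// All of the Generate<...> specializations above end by handing their
// constexpr alternatives table to GenerateOneOf, which spends one byte of
// fuzzer input to choose an entry. A simplified sketch of that helper (the
// real one is defined earlier in this file):
//
//   template <size_t N>
//   void WasmGenerator::GenerateOneOf(GenerateFn (&alternatives)[N],
//                                     DataRange* data) {
//     static_assert(N < 256, "table must be indexable by a single byte");
//     GenerateFn alternative = alternatives[data->get<uint8_t>() % N];
//     (this->*alternative)(data);
//   }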
void WasmGenerator::grow_memory(DataRange* data) {
- Generate<kWasmI32>(data);
+ Generate<ValueType::kI32>(data);
builder_->EmitWithU8(kExprMemoryGrow, 0);
}
void WasmGenerator::Generate(ValueType type, DataRange* data) {
- switch (type) {
- case kWasmStmt:
- return Generate<kWasmStmt>(data);
- case kWasmI32:
- return Generate<kWasmI32>(data);
- case kWasmI64:
- return Generate<kWasmI64>(data);
- case kWasmF32:
- return Generate<kWasmF32>(data);
- case kWasmF64:
- return Generate<kWasmF64>(data);
+ switch (type.kind()) {
+ case ValueType::kStmt:
+ return Generate<ValueType::kStmt>(data);
+ case ValueType::kI32:
+ return Generate<ValueType::kI32>(data);
+ case ValueType::kI64:
+ return Generate<ValueType::kI64>(data);
+ case ValueType::kF32:
+ return Generate<ValueType::kF32>(data);
+ case ValueType::kF64:
+ return Generate<ValueType::kF64>(data);
+ case ValueType::kS128:
+ return Generate<ValueType::kS128>(data);
default:
UNREACHABLE();
}
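The churn in this file is mechanical and stems from one refactor: ValueType is no longer a plain enum but a class, and before C++20 a class type cannot serve as a non-type template parameter. Templates such as Generate<> therefore switch to the nested ValueType::Kind enum, and runtime dispatch goes through type.kind(). A minimal sketch of the shape this implies (simplified; the real class in src/wasm/value-type.h carries more than the kind):

#include <cstdint>

class DataRange;  // forward declaration, as used by the generator

class ValueType {
 public:
  enum Kind : uint8_t { kStmt, kI32, kI64, kF32, kF64, kS128 };
  constexpr explicit ValueType(Kind kind) : kind_(kind) {}
  constexpr Kind kind() const { return kind_; }

 private:
  Kind kind_;
};

// Pre-C++20, "template <ValueType T>" is ill-formed once ValueType is a
// class, so the generator templates take the enum instead:
template <ValueType::Kind T>
void Generate(DataRange* data);  // specialized per kind, as in the diff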
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 9fbfa0b3c3..590bc1bc17 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -80,20 +80,20 @@ PrintSig PrintReturns(const FunctionSig* sig) {
return {sig->return_count(), [=](size_t i) { return sig->GetReturn(i); }};
}
const char* ValueTypeToConstantName(ValueType type) {
- switch (type) {
- case kWasmI32:
+ switch (type.kind()) {
+ case ValueType::kI32:
return "kWasmI32";
- case kWasmI64:
+ case ValueType::kI64:
return "kWasmI64";
- case kWasmF32:
+ case ValueType::kF32:
return "kWasmF32";
- case kWasmF64:
+ case ValueType::kF64:
return "kWasmF64";
- case kWasmAnyRef:
+ case ValueType::kAnyRef:
return "kWasmAnyRef";
- case kWasmFuncRef:
+ case ValueType::kFuncRef:
return "kWasmFuncRef";
- case kWasmExnRef:
+ case ValueType::kExnRef:
return "kWasmExnRef";
default:
UNREACHABLE();
@@ -151,11 +151,10 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
"\n"
"load('test/mjsunit/wasm/wasm-module-builder.js');\n"
"\n"
- "(function() {\n"
- " const builder = new WasmModuleBuilder();\n";
+ "const builder = new WasmModuleBuilder();\n";
if (module->has_memory) {
- os << " builder.addMemory(" << module->initial_pages;
+ os << "builder.addMemory(" << module->initial_pages;
if (module->has_maximum_pages) {
os << ", " << module->maximum_pages;
} else {
@@ -169,12 +168,12 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
for (WasmGlobal& glob : module->globals) {
- os << " builder.addGlobal(" << ValueTypeToConstantName(glob.type) << ", "
+ os << "builder.addGlobal(" << ValueTypeToConstantName(glob.type) << ", "
<< glob.mutability << ");\n";
}
for (const FunctionSig* sig : module->signatures) {
- os << " builder.addType(makeSig(" << PrintParameters(sig) << ", "
+ os << "builder.addType(makeSig(" << PrintParameters(sig) << ", "
<< PrintReturns(sig) << "));\n";
}
@@ -183,7 +182,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
// There currently cannot be more than one table.
DCHECK_GE(1, module->tables.size());
for (const WasmTable& table : module->tables) {
- os << " builder.setTableBounds(" << table.initial_size << ", ";
+ os << "builder.setTableBounds(" << table.initial_size << ", ";
if (table.has_maximum_size) {
os << table.maximum_size << ");\n";
} else {
@@ -191,7 +190,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
}
for (const WasmElemSegment& elem_segment : module->elem_segments) {
- os << " builder.addElementSegment(";
+ os << "builder.addElementSegment(";
switch (elem_segment.offset.kind) {
case WasmInitExpr::kGlobalIndex:
os << elem_segment.offset.val.global_index << ", true";
@@ -207,11 +206,11 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
for (const WasmFunction& func : module->functions) {
Vector<const uint8_t> func_code = wire_bytes.GetFunctionBytes(&func);
- os << " // Generate function " << (func.func_index + 1) << " (out of "
+ os << "// Generate function " << (func.func_index + 1) << " (out of "
<< module->functions.size() << ").\n";
// Add function.
- os << " builder.addFunction(undefined, " << func.sig_index
+ os << "builder.addFunction(undefined, " << func.sig_index
<< " /* sig */)\n";
// Add locals.
@@ -219,41 +218,40 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
DecodeLocalDecls(enabled_features, &decls, func_code.begin(),
func_code.end());
if (!decls.type_list.empty()) {
- os << " ";
+ os << " ";
for (size_t pos = 0, count = 1, locals = decls.type_list.size();
pos < locals; pos += count, count = 1) {
ValueType type = decls.type_list[pos];
while (pos + count < locals && decls.type_list[pos + count] == type)
++count;
- os << ".addLocals({" << ValueTypes::TypeName(type)
- << "_count: " << count << "})";
+ os << ".addLocals({" << type.type_name() << "_count: " << count << "})";
}
os << "\n";
}
// Add body.
- os << " .addBodyWithEnd([\n";
+ os << " .addBodyWithEnd([\n";
FunctionBody func_body(func.sig, func.code.offset(), func_code.begin(),
func_code.end());
PrintRawWasmCode(isolate->allocator(), func_body, module, kOmitLocals);
- os << " ]);\n";
+ os << "]);\n";
}
for (WasmExport& exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
- os << " builder.addExport('" << PrintName(wire_bytes, exp.name) << "', "
+ os << "builder.addExport('" << PrintName(wire_bytes, exp.name) << "', "
<< exp.index << ");\n";
}
if (compiles) {
- os << " const instance = builder.instantiate();\n"
- " print(instance.exports.main(1, 2, 3));\n";
+ os << "const instance = builder.instantiate();\n"
+ "print(instance.exports.main(1, 2, 3));\n";
} else {
- os << " assertThrows(function() { builder.instantiate(); }, "
+ os << "assertThrows(function() { builder.instantiate(); }, "
"WebAssembly.CompileError);\n";
}
- os << "})();\n";
+ os << "\n";
}
void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
@@ -265,6 +263,8 @@ void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
FlagScope<bool> enable_##feat(&FLAG_experimental_wasm_##feat, true);
FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
#undef ENABLE_STAGED_FEATURES
+ // SIMD is not included in staging yet, so we enable it here for fuzzing.
+ EXPERIMENTAL_FLAG_SCOPE(simd);
// Strictly enforce the input size limit. Note that setting "max_len" on the
// fuzzer target is not enough, since different fuzzers are used and not all
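Both fuzzer targets lean on the FlagScope RAII guard: it overwrites a flag for the lifetime of a scope and restores the saved value in its destructor, so one fuzzer input cannot leak flag state into the next. EXPERIMENTAL_FLAG_SCOPE(simd) above is presumably a thin macro over a FlagScope<bool> for --experimental-wasm-simd. A simplified sketch of the guard:

template <typename T>
class FlagScope {
 public:
  FlagScope(T* flag, T new_value) : flag_(flag), previous_value_(*flag) {
    *flag = new_value;  // install the temporary value
  }
  ~FlagScope() { *flag_ = previous_value_; }  // restore on scope exit

 private:
  T* flag_;
  T previous_value_;
};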
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index 5c68030408..1230a7d46d 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -32,6 +32,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// We reduce the maximum memory size and table size of WebAssembly instances
// to avoid OOMs in the fuzzer.
i::FlagScope<uint32_t> max_mem_flag_scope(&i::FLAG_wasm_max_mem_pages, 32);
+ i::FlagScope<uint32_t> max_mem_growth_flag_scope(
+ &i::FLAG_wasm_max_mem_pages_growth, 32);
i::FlagScope<uint32_t> max_table_size_scope(&i::FLAG_wasm_max_table_size,
100);
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
@@ -69,6 +71,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// Pump the message loop and run micro tasks, e.g. GC finalization tasks.
support->PumpMessageLoop(v8::platform::MessageLoopBehavior::kDoNotWait);
- isolate->RunMicrotasks();
+ isolate->PerformMicrotaskCheckpoint();
return 0;
}
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt
index ad1d9db168..c5b427f475 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm-expected.txt
@@ -1,4 +1,4 @@
Test that console profiles contain wasm function names.
Compiling wasm.
Running fib with increasing input until it shows up in the profile.
-Found fib in profile.
+Found expected functions in profile.
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
index 0541ce02bb..dea818a351 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
@@ -2,13 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// TODO(v8:10266): Figure out why this fails on tsan with --always-opt.
+// Flags: --no-always-opt
+
let {session, contextGroup, Protocol} = InspectorTest.start(
'Test that console profiles contain wasm function names.');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
-// Add fibonacci function.
+// Add fibonacci function, calling back and forth between JS and Wasm to also
+// check for the occurrence of the wrappers.
var builder = new WasmModuleBuilder();
+const imp_index = builder.addImport('q', 'f', kSig_i_i);
builder.addFunction('fib', kSig_i_i)
.addBody([
kExprLocalGet, 0,
@@ -17,9 +22,9 @@ builder.addFunction('fib', kSig_i_i)
kExprI32LeS, // i < 2 ?
kExprBrIf, 0, // --> return i
kExprI32Const, 1, kExprI32Sub, // i - 1
- kExprCallFunction, 0, // fib(i - 1)
+ kExprCallFunction, imp_index, // imp(i - 1)
kExprLocalGet, 0, kExprI32Const, 2, kExprI32Sub, // i - 2
- kExprCallFunction, 0, // fib(i - 2)
+ kExprCallFunction, imp_index, // imp(i - 2)
kExprI32Add
])
.exportFunc();
@@ -32,28 +37,38 @@ function compile(bytes) {
view[i] = bytes[i] | 0;
}
let module = new WebAssembly.Module(buffer);
- let instance = new WebAssembly.Instance(module);
+ let fib = undefined;
+ function imp(i) { return fib(i); }
+ let instance = new WebAssembly.Instance(module, {q: {f: imp}});
+ fib = instance.exports.fib;
return instance;
}
-function checkError(message)
-{
- if (message.error) {
- InspectorTest.log("Error: ");
- InspectorTest.logMessage(message);
- InspectorTest.completeTest();
- }
+function checkError(message) {
+ if (!message.error) return;
+ InspectorTest.log('Error: ');
+ InspectorTest.logMessage(message);
+ InspectorTest.completeTest();
}
(async function test() {
Protocol.Profiler.enable();
checkError(await Protocol.Profiler.start());
- let found_fib_in_profile = false;
+ let found_good_profile = false;
let finished_profiles = 0;
Protocol.Profiler.onConsoleProfileFinished(e => {
++finished_profiles;
- if (e.params.profile.nodes.some(n => n.callFrame.functionName === 'fib'))
- found_fib_in_profile = true;
+ let function_names =
+ e.params.profile.nodes.map(n => n.callFrame.functionName);
+ // InspectorTest.log(function_names.join(', '));
+ // Check for at least one full cycle of
+ // fib -> wasm-to-js -> imp -> js-to-wasm -> fib.
+ const expected = ['fib', 'wasm-to-js:i:i', 'imp', 'js-to-wasm:i:i', 'fib'];
+ for (let i = 0; i <= function_names.length - expected.length; ++i) {
+ if (expected.every((val, idx) => val == function_names[i + idx])) {
+ found_good_profile = true;
+ }
+ }
});
InspectorTest.log('Compiling wasm.');
checkError(await Protocol.Runtime.evaluate({
@@ -62,7 +77,7 @@ function checkError(message)
}));
InspectorTest.log(
'Running fib with increasing input until it shows up in the profile.');
- for (let i = 1; !found_fib_in_profile; ++i) {
+ for (let i = 1; !found_good_profile; ++i) {
checkError(await Protocol.Runtime.evaluate(
{expression: 'console.profile(\'profile\');'}));
checkError(await Protocol.Runtime.evaluate(
@@ -75,6 +90,6 @@ function checkError(message)
finished_profiles + ')');
}
}
- InspectorTest.log('Found fib in profile.');
+ InspectorTest.log('Found expected functions in profile.');
InspectorTest.completeTest();
})().catch(e => InspectorTest.log('caught: ' + e));
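The profile check above treats the recorded frames as a flat list of function names and looks for the expected call cycle as a contiguous window within that list. The same scan written out in C++ as a self-contained sketch (ContainsRun is a hypothetical name, not part of the test harness):

#include <cstddef>
#include <string>
#include <vector>

// Returns true if `expected` occurs as a contiguous run inside `names`.
bool ContainsRun(const std::vector<std::string>& names,
                 const std::vector<std::string>& expected) {
  if (expected.empty() || names.size() < expected.size()) return false;
  for (std::size_t start = 0; start + expected.size() <= names.size();
       ++start) {
    bool match = true;
    for (std::size_t i = 0; i < expected.size(); ++i) {
      if (names[start + i] != expected[i]) {
        match = false;
        break;
      }
    }
    if (match) return true;
  }
  return false;
}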
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt
new file mode 100644
index 0000000000..cb77ef1343
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-empty-inner-expected.txt
@@ -0,0 +1,4 @@
+Test empty inner classes with private instance methods in the outer class
+
+Running test: testScopesPaused
+undefined
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js b/deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js
new file mode 100644
index 0000000000..01ad459b8c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-empty-inner.js
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Test empty inner classes with private instance methods in the outer class');
+
+contextGroup.addScript(`
+function run() {
+ class Outer {
+ #method() {}
+
+ factory() {
+ return class Inner {
+ fn() {
+ debugger;
+ }
+ };
+ }
+ };
+
+ const a = new Outer();
+ const Inner = a.factory();
+ (new Inner).fn();
+}`);
+
+InspectorTest.runAsyncTestSuite([async function testScopesPaused() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: 'run()'});
+
+ let {params: {callFrames}} =
+ await Protocol.Debugger.oncePaused(); // inside fn()
+ let frame = callFrames[0];
+
+ let {result} =
+ await Protocol.Runtime.getProperties({objectId: frame.this.objectId});
+
+ InspectorTest.logObject(result.privateProperties);
+
+ Protocol.Debugger.resume();
+ Protocol.Debugger.disable();
+}]);
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
index d54076b6d5..19a944eebd 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-expected.txt
@@ -171,7 +171,7 @@ Evaluating private methods in the base class from the subclass
columnNumber : 4
exception : {
className : SyntaxError
- description : SyntaxError: Private field '#subclassMethod' must be declared in an enclosing class at B.fn (<anonymous>:16:7) at run (<anonymous>:30:5) at <anonymous>:1:1
+ description : SyntaxError: Private field '#subclassMethod' must be declared in an enclosing class at B.fn (<anonymous>:20:7) at run (<anonymous>:34:5) at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
@@ -183,7 +183,7 @@ Evaluating private methods in the base class from the subclass
}
result : {
className : SyntaxError
- description : SyntaxError: Private field '#subclassMethod' must be declared in an enclosing class at B.fn (<anonymous>:16:7) at run (<anonymous>:30:5) at <anonymous>:1:1
+ description : SyntaxError: Private field '#subclassMethod' must be declared in an enclosing class at B.fn (<anonymous>:20:7) at run (<anonymous>:34:5) at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt
new file mode 100644
index 0000000000..c129a40cf6
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-expected.txt
@@ -0,0 +1,148 @@
+Test static private class methods
+
+Running test: testScopesPaused
+privateProperties on the base class
+[
+ [0] : {
+ name : #writeOnly
+ set : {
+ className : Function
+ description : set #writeOnly(val) { this.#field = val; }
+ objectId : <objectId>
+ type : function
+ }
+ }
+ [1] : {
+ get : {
+ className : Function
+ description : get #readOnly() { return this.#field; }
+ objectId : <objectId>
+ type : function
+ }
+ name : #readOnly
+ }
+ [2] : {
+ get : {
+ className : Function
+ description : get #accessor() { return this.#field; }
+ objectId : <objectId>
+ type : function
+ }
+ name : #accessor
+ set : {
+ className : Function
+ description : set #accessor(val) { this.#field = val; }
+ objectId : <objectId>
+ type : function
+ }
+ }
+ [3] : {
+ name : #inc
+ value : {
+ className : Function
+ description : #inc() { return ++A.#accessor; }
+ objectId : <objectId>
+ type : function
+ }
+ }
+ [4] : {
+ name : #field
+ value : {
+ description : 2
+ type : number
+ value : 2
+ }
+ }
+]
+Evaluating A.#inc();
+{
+ result : {
+ description : 3
+ type : number
+ value : 3
+ }
+}
+Evaluating this.#inc();
+{
+ result : {
+ description : 4
+ type : number
+ value : 4
+ }
+}
+Evaluating ++this.#accessor;
+{
+ result : {
+ description : 5
+ type : number
+ value : 5
+ }
+}
+Evaluating this.#readOnly;
+{
+ result : {
+ description : 5
+ type : number
+ value : 5
+ }
+}
+Evaluating this.#writeOnly = 0; this.#field;
+{
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+}
+privateProperties on the subclass
+[
+ [0] : {
+ get : {
+ className : Function
+ description : get #accessor() { return 'subclassAccessor'; }
+ objectId : <objectId>
+ type : function
+ }
+ name : #accessor
+ }
+ [1] : {
+ name : #subclassMethod
+ value : {
+ className : Function
+ description : #subclassMethod() { return B.#accessor; }
+ objectId : <objectId>
+ type : function
+ }
+ }
+]
+Evaluating this.#inc(); from the base class
+{
+ exceptionDetails : {
+ columnNumber : 4
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Private field '#inc' must be declared in an enclosing class at Function.test (<anonymous>:24:7) at run (<anonymous>:28:5) at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Private field '#inc' must be declared in an enclosing class at Function.test (<anonymous>:24:7) at run (<anonymous>:28:5) at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+}
+Evaluating this.#subclassMethod();
+{
+ result : {
+ type : string
+ value : subclassAccessor
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt
new file mode 100644
index 0000000000..7c9e6b2e1b
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt
@@ -0,0 +1,17 @@
+Test static private class methods
+
+Running test: testScopesPaused
+privateProperties on class A
+[
+ [0] : {
+ name : #method
+ value : {
+ className : Function
+ description : #method() { debugger; }
+ objectId : {"injectedScriptId":1,"id":39}
+ type : function
+ }
+ }
+]
+privateProperties on class B
+undefined
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js b/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js
new file mode 100644
index 0000000000..b26fa13b84
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js
@@ -0,0 +1,61 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+let { session, contextGroup, Protocol } = InspectorTest.start(
+ "Test static private class methods"
+);
+
+const script = `
+function run() {
+ class A {
+ static #method() {
+ debugger;
+ }
+ static test() {
+ class B {
+ static test() { debugger; }
+ }
+ A.#method(); // reference #method so it shows up
+ B.test();
+ }
+ }
+ A.test();
+}`;
+
+contextGroup.addScript(script);
+
+InspectorTest.runAsyncTestSuite([
+ async function testScopesPaused() {
+ Protocol.Debugger.enable();
+
+ // Do not await here; await oncePaused below instead.
+ Protocol.Runtime.evaluate({ expression: 'run()' });
+
+ InspectorTest.log('privateProperties on class A');
+ let {
+ params: { callFrames }
+ } = await Protocol.Debugger.oncePaused(); // inside A.#method()
+
+ let frame = callFrames[0];
+ let { result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ });
+ InspectorTest.logObject(result.privateProperties);
+ Protocol.Debugger.resume();
+
+ ({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // B.test();
+ frame = callFrames[0];
+
+ InspectorTest.log('privateProperties on class B');
+ ({ result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ }));
+ InspectorTest.logObject(result.privateProperties);
+
+ Protocol.Debugger.resume();
+ Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-preview-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-static-preview-expected.txt
new file mode 100644
index 0000000000..96f7b01f23
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-preview-expected.txt
@@ -0,0 +1,28 @@
+Check static private methods in object preview.
+
+Running test: testPrivateMethods
+expression: class { static get #readOnly() { return 1; } }
+{
+ type : string
+ value : class { static get #readOnly() { return 1; } }
+}
+expression: class { static set #writeOnly(val) { } }
+{
+ type : string
+ value : class { static set #writeOnly(val) { } }
+}
+expression: class { static #method() { return 1; } static get #accessor() { } static set #accessor(val) { } }
+{
+ type : string
+ value : class { static #method() { return 1; } static get #accessor() { } static set #accessor(val) { } }
+}
+expression: class extends class { } { static #method() { return 1; } }
+{
+ type : string
+ value : class extends class { } { static #method() { return 1; } }
+}
+expression: class extends class { static #method() { return 1; } } { get #accessor() { } set #accessor(val) { } }
+{
+ type : string
+ value : class extends class { static #method() { return 1; } } { get #accessor() { } set #accessor(val) { } }
+}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-preview.js b/deps/v8/test/inspector/debugger/class-private-methods-static-preview.js
new file mode 100644
index 0000000000..3a64ddec77
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-preview.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-private-methods
+
+let {session, contextGroup, Protocol} = InspectorTest.start("Check static private methods in object preview.");
+
+Protocol.Debugger.enable();
+Protocol.Runtime.enable();
+Protocol.Runtime.onConsoleAPICalled(dumpInternalProperties);
+
+contextGroup.setupInjectedScriptEnvironment();
+
+InspectorTest.runAsyncTestSuite([
+ async function testPrivateMethods() {
+ const expressions = [
+ "class { static get #readOnly() { return 1; } }",
+ "class { static set #writeOnly(val) { } }",
+ "class { static #method() { return 1; } static get #accessor() { } static set #accessor(val) { } }",
+ "class extends class { } { static #method() { return 1; } }",
+ "class extends class { static #method() { return 1; } } { get #accessor() { } set #accessor(val) { } }",
+ ];
+ for (const expression of expressions) {
+ InspectorTest.log(`expression: ${expression}`);
+ // Currently the previews are strings of the source code of the classes
+ await Protocol.Runtime.evaluate({
+ expression: `console.table(${expression})`,
+ generatePreview: true
+ });
+ }
+ }
+]);
+
+function dumpInternalProperties(message) {
+ try {
+ InspectorTest.logMessage(message.params.args[0]);
+ } catch {
+ InspectorTest.logMessage(message);
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static.js b/deps/v8/test/inspector/debugger/class-private-methods-static.js
new file mode 100644
index 0000000000..1d493116ef
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static.js
@@ -0,0 +1,122 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+let { session, contextGroup, Protocol } = InspectorTest.start(
+ "Test static private class methods"
+);
+
+const script = `
+function run() {
+ class A {
+ static test() {
+ debugger;
+ }
+ static #field = 2;
+ #instanceMethod() { } // should not show up
+ get #instanceAccessor() { return this.#field; } // should not show up
+ set #instanceAccessor(val) { this.#field = val; } // should not show up
+
+ static set #writeOnly(val) { this.#field = val; }
+ static get #readOnly() { return this.#field; }
+ static get #accessor() { return this.#field; }
+ static set #accessor(val) { this.#field = val; }
+ static #inc() { return ++A.#accessor; }
+ };
+ A.test();
+
+ class B extends A {
+ static get #accessor() { return 'subclassAccessor'; }
+ static #subclassMethod() { return B.#accessor; }
+ static test() {
+ debugger;
+ }
+ }
+
+ B.test();
+}`;
+
+contextGroup.addScript(script);
+
+InspectorTest.runAsyncTestSuite([
+ async function testScopesPaused() {
+ Protocol.Debugger.enable();
+
+ // Do not await here; await oncePaused below instead.
+ Protocol.Runtime.evaluate({ expression: 'run()' });
+
+ InspectorTest.log('privateProperties on the base class');
+ let {
+ params: { callFrames }
+ } = await Protocol.Debugger.oncePaused(); // inside A.test()
+ let frame = callFrames[0];
+ let { result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ });
+ InspectorTest.logMessage(result.privateProperties);
+
+ InspectorTest.log('Evaluating A.#inc();');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'A.#inc();',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logObject(result);
+
+ InspectorTest.log('Evaluating this.#inc();');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'this.#inc();',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logObject(result);
+
+ InspectorTest.log('Evaluating ++this.#accessor;');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: '++this.#accessor;',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logObject(result);
+
+ InspectorTest.log('Evaluating this.#readOnly;');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'this.#readOnly;',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logObject(result);
+
+ InspectorTest.log('Evaluating this.#writeOnly = 0; this.#field;');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'this.#writeOnly = 0; this.#field;',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logObject(result);
+
+ Protocol.Debugger.resume();
+ ({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // B.test();
+ frame = callFrames[0];
+
+ InspectorTest.log('privateProperties on the subclass');
+ ({ result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ }));
+ InspectorTest.logMessage(result.privateProperties);
+
+ InspectorTest.log('Evaluating this.#inc(); from the base class');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'this.#inc();',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logMessage(result);
+
+ InspectorTest.log('Evaluating this.#subclassMethod();');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'this.#subclassMethod();',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logMessage(result);
+
+ Protocol.Debugger.resume();
+ Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
new file mode 100644
index 0000000000..4f33b3156e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
@@ -0,0 +1,83 @@
+Test accessing unused private methods at runtime
+
+Running test: testScopesPaused
+Get privateProperties of A in testStatic()
+[
+ [0] : {
+ name : #staticMethod
+ value : {
+ className : Function
+ description : #staticMethod() { return 1; }
+ objectId : {"injectedScriptId":1,"id":34}
+ type : function
+ }
+ }
+]
+Access A.#staticMethod() in testStatic()
+{
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : ReferenceError
+ description : ReferenceError: A is not defined at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
+ objectId : {"injectedScriptId":1,"id":36}
+ subtype : error
+ type : object
+ }
+ exceptionId : 1
+ lineNumber : 0
+ scriptId : 5
+ text : Uncaught
+ }
+ result : {
+ className : ReferenceError
+ description : ReferenceError: A is not defined at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
+ objectId : {"injectedScriptId":1,"id":35}
+ subtype : error
+ type : object
+ }
+}
+Access this.#staticMethod() in testStatic()
+{
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : Error
+ description : Error: Unused static private method '#staticMethod' cannot be accessed at debug time at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
+ objectId : {"injectedScriptId":1,"id":38}
+ subtype : error
+ type : object
+ }
+ exceptionId : 2
+ lineNumber : 0
+ scriptId : 6
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error: Unused static private method '#staticMethod' cannot be accessed at debug time at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
+ objectId : {"injectedScriptId":1,"id":37}
+ subtype : error
+ type : object
+ }
+}
+Get privateProperties of a in testInstance()
+[
+ [0] : {
+ name : #instanceMethod
+ value : {
+ className : Function
+ description : #instanceMethod() { return 2; }
+ objectId : {"injectedScriptId":1,"id":61}
+ type : function
+ }
+ }
+]
+Evaluating this.#instanceMethod() in testInstance()
+{
+ result : {
+ description : 2
+ type : number
+ value : 2
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-unused.js b/deps/v8/test/inspector/debugger/class-private-methods-unused.js
new file mode 100644
index 0000000000..a868735401
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/class-private-methods-unused.js
@@ -0,0 +1,79 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+let { session, contextGroup, Protocol } = InspectorTest.start(
+ "Test accessing unused private methods at runtime"
+);
+
+const script = `
+ function run() {
+ class A {
+ #instanceMethod() { return 2; }
+ static #staticMethod() { return 1; }
+ static testStatic() { debugger; }
+ testInstance() { debugger; }
+    }
+    A.testStatic();
+    const a = new A();
+ a.testInstance();
+ }`;
+
+contextGroup.addScript(script);
+
+InspectorTest.runAsyncTestSuite([
+ async function testScopesPaused() {
+ Protocol.Debugger.enable();
+
+    // Do not await here; instead, await oncePaused() below.
+ Protocol.Runtime.evaluate({ expression: 'run()' });
+
+ InspectorTest.log('Get privateProperties of A in testStatic()');
+ let {
+ params: { callFrames }
+ } = await Protocol.Debugger.oncePaused(); // inside A.testStatic()
+ let frame = callFrames[0];
+ let { result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ });
+ InspectorTest.logObject(result.privateProperties);
+
+ // Variables not referenced in the source code are currently
+ // considered "optimized away".
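+    // (The expected output captures the two failure modes below: "A" is not
+    // visible to debug-evaluate at all, and the unused static method itself
+    // is reported as inaccessible at debug time.)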
+ InspectorTest.log('Access A.#staticMethod() in testStatic()');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'A.#staticMethod();',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logObject(result);
+
+ InspectorTest.log('Access this.#staticMethod() in testStatic()');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'this.#staticMethod();',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logObject(result);
+
+ Protocol.Debugger.resume();
+    ({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // a.testInstance();
+ frame = callFrames[0];
+
+    InspectorTest.log('Get privateProperties of a in testInstance()');
+ ({ result } = await Protocol.Runtime.getProperties({
+ objectId: frame.this.objectId
+ }));
+ InspectorTest.logObject(result.privateProperties);
+
+ InspectorTest.log('Evaluating this.#instanceMethod() in testInstance()');
+ ({ result } = await Protocol.Debugger.evaluateOnCallFrame({
+ expression: 'this.#instanceMethod();',
+ callFrameId: callFrames[0].callFrameId
+ }));
+ InspectorTest.logObject(result);
+
+ Protocol.Debugger.resume();
+ Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/class-private-methods.js b/deps/v8/test/inspector/debugger/class-private-methods.js
index 7aa5f7a61f..f92916b254 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods.js
@@ -13,6 +13,10 @@ function run() {
class A {
#field = 2;
+ static #staticMethod() {} // should not show up
+ static get #staticAccessor() { } // should not show up
+ static set #staticAccessor(val) { } // should not show up
+
#inc() { this.#field++; return this.#field; }
set #writeOnly(val) { this.#field = val; }
diff --git a/deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt b/deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt
index b8d920692f..f91ce03b90 100644
--- a/deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt
+++ b/deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt
@@ -13,6 +13,7 @@ Running test: testLoadedModulesOnDebuggerEnable
isModule : true
length : 39
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -33,6 +34,7 @@ Running test: testScriptEventsWhenDebuggerIsEnabled
isModule : true
length : 39
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -50,6 +52,7 @@ Running test: testScriptEventsWhenDebuggerIsEnabled
isModule : true
length : 1
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt
index 22ef7aae92..374507cbea 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt
@@ -1,40 +1,22 @@
getPossibleBreakpoints should not crash during lazy compilation (crbug.com/715334)
{
- method : Debugger.scriptParsed
- params : {
- endColumn : 21
- endLine : 2
- executionContextId : <executionContextId>
- hasSourceURL : true
- hash : 124cb0278e3aa9f250651d433cdefeb5618c7202
- isLiveEdit : false
- isModule : false
- length : 52
- scriptId : <scriptId>
- sourceMapURL :
- startColumn : 0
- startLine : 0
- url : test.js
- }
-}
-{
method : Debugger.scriptFailedToParse
params : {
- endColumn : 21
+ endColumn : 23
endLine : 2
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 124cb0278e3aa9f250651d433cdefeb5618c7202
+ hash : 1bce5d0c4da4d13a3ea6e6f35ea0f34705c26ba4
isModule : false
- length : 52
+ length : 56
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
url : test.js
}
}
-One script is reported twice
{
id : <messageId>
result : {
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js
index d1326e967f..3ee7adf578 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js
@@ -3,26 +3,16 @@
// found in the LICENSE file.
let {session, contextGroup, Protocol} = InspectorTest.start('getPossibleBreakpoints should not crash during lazy compilation (crbug.com/715334)');
-
-contextGroup.addScript(`
-function test() { continue; }
-//# sourceURL=test.js`);
-
-(async function test() {
- Protocol.Debugger.enable();
- let script = await Protocol.Debugger.onceScriptParsed();
- InspectorTest.logMessage(script);
- let scriptId = script.params.scriptId;
- Protocol.Debugger.onScriptFailedToParse(msg => {
- InspectorTest.logMessage(msg);
- if (msg.params.scriptId !== script.params.scriptId) {
- InspectorTest.log('Failed script to parse event has different scriptId');
- } else {
- InspectorTest.log('One script is reported twice');
- }
- });
- let response = await Protocol.Debugger.getPossibleBreakpoints({
- start: {scriptId, lineNumber: 0, columnNumber: 0}});
- InspectorTest.logMessage(response);
- InspectorTest.completeTest();
+(async function test() {
+  Protocol.Debugger.enable();
+  Protocol.Debugger.onScriptFailedToParse(async msg => {
+    InspectorTest.logMessage(msg);
+    const response = await Protocol.Debugger.getPossibleBreakpoints({
+      start: {scriptId: msg.params.scriptId, lineNumber: 0, columnNumber: 0}});
+    InspectorTest.logMessage(response);
+    InspectorTest.completeTest();
+  });
+  contextGroup.addScript(`
+  function test() { continue; }
+  //# sourceURL=test.js`);
})();
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt b/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt
new file mode 100644
index 0000000000..4ca988deca
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt
@@ -0,0 +1,2 @@
+Check pause on OOM
+reason: OOM
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-extrawide.js b/deps/v8/test/inspector/debugger/pause-on-oom-extrawide.js
new file mode 100644
index 0000000000..748ad2094e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-extrawide.js
@@ -0,0 +1,39 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --max-old-space-size=16
+
+let { session, contextGroup, Protocol } = InspectorTest.start('Check pause on OOM');
+
+var script = `
+var arr = [];
+var stop = false;
+function generateGarbage() {
+  while (!stop) {`;
+
+// Force the JumpLoop to be ExtraWide.
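+// (Ignition widens bytecode operands with a Wide or ExtraWide prefix once
+// they no longer fit in 8 or 16 bits; the nested loop below pads the body
+// until the JumpLoop back-offset needs a 32-bit operand.)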
+for (let i = 0; i < 110; ++i) {
+  for (let j = 0; j < 75; ++j) {
+    script += `arr.push(new Array(1000));`;
+    script += `if (stop) { break; }`;
+  }
+}
+
+script += `
+  }
+}
+//# sourceURL=test.js`;
+
+contextGroup.addScript(script, 10, 26);
+
+Protocol.Debugger.onPaused((message) => {
+ InspectorTest.log(`reason: ${message.params.reason}`);
+ Protocol.Debugger.evaluateOnCallFrame({
+ callFrameId: message.params.callFrames[0].callFrameId,
+ expression: 'arr = []; stop = true;'
+ }).then(() => Protocol.Debugger.resume());
+});
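+// Clearing `arr` and setting `stop` inside the paused frame frees memory and
+// lets generateGarbage() finish after the resume, instead of growing the
+// heap into a second OOM.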
+Protocol.Debugger.enable();
+Protocol.Runtime.evaluate({ expression: 'generateGarbage()' })
+ .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt b/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt
new file mode 100644
index 0000000000..4ca988deca
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt
@@ -0,0 +1,2 @@
+Check pause on OOM
+reason: OOM
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-wide.js b/deps/v8/test/inspector/debugger/pause-on-oom-wide.js
new file mode 100644
index 0000000000..56f3f6f0e4
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-wide.js
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --max-old-space-size=16
+
+let { session, contextGroup, Protocol } = InspectorTest.start('Check pause on OOM');
+
+var script = `
+var arr = [];
+var stop = false;
+function generateGarbage() {
+  while (!stop) {`;
+
+// Force the JumpLoop to be Wide.
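+// (Here the smaller body pushes the JumpLoop back-offset past the 8-bit
+// range, so a 16-bit Wide operand is enough.)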
+for (let i = 0; i < 37; ++i) {
+  script += `arr.push(new Array(1000));`;
+  script += `if (stop) { break; }`;
+}
+
+script += `
+  }
+}
+//# sourceURL=test.js`;
+
+contextGroup.addScript(script, 10, 26);
+
+Protocol.Debugger.onPaused((message) => {
+ InspectorTest.log(`reason: ${message.params.reason}`);
+ Protocol.Debugger.evaluateOnCallFrame({
+ callFrameId: message.params.callFrames[0].callFrameId,
+ expression: 'arr = []; stop = true;'
+ }).then(() => Protocol.Debugger.resume());
+});
+Protocol.Debugger.enable();
+Protocol.Runtime.evaluate({ expression: 'generateGarbage()' })
+ .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt b/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
index 6f18b7b3e3..a4ff85895e 100644
--- a/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
+++ b/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
@@ -12,6 +12,7 @@ Check script with url:
isModule : false
length : 16
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -31,6 +32,7 @@ Check script with sourceURL comment:
isModule : false
length : 37
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -49,6 +51,7 @@ Check script failed to parse:
isModule : false
length : 15
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -67,6 +70,7 @@ Check script failed to parse with sourceURL comment:
isModule : false
length : 36
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt b/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
index 2079518424..789221a27f 100644
--- a/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
@@ -13,6 +13,7 @@ scriptParsed
isModule : false
length : 42
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -32,6 +33,7 @@ scriptParsed
isModule : false
length : 52
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -51,6 +53,7 @@ scriptParsed
isModule : false
length : 43
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -70,6 +73,7 @@ scriptParsed
isModule : false
length : 46
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -89,6 +93,7 @@ scriptParsed
isModule : false
length : 50
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : oneline-map
startColumn : 0
startLine : 0
@@ -108,6 +113,7 @@ scriptParsed
isModule : false
length : 60
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : oneline-without-nl-map
startColumn : 0
startLine : 0
@@ -127,6 +133,7 @@ scriptParsed
isModule : false
length : 51
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : twoline-map
startColumn : 0
startLine : 0
@@ -146,6 +153,7 @@ scriptParsed
isModule : false
length : 54
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : threeline-map
startColumn : 0
startLine : 0
@@ -165,6 +173,7 @@ scriptParsed
isModule : false
length : 88
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : source-mapping-url-map
startColumn : 0
startLine : 0
@@ -184,6 +193,7 @@ scriptParsed
isModule : false
length : 89
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : source-mapping-url-map
startColumn : 0
startLine : 0
@@ -203,6 +213,7 @@ scriptParsed
isModule : false
length : 40
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -222,6 +233,7 @@ scriptParsed
isModule : false
length : 41
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -241,6 +253,7 @@ scriptParsed
isModule : false
length : 18
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -260,6 +273,7 @@ scriptParsed
isModule : false
length : 96
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -279,6 +293,7 @@ scriptParsed
isModule : false
length : 39
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -298,6 +313,7 @@ scriptParsed
isModule : false
length : 19
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -317,6 +333,7 @@ scriptParsed
isModule : false
length : 20
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -336,6 +353,7 @@ scriptParsed
isModule : false
length : 21
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -355,6 +373,7 @@ scriptParsed
isModule : false
length : 22
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -374,6 +393,7 @@ scriptParsed
isModule : false
length : 42
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -393,6 +413,7 @@ scriptParsed
isModule : false
length : 52
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -412,6 +433,7 @@ scriptParsed
isModule : false
length : 43
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -431,6 +453,7 @@ scriptParsed
isModule : false
length : 46
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -450,6 +473,7 @@ scriptParsed
isModule : false
length : 50
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : oneline-map
startColumn : 0
startLine : 0
@@ -469,6 +493,7 @@ scriptParsed
isModule : false
length : 60
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : oneline-without-nl-map
startColumn : 0
startLine : 0
@@ -488,6 +513,7 @@ scriptParsed
isModule : false
length : 51
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : twoline-map
startColumn : 0
startLine : 0
@@ -507,6 +533,7 @@ scriptParsed
isModule : false
length : 54
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : threeline-map
startColumn : 0
startLine : 0
@@ -526,6 +553,7 @@ scriptParsed
isModule : false
length : 88
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : source-mapping-url-map
startColumn : 0
startLine : 0
@@ -545,6 +573,7 @@ scriptParsed
isModule : false
length : 89
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : source-mapping-url-map
startColumn : 0
startLine : 0
@@ -564,6 +593,7 @@ scriptParsed
isModule : false
length : 40
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -583,6 +613,7 @@ scriptParsed
isModule : false
length : 41
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -602,6 +633,7 @@ scriptParsed
isModule : false
length : 18
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -621,6 +653,7 @@ scriptParsed
isModule : false
length : 96
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -640,6 +673,7 @@ scriptParsed
isModule : false
length : 39
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
stackTrace : {
callFrames : [
@@ -669,6 +703,7 @@ scriptFailedToParse
isModule : false
length : 31
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -687,6 +722,7 @@ scriptFailedToParse
isModule : false
length : 56
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL : failed-map
startColumn : 0
startLine : 0
@@ -706,6 +742,7 @@ scriptParsed
isModule : false
length : 19
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -725,6 +762,7 @@ scriptParsed
isModule : false
length : 20
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -744,6 +782,7 @@ scriptParsed
isModule : false
length : 21
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -763,6 +802,7 @@ scriptParsed
isModule : false
length : 22
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
diff --git a/deps/v8/test/inspector/debugger/script-origin-stack-expected.txt b/deps/v8/test/inspector/debugger/script-origin-stack-expected.txt
index 40c7c7d856..f03a2ee524 100644
--- a/deps/v8/test/inspector/debugger/script-origin-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-origin-stack-expected.txt
@@ -11,6 +11,7 @@ Debugger.scriptParsed.stackTrace should contain only one frame
isModule : false
length : 0
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
stackTrace : {
callFrames : [
diff --git a/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt b/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
index bec2c8354c..9fa4fd4f6f 100644
--- a/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
@@ -12,6 +12,7 @@ Runtime.evaluate with valid expression
isModule : false
length : 29
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -30,6 +31,7 @@ Runtime.evaluate with syntax error
isModule : false
length : 39
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -49,6 +51,7 @@ Runtime.callFunctionOn with valid functionDeclaration
isModule : false
length : 18
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -67,6 +70,7 @@ Runtime.callFunctionOn with syntax error
isModule : false
length : 3
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -86,6 +90,7 @@ Runtime.compileScript with valid expression
isModule : false
length : 4
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -104,6 +109,7 @@ Runtime.compileScript with syntax error
isModule : false
length : 1
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -124,6 +130,7 @@ Runtime.evaluate compiled script with stack trace
isModule : false
length : 8
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -142,6 +149,7 @@ Runtime.evaluate compiled script with stack trace
isModule : false
length : 86
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
stackTrace : {
callFrames : [
@@ -171,6 +179,7 @@ Runtime.evaluate compiled script with stack trace
isModule : false
length : 4
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
stackTrace : {
callFrames : [
@@ -201,6 +210,7 @@ Runtime.evaluate compile script error with stack trace
isModule : false
length : 12
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -219,6 +229,7 @@ Runtime.evaluate compile script error with stack trace
isModule : false
length : 98
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
stackTrace : {
callFrames : [
@@ -247,6 +258,7 @@ Runtime.evaluate compile script error with stack trace
isModule : false
length : 3
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
stackTrace : {
callFrames : [
diff --git a/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt b/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
index 2149d834f8..a4c012d368 100644
--- a/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
@@ -9,6 +9,7 @@ Tests scripts hashing
isModule : false
length : 1
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -24,6 +25,7 @@ Tests scripts hashing
isModule : false
length : 3
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -39,6 +41,7 @@ Tests scripts hashing
isModule : false
length : 8106
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
diff --git a/deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt b/deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt
index d7b67a93de..c0b889121b 100644
--- a/deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt
@@ -3,5 +3,5 @@ Waiting for wasm script to be parsed.
Setting breakpoint in wasm.
Running main.
Paused in debugger.
- globals: {"global#0": hello, world}
+ globals: {"m.global": hello, world}
Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt b/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt
index b437d1b253..9b22d614e8 100644
--- a/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt
@@ -1,5 +1,5 @@
Tests that cloning a module notifies the debugger
-Got URL: wasm://wasm/95d1e44e
-Got URL: wasm://wasm/95d1e44e
-Got URL: wasm://wasm/95d1e44e
+Got URL: wasm://wasm/cae8f226
+Got URL: wasm://wasm/cae8f226
+Got URL: wasm://wasm/cae8f226
Done!
diff --git a/deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt b/deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt
index eadcb7d671..cd4c9fbd4a 100644
--- a/deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt
@@ -3,9 +3,9 @@ Installing code and global variable.
Calling instantiate function.
Waiting for wasm scripts to be parsed.
Ignoring script with url v8://test/callInstantiate
-Got wasm script: wasm://wasm/fa045c1e
+Got wasm script: wasm://wasm/7d022e0e
paused No 1
-Script wasm://wasm/fa045c1e byte offset 35: Wasm opcode 0x20
+Script wasm://wasm/7d022e0e byte offset 35: Wasm opcode 0x20
Debugger.resume
exports.main returned!
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets-expected.txt b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets-expected.txt
index a0ff4a04a9..7a970415a0 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets-expected.txt
@@ -2,82 +2,76 @@ Tests breakable locations in wasm
Running testFunction...
Script nr 0 parsed. URL: v8://test/setup
Script nr 1 parsed. URL: v8://test/runTestFunction
-Script nr 2 parsed. URL: wasm://wasm/6a95b41e
+Script nr 2 parsed. URL: wasm://wasm/354ada0e
This is a wasm script (nr 0).
Querying breakable locations for all wasm scripts now...
Requesting all breakable locations in wasm script 0
Bytecode matches!
-11 breakable location(s):
+10 breakable location(s):
[0] 0:40 || byte=1
[1] 0:41 || byte=65
[2] 0:43 || byte=33
[3] 0:45 || byte=11
[4] 0:48 || byte=32
[5] 0:50 || byte=4
-[6] 0:52 || byte=2
-[7] 0:54 || byte=16
-[8] 0:56 || byte=11
-[9] 0:57 || byte=11
-[10] 0:58 || byte=11
+[6] 0:54 || byte=16
+[7] 0:56 || byte=11
+[8] 0:57 || byte=11
+[9] 0:58 || byte=11
Requesting breakable locations in offsets [0,45)
3 breakable location(s):
[0] 0:40 || byte=1
[1] 0:41 || byte=65
[2] 0:43 || byte=33
Requesting breakable locations in offsets [50,60)
-6 breakable location(s):
+5 breakable location(s):
[0] 0:50 || byte=4
-[1] 0:52 || byte=2
-[2] 0:54 || byte=16
-[3] 0:56 || byte=11
-[4] 0:57 || byte=11
-[5] 0:58 || byte=11
+[1] 0:54 || byte=16
+[2] 0:56 || byte=11
+[3] 0:57 || byte=11
+[4] 0:58 || byte=11
Setting a breakpoint on each breakable location...
-Setting at wasm://wasm/6a95b41e:0:40
+Setting at wasm://wasm/354ada0e:0:40
Success!
-Setting at wasm://wasm/6a95b41e:0:41
+Setting at wasm://wasm/354ada0e:0:41
Success!
-Setting at wasm://wasm/6a95b41e:0:43
+Setting at wasm://wasm/354ada0e:0:43
Success!
-Setting at wasm://wasm/6a95b41e:0:45
+Setting at wasm://wasm/354ada0e:0:45
Success!
-Setting at wasm://wasm/6a95b41e:0:48
+Setting at wasm://wasm/354ada0e:0:48
Success!
-Setting at wasm://wasm/6a95b41e:0:50
+Setting at wasm://wasm/354ada0e:0:50
Success!
-Setting at wasm://wasm/6a95b41e:0:52
+Setting at wasm://wasm/354ada0e:0:54
Success!
-Setting at wasm://wasm/6a95b41e:0:54
+Setting at wasm://wasm/354ada0e:0:56
Success!
-Setting at wasm://wasm/6a95b41e:0:56
+Setting at wasm://wasm/354ada0e:0:57
Success!
-Setting at wasm://wasm/6a95b41e:0:57
-Success!
-Setting at wasm://wasm/6a95b41e:0:58
+Setting at wasm://wasm/354ada0e:0:58
Success!
Running wasm code...
-Missing breakpoints: 11
-Script nr 3 parsed. URL: v8://test/runWasm
-Stopped at wasm://wasm/6a95b41e:0:48
Missing breakpoints: 10
-Stopped at wasm://wasm/6a95b41e:0:50
+Script nr 3 parsed. URL: v8://test/runWasm
+Stopped at wasm://wasm/354ada0e:0:48
Missing breakpoints: 9
-Stopped at wasm://wasm/6a95b41e:0:52
+Stopped at wasm://wasm/354ada0e:0:50
Missing breakpoints: 8
-Stopped at wasm://wasm/6a95b41e:0:54
+Stopped at wasm://wasm/354ada0e:0:54
Missing breakpoints: 7
-Stopped at wasm://wasm/6a95b41e:0:40
+Stopped at wasm://wasm/354ada0e:0:40
Missing breakpoints: 6
-Stopped at wasm://wasm/6a95b41e:0:41
+Stopped at wasm://wasm/354ada0e:0:41
Missing breakpoints: 5
-Stopped at wasm://wasm/6a95b41e:0:43
+Stopped at wasm://wasm/354ada0e:0:43
Missing breakpoints: 4
-Stopped at wasm://wasm/6a95b41e:0:45
+Stopped at wasm://wasm/354ada0e:0:45
Missing breakpoints: 3
-Stopped at wasm://wasm/6a95b41e:0:56
+Stopped at wasm://wasm/354ada0e:0:56
Missing breakpoints: 2
-Stopped at wasm://wasm/6a95b41e:0:57
+Stopped at wasm://wasm/354ada0e:0:57
Missing breakpoints: 1
-Stopped at wasm://wasm/6a95b41e:0:58
+Stopped at wasm://wasm/354ada0e:0:58
Missing breakpoints: 0
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-global-names-expected.txt b/deps/v8/test/inspector/debugger/wasm-global-names-expected.txt
new file mode 100644
index 0000000000..912dbc85e1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-global-names-expected.txt
@@ -0,0 +1,7 @@
+Test wasm global names
+Waiting for wasm script to be parsed.
+Setting breakpoint in wasm.
+Running main.
+Paused in debugger.
+ globals: {"module_name.imported_global", "exported_global", "global2"}
+Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-global-names.js b/deps/v8/test/inspector/debugger/wasm-global-names.js
new file mode 100644
index 0000000000..22d0eb30ea
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-global-names.js
@@ -0,0 +1,82 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Test wasm global names');
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addImportedGlobal('module_name', 'imported_global', kWasmI32, false);
+let func = builder.addFunction('func', kSig_v_i)
+ .addBody([
+ kExprGlobalGet, 0, //
+ kExprDrop, //
+ ])
+ .exportAs('main');
+builder.addGlobal(kWasmI32, func).exportAs('exported_global');
+builder.addGlobal(kWasmI32); // global2
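+// The debugger is expected to surface these as "module_name.imported_global"
+// (import path), "exported_global" (export name), and a generated fallback
+// name such as "global2" (see the expected-output file).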
+let moduleBytes = JSON.stringify(builder.toArray());
+
+function test(moduleBytes) {
+ let module = new WebAssembly.Module((new Uint8Array(moduleBytes)).buffer);
+ let imported_global_value = 123;
+ instance = new WebAssembly.Instance(
+ module, {module_name: {imported_global: imported_global_value}});
+}
+
+(async function() {
+ try {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({
+ expression: `
+ let instance;
+ ${test.toString()}
+ test(${moduleBytes});`
+ });
+
+ InspectorTest.log('Waiting for wasm script to be parsed.');
+ let scriptId;
+ while (true) {
+ let msg = await Protocol.Debugger.onceScriptParsed();
+ if (msg.params.url.startsWith('wasm://')) {
+ scriptId = msg.params.scriptId;
+ break;
+ }
+ }
+
+ InspectorTest.log('Setting breakpoint in wasm.');
+ await Protocol.Debugger.setBreakpoint(
+ {location: {scriptId, lineNumber: 0, columnNumber: func.body_offset}});
+
+ InspectorTest.log('Running main.');
+ Protocol.Runtime.evaluate({expression: 'instance.exports.main()'});
+
+ let msg = await Protocol.Debugger.oncePaused();
+ let callFrames = msg.params.callFrames;
+ InspectorTest.log('Paused in debugger.');
+ let scopeChain = callFrames[0].scopeChain;
+ for (let scope of scopeChain) {
+ if (scope.type != 'global') continue;
+
+ let globalObjectProps = (await Protocol.Runtime.getProperties({
+ 'objectId': scope.object.objectId
+ })).result.result;
+
+ for (let prop of globalObjectProps) {
+ let subProps = (await Protocol.Runtime.getProperties({
+ objectId: prop.value.objectId
+ })).result.result;
+ let values = subProps.map((value) => `"${value.name}"`).join(', ');
+ InspectorTest.log(` ${prop.name}: {${values}}`);
+ }
+ }
+
+ InspectorTest.log('Finished.');
+ } catch (exc) {
+ InspectorTest.log(`Failed with exception: ${exc}.`);
+ } finally {
+ InspectorTest.completeTest();
+ }
+})();
diff --git a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
index eb6b119dc3..fffaba9017 100644
--- a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
@@ -4,16 +4,16 @@ Calling instantiate function for module A.
Waiting for wasm script to be parsed.
Got wasm script!
Setting breakpoint in line 1:
-Script wasm://wasm/1871020e byte offset 33: Wasm opcode 0x1
+Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01
Calling instantiate function for module B.
Calling main function on module B.
Paused at 0:33.
-Script wasm://wasm/1871020e byte offset 33: Wasm opcode 0x1
+Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01
Getting current stack trace via "new Error().stack".
Error
at v8://test/getStack:1:1
- at func (wasm-function[0]:0x21)
- at main (wasm-function[1]:0x2f)
+ at func (<anonymous>:wasm-function[0]:0x21)
+ at main (<anonymous>:wasm-function[1]:0x2f)
at v8://test/runWasm:1:22
exports.main returned.
Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt
new file mode 100644
index 0000000000..04c48c92f6
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt
@@ -0,0 +1,103 @@
+Test inspecting register values in Liftoff.
+Installing instantiate function.
+Testing i32.
+Waiting for wasm script.
+Setting 20 breakpoints.
+Calling main.
+Paused at offset 48; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : []
+Paused at offset 50; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0]
+Paused at offset 52; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1]
+Paused at offset 54; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2]
+Paused at offset 56; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3]
+Paused at offset 58; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4]
+Paused at offset 60; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5]
+Paused at offset 62; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6]
+Paused at offset 64; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7]
+Paused at offset 66; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8]
+Paused at offset 68; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 69; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 17]
+Paused at offset 70; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 24]
+Paused at offset 71; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 30]
+Paused at offset 72; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 35]
+Paused at offset 73; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 39]
+Paused at offset 74; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 42]
+Paused at offset 75; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 44]
+Paused at offset 76; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 45]
+Paused at offset 77; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [45]
+main returned.
+Testing i64.
+Waiting for wasm script.
+Setting 20 breakpoints.
+Calling main.
+Paused at offset 48; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : []
+Paused at offset 50; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0]
+Paused at offset 52; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1]
+Paused at offset 54; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2]
+Paused at offset 56; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3]
+Paused at offset 58; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4]
+Paused at offset 60; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5]
+Paused at offset 62; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6]
+Paused at offset 64; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7]
+Paused at offset 66; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8]
+Paused at offset 68; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 69; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 17]
+Paused at offset 70; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 24]
+Paused at offset 71; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 30]
+Paused at offset 72; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 35]
+Paused at offset 73; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 39]
+Paused at offset 74; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 42]
+Paused at offset 75; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 44]
+Paused at offset 76; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 45]
+Paused at offset 77; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [45]
+main returned.
+Testing f32.
+Waiting for wasm script.
+Setting 20 breakpoints.
+Calling main.
+Paused at offset 48; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : []
+Paused at offset 50; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0]
+Paused at offset 52; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1]
+Paused at offset 54; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2]
+Paused at offset 56; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3]
+Paused at offset 58; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4]
+Paused at offset 60; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5]
+Paused at offset 62; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6]
+Paused at offset 64; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7]
+Paused at offset 66; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8]
+Paused at offset 68; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 69; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 17]
+Paused at offset 70; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 24]
+Paused at offset 71; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 30]
+Paused at offset 72; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 35]
+Paused at offset 73; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 39]
+Paused at offset 74; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 42]
+Paused at offset 75; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 44]
+Paused at offset 76; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 45]
+Paused at offset 77; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [45]
+main returned.
+Testing f64.
+Waiting for wasm script.
+Setting 20 breakpoints.
+Calling main.
+Paused at offset 48; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : []
+Paused at offset 50; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0]
+Paused at offset 52; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1]
+Paused at offset 54; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2]
+Paused at offset 56; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3]
+Paused at offset 58; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4]
+Paused at offset 60; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5]
+Paused at offset 62; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6]
+Paused at offset 64; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7]
+Paused at offset 66; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8]
+Paused at offset 68; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 69; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 17]
+Paused at offset 70; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 24]
+Paused at offset 71; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 30]
+Paused at offset 72; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 35]
+Paused at offset 73; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 39]
+Paused at offset 74; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 42]
+Paused at offset 75; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 44]
+Paused at offset 76; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 45]
+Paused at offset 77; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [45]
+main returned.
+Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
new file mode 100644
index 0000000000..a8f8b65586
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
@@ -0,0 +1,122 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --debug-in-liftoff
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Test inspecting register values in Liftoff.');
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const num_locals = 10;
+const configs = {
+ i32: {type: kWasmI32, add: kExprI32Add, from_i32: kExprNop},
+ i64: {type: kWasmI64, add: kExprI64Add, from_i32: kExprI64SConvertI32},
+ f32: {type: kWasmF32, add: kExprF32Add, from_i32: kExprF32SConvertI32},
+ f64: {type: kWasmF64, add: kExprF64Add, from_i32: kExprF64SConvertI32}
+};
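+// Each config declares ten parameters of one wasm type; `from_i32` is the
+// conversion opcode `main` applies to its i32.const arguments (kExprNop for
+// i32, since no conversion is needed there).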
+
+function instantiate(bytes) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes[i] | 0;
+ }
+
+ let module = new WebAssembly.Module(buffer);
+ return new WebAssembly.Instance(module);
+}
+
+const evalWithUrl = (code, url) => Protocol.Runtime.evaluate(
+ {'expression': code + '\n//# sourceURL=v8://test/' + url});
+
+Protocol.Debugger.onPaused(async msg => {
+ let loc = msg.params.callFrames[0].location;
+ let line = [`Paused at offset ${loc.columnNumber}`];
+ // Inspect only the top wasm frame.
+ var frame = msg.params.callFrames[0];
+ for (var scope of frame.scopeChain) {
+ if (scope.type != 'local') continue;
+ var scope_properties =
+ await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
+ for (var value of scope_properties.result.result) {
+      let props = await Protocol.Runtime.getProperties(
+          {objectId: value.value.objectId});
+      let str = props.result.result.map(elem => elem.value.value).join(', ');
+ line.push(`${value.name} : [${str}]`);
+ }
+ InspectorTest.log(line.join('; '));
+ }
+
+ Protocol.Debugger.resume();
+});
+
+// Build a function which receives a lot of arguments. It loads them all and
+// adds them together.
+// In Liftoff, this will hold many values in registers at the break sites.
+function buildModuleBytes(config) {
+ const sig = makeSig(
+ new Array(num_locals).fill(configs[config].type), [configs[config].type]);
+ const body = [];
+ for (let i = 0; i < num_locals; ++i) body.push(kExprLocalGet, i);
+ for (let i = 0; i < num_locals - 1; ++i) body.push(configs[config].add);
+ body.push(kExprReturn);
+ const builder = new WasmModuleBuilder();
+ const test_func = builder.addFunction('test_' + config, sig).addBody(body);
+ const main_body = [];
+ for (let i = 0; i < num_locals; ++i)
+ main_body.push(kExprI32Const, i, configs[config].from_i32);
+ main_body.push(kExprCallFunction, test_func.index, kExprDrop);
+ const main =
+ builder.addFunction('main', kSig_v_v).addBody(main_body).exportAs('main');
+
+ const module_bytes = builder.toArray();
+
+ // Break at every {kExprLocalGet} and at every addition.
+ const interesting_opcodes = [kExprLocalGet, kExprReturn, configs[config].add];
+ const breakpoints = [];
+ for (let idx = 0; idx < body.length; ++idx) {
+ if (interesting_opcodes.find(elem => elem == body[idx])) {
+ breakpoints.push(test_func.body_offset + idx);
+ }
+ }
+
+ return [module_bytes, breakpoints];
+}
+
+async function testConfig(config) {
+ InspectorTest.log(`Testing ${config}.`);
+ const [module_bytes, breakpoints] = buildModuleBytes(config);
+ const instance_name = `instance_${config}`;
+ // Spawn asynchronously:
+ let instantiate_code = evalWithUrl(
+ `const ${instance_name} = instantiate(${JSON.stringify(module_bytes)});`,
+ 'instantiate');
+ InspectorTest.log('Waiting for wasm script.');
+ const [, {params: wasm_script}] = await Protocol.Debugger.onceScriptParsed(2);
+ InspectorTest.log(`Setting ${breakpoints.length} breakpoints.`);
+ for (let offset of breakpoints) {
+ await Protocol.Debugger.setBreakpoint({
+ 'location': {
+ 'scriptId': wasm_script.scriptId,
+ 'lineNumber': 0,
+ 'columnNumber': offset
+ }
+ });
+ }
+ InspectorTest.log('Calling main.');
+ await evalWithUrl(`${instance_name}.exports.main()`, `run_${config}`);
+ InspectorTest.log('main returned.');
+}
+
+(async function test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Installing instantiate function.');
+ await evalWithUrl(instantiate, 'install_instantiate');
+ for (let config in configs) {
+ await testConfig(config);
+ }
+ InspectorTest.log('Finished!');
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt
index 1554054e46..42e0535d22 100644
--- a/deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt
@@ -3,7 +3,7 @@ Installing code and global variable.
Calling instantiate function.
Waiting for wasm scripts to be parsed.
Ignoring script with url v8://test/callInstantiate
-Got wasm script: wasm://wasm/fa045c1e
+Got wasm script: wasm://wasm/7d022e0e
Setting breakpoint on line 3 of wasm function
{
columnNumber : 39
@@ -12,7 +12,7 @@ Setting breakpoint on line 3 of wasm function
}
BreakpointId: 4:0:39:6
paused No 1
-Script wasm://wasm/fa045c1e byte offset 39: Wasm opcode 0x6b
+Script wasm://wasm/7d022e0e byte offset 39: Wasm opcode 0x6b
Remove breakpoint with breakpointId: 4:0:39:6
Debugger.resume
exports.main returned!
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index 1ab0977e1f..45bff036b7 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -9,290 +9,384 @@ Setting breakpoint on first instruction of second function
scriptId : <scriptId>
}
Paused:
-Script wasm://wasm/a6e9de16 byte offset 69: Wasm opcode 0x41
+Script wasm://wasm/d374ef0a byte offset 69: Wasm opcode 0x41
Scope:
at func (0:69):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 4 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 4 (number), "var1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack:
+ - scope (wasm-expression-stack):
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 71: Wasm opcode 0x21
+Script wasm://wasm/d374ef0a byte offset 71: Wasm opcode 0x21
Scope:
at func (0:71):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 4 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 4 (number), "var1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack: "0": 11 (number)
+ - scope (wasm-expression-stack):
+ 0: 11 (number)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 73: Wasm opcode 0x41
+Script wasm://wasm/d374ef0a byte offset 73: Wasm opcode 0x41
Scope:
at func (0:73):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack:
+ - scope (wasm-expression-stack):
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 75: Wasm opcode 0x21
+Script wasm://wasm/d374ef0a byte offset 75: Wasm opcode 0x21
Scope:
at func (0:75):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack: "0": 47 (number)
+ - scope (wasm-expression-stack):
+ 0: 47 (number)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 77: Wasm opcode 0x42
+Script wasm://wasm/d374ef0a byte offset 77: Wasm opcode 0x42
Scope:
at func (0:77):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack:
+ - scope (wasm-expression-stack):
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 88: Wasm opcode 0x21
+Script wasm://wasm/d374ef0a byte offset 88: Wasm opcode 0x21
Scope:
at func (0:88):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack: "0": 9223372036854775807 (string)
+ - scope (wasm-expression-stack):
+ 0: 9223372036854775807 (string)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 90: Wasm opcode 0x42
+Script wasm://wasm/d374ef0a byte offset 90: Wasm opcode 0x42
Scope:
at func (0:90):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 9223372036854775807 (string), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": 9223372036854775807 (string), "unicode☼f64": 0 (number)
stack:
+ - scope (wasm-expression-stack):
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 101: Wasm opcode 0x21
+Script wasm://wasm/d374ef0a byte offset 101: Wasm opcode 0x21
Scope:
at func (0:101):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 9223372036854775807 (string), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": 9223372036854775807 (string), "unicode☼f64": 0 (number)
stack: "0": -9223372036854775808 (string)
+ - scope (wasm-expression-stack):
+ 0: -9223372036854775808 (string)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 103: Wasm opcode 0x41
+Script wasm://wasm/d374ef0a byte offset 103: Wasm opcode 0x41
Scope:
at func (0:103):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack:
+ - scope (wasm-expression-stack):
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 105: Wasm opcode 0xb8
+Script wasm://wasm/d374ef0a byte offset 105: Wasm opcode 0xb8
Scope:
at func (0:105):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number)
+ - scope (wasm-expression-stack):
+ 0: 1 (number)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 106: Wasm opcode 0x41
+Script wasm://wasm/d374ef0a byte offset 106: Wasm opcode 0x41
Scope:
at func (0:106):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number)
+ - scope (wasm-expression-stack):
+ 0: 1 (number)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 108: Wasm opcode 0xb8
+Script wasm://wasm/d374ef0a byte offset 108: Wasm opcode 0xb8
Scope:
at func (0:108):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number), "1": 7 (number)
+ - scope (wasm-expression-stack):
+ 0: 1 (number)
+ 1: 7 (number)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 109: Wasm opcode 0xa3
+Script wasm://wasm/d374ef0a byte offset 109: Wasm opcode 0xa3
Scope:
at func (0:109):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number), "1": 7 (number)
+ - scope (wasm-expression-stack):
+ 0: 1 (number)
+ 1: 7 (number)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 110: Wasm opcode 0x21
+Script wasm://wasm/d374ef0a byte offset 110: Wasm opcode 0x21
Scope:
at func (0:110):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 0.14285714285714285 (number)
+ - scope (wasm-expression-stack):
+ 0: 0.14285714285714285 (number)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 112: Wasm opcode 0x41
+Script wasm://wasm/d374ef0a byte offset 112: Wasm opcode 0x41
Scope:
at func (0:112):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
stack:
+ - scope (wasm-expression-stack):
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 114: Wasm opcode 0x24
+Script wasm://wasm/d374ef0a byte offset 114: Wasm opcode 0x24
Scope:
at func (0:114):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
stack: "0": 15 (number)
+ - scope (wasm-expression-stack):
+ 0: 15 (number)
at call_func (0:58):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/a6e9de16 byte offset 116: Wasm opcode 0xb
+Script wasm://wasm/d374ef0a byte offset 116: Wasm opcode 0x0b
Scope:
at func (0:116):
- scope (global):
- globals: "global#0": 15 (number)
+ globals: "global0": 15 (number)
- scope (local):
- locals: "0": 0 (number), "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
+ locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
stack:
+ - scope (wasm-expression-stack):
at call_func (0:58):
- scope (global):
- globals: "global#0": 15 (number)
+ globals: "global0": 15 (number)
- scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+Script wasm://wasm/d374ef0a byte offset 60: Wasm opcode 0x0b
+Scope:
+at call_func (0:60):
+ - scope (global):
+ globals: "global0": 15 (number)
+ - scope (local):
+ locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
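
The expected-output changes above capture the core of this inspector change: besides listing the operand stack inline under the local scope, each wasm frame now exposes a separate scope of type wasm-expression-stack whose numbered properties are the stack slots. A minimal sketch of dumping that scope, assuming the Protocol object and frame shape used by the InspectorTest harness in these tests (not a public embedder API):

    // Sketch only: Protocol and the frame shape come from the inspector
    // test harness used in these tests.
    async function dumpExpressionStack(frame) {
      for (const scope of frame.scopeChain) {
        if (scope.type !== 'wasm-expression-stack') continue;
        // Each operand-stack slot is a numbered property of the scope object.
        const {result: {result: props}} = await Protocol.Runtime.getProperties(
            {objectId: scope.object.objectId});
        for (const prop of props) {
          InspectorTest.log(`${prop.name}: ${prop.value.value} (${prop.value.type})`);
        }
      }
    }
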
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
index 4d6139e425..292f0074a3 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
@@ -10,126 +10,233 @@ Setting breakpoint on line 2 (first instruction) of third function
scriptId : <scriptId>
}
Paused:
-Script wasm://wasm/89d60696 byte offset 85: Wasm opcode 0x20
+Script wasm://wasm/c4eb034a byte offset 85: Wasm opcode 0x20
Scope:
at C (interpreted) (0:85):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 0 (number)
stack:
+ - scope (wasm-expression-stack):
at B (liftoff) (0:76):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "local#5": 0 (number)
+ locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
stack: "0": 42 (number), "1": 3 (number)
+ - scope (wasm-expression-stack):
+ 0: 42 (number)
+ 1: 3 (number)
at A (liftoff) (0:54):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "arg#0": 42 (number)
+ locals: "var0": 42 (number)
stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/89d60696 byte offset 87: Wasm opcode 0x24
+Script wasm://wasm/c4eb034a byte offset 87: Wasm opcode 0x24
Scope:
at C (interpreted) (0:87):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 0 (number)
stack: "0": 42 (number)
+ - scope (wasm-expression-stack):
+ 0: 42 (number)
at B (liftoff) (0:76):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "local#5": 0 (number)
+ locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
stack: "0": 42 (number), "1": 3 (number)
+ - scope (wasm-expression-stack):
+ 0: 42 (number)
+ 1: 3 (number)
at A (liftoff) (0:54):
- scope (global):
- globals: "global#0": 0 (number)
+ globals: "global0": 0 (number)
- scope (local):
- locals: "arg#0": 42 (number)
+ locals: "var0": 42 (number)
stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/89d60696 byte offset 89: Wasm opcode 0x41
+Script wasm://wasm/c4eb034a byte offset 89: Wasm opcode 0x41
Scope:
at C (interpreted) (0:89):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 0 (number)
stack:
+ - scope (wasm-expression-stack):
at B (liftoff) (0:76):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
- locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "local#5": 0 (number)
+ locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
stack: "0": 42 (number), "1": 3 (number)
+ - scope (wasm-expression-stack):
+ 0: 42 (number)
+ 1: 3 (number)
at A (liftoff) (0:54):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
- locals: "arg#0": 42 (number)
+ locals: "var0": 42 (number)
stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/89d60696 byte offset 91: Wasm opcode 0x21
+Script wasm://wasm/c4eb034a byte offset 91: Wasm opcode 0x21
Scope:
at C (interpreted) (0:91):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 0 (number)
stack: "0": 47 (number)
+ - scope (wasm-expression-stack):
+ 0: 47 (number)
at B (liftoff) (0:76):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
- locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "local#5": 0 (number)
+ locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
stack: "0": 42 (number), "1": 3 (number)
+ - scope (wasm-expression-stack):
+ 0: 42 (number)
+ 1: 3 (number)
at A (liftoff) (0:54):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
- locals: "arg#0": 42 (number)
+ locals: "var0": 42 (number)
stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
Paused:
-Script wasm://wasm/89d60696 byte offset 93: Wasm opcode 0xb
+Script wasm://wasm/c4eb034a byte offset 93: Wasm opcode 0x0b
Scope:
at C (interpreted) (0:93):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 47 (number)
stack:
+ - scope (wasm-expression-stack):
at B (liftoff) (0:76):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
- locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "local#5": 0 (number)
+ locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
stack: "0": 42 (number), "1": 3 (number)
+ - scope (wasm-expression-stack):
+ 0: 42 (number)
+ 1: 3 (number)
at A (liftoff) (0:54):
- scope (global):
- globals: "global#0": 42 (number)
+ globals: "global0": 42 (number)
- scope (local):
- locals: "arg#0": 42 (number)
+ locals: "var0": 42 (number)
stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+Script wasm://wasm/c4eb034a byte offset 78: Wasm opcode 0x1a
+Scope:
+at B (liftoff) (0:78):
+ - scope (global):
+ globals: "global0": 42 (number)
+ - scope (local):
+ locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
+ stack: "0": 42 (number), "1": 3 (number)
+ - scope (wasm-expression-stack):
+ 0: 42 (number)
+ 1: 3 (number)
+at A (liftoff) (0:54):
+ - scope (global):
+ globals: "global0": 42 (number)
+ - scope (local):
+ locals: "var0": 42 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+Script wasm://wasm/c4eb034a byte offset 79: Wasm opcode 0x1a
+Scope:
+at B (liftoff) (0:79):
+ - scope (global):
+ globals: "global0": 42 (number)
+ - scope (local):
+ locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
+ stack: "0": 42 (number)
+ - scope (wasm-expression-stack):
+ 0: 42 (number)
+at A (liftoff) (0:54):
+ - scope (global):
+ globals: "global0": 42 (number)
+ - scope (local):
+ locals: "var0": 42 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+Script wasm://wasm/c4eb034a byte offset 80: Wasm opcode 0x0b
+Scope:
+at B (liftoff) (0:80):
+ - scope (global):
+ globals: "global0": 42 (number)
+ - scope (local):
+ locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at A (liftoff) (0:54):
+ - scope (global):
+ globals: "global0": 42 (number)
+ - scope (local):
+ locals: "var0": 42 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+Script wasm://wasm/c4eb034a byte offset 56: Wasm opcode 0x0b
+Scope:
+at A (liftoff) (0:56):
+ - scope (global):
+ globals: "global0": 42 (number)
+ - scope (local):
+ locals: "var0": 42 (number)
+ stack:
+ - scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
-- skipped globals
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
index a945d77626..ef7ec63b03 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(clemensb): In the long term this test should also work without any
-// flags, once we tier down to Liftoff when the debugger is enabled.
-// Flags: --liftoff --no-wasm-tier-up
+// Flags: --debug-in-liftoff
let {session, contextGroup, Protocol} = InspectorTest.start(
'Test retrieving scope information from compiled Liftoff frames');
@@ -149,17 +147,15 @@ async function waitForWasmScripts() {
}
async function getScopeValues(value) {
- if (value.type != 'object') {
- InspectorTest.log('Expected object. Found:');
- InspectorTest.logObject(value);
- return;
+ if (value.type == 'object') {
+ let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
+ printIfFailure(msg);
+ const printProperty = function(elem) {
+ return `"${elem.name}": ${elem.value.value} (${elem.value.type})`;
+ }
+ return msg.result.result.map(printProperty).join(', ');
}
-
- let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
- printIfFailure(msg);
- let printProperty = elem => '"' + elem.name + '"' +
- ': ' + elem.value.value + ' (' + elem.value.type + ')';
- return msg.result.result.map(printProperty).join(', ');
+ return value.value + ' (' + value.type + ')';
}
async function dumpScopeProperties(message) {
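
In the rewritten getScopeValues above, the early return is inverted: object-typed scope values are expanded through Runtime.getProperties, while primitive values (which scope entries can now be) are printed directly. For a property shaped like {name: 'var0', value: {value: 4, type: 'number'}}, printProperty yields "var0": 4 (number), matching the locals lines in the expected output. A standalone sketch with illustrative data:

    // Illustrative data shaped like a Runtime.getProperties result; the
    // values mirror the expected-output lines above, not a live session.
    const props = [
      {name: 'var0', value: {value: 4, type: 'number'}},
      {name: 'var1', value: {value: 7.199999809265137, type: 'number'}},
    ];
    const printProperty =
        (elem) => `"${elem.name}": ${elem.value.value} (${elem.value.type})`;
    // Logs: "var0": 4 (number), "var1": 7.199999809265137 (number)
    console.log(props.map(printProperty).join(', '));
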
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info.js b/deps/v8/test/inspector/debugger/wasm-scope-info.js
index 693fb00277..413aa69ce1 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info.js
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Disable Liftoff to get deterministic (non-existing) scope information for
-// compiled frames.
-// Flags: --no-liftoff
+// Flags: --debug-in-liftoff
let {session, contextGroup, Protocol} = InspectorTest.start(
'Test retrieving scope information when pausing in wasm functions');
@@ -147,17 +145,15 @@ async function waitForWasmScripts() {
}
async function getScopeValues(value) {
- if (value.type != 'object') {
- InspectorTest.log('Expected object. Found:');
- InspectorTest.logObject(value);
- return;
+ if (value.type == 'object') {
+ let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
+ printIfFailure(msg);
+ const printProperty = function(elem) {
+ return `"${elem.name}": ${elem.value.value} (${elem.value.type})`;
+ }
+ return msg.result.result.map(printProperty).join(', ');
}
-
- let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
- printIfFailure(msg);
- let printProperty = elem => '"' + elem.name + '"' +
- ': ' + elem.value.value + ' (' + elem.value.type + ')';
- return msg.result.result.map(printProperty).join(', ');
+ return value.value + ' (' + value.type + ')';
}
async function dumpScopeProperties(message) {
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
index 3918e210a3..7b76eab18b 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
@@ -1,52 +1,52 @@
Tests how wasm scripts are reported
Check that each inspector gets a wasm script at module creation time.
-Session #1: Script #0 parsed. URL: wasm://wasm/f608ae1e. Source map URL:
-Session #2: Script #0 parsed. URL: wasm://wasm/f608ae1e. Source map URL:
-Session #1: Script #1 parsed. URL: wasm://wasm/74f86b7e. Source map URL: wasm://dwarf
-Session #2: Script #1 parsed. URL: wasm://wasm/74f86b7e. Source map URL: wasm://dwarf
-Session #1: Script #2 parsed. URL: wasm://wasm/3754e3fe. Source map URL: abc
-Session #2: Script #2 parsed. URL: wasm://wasm/3754e3fe. Source map URL: abc
-Session #1: Script #3 parsed. URL: wasm://wasm/2bd2e40e. Source map URL: abc
-Session #2: Script #3 parsed. URL: wasm://wasm/2bd2e40e. Source map URL: abc
-Session #1: Script #4 parsed. URL: wasm://wasm/f568e726. Source map URL: abc
-Session #2: Script #4 parsed. URL: wasm://wasm/f568e726. Source map URL: abc
-Session #1: Source for wasm://wasm/f608ae1e:
+Session #1: Script #0 parsed. URL: wasm://wasm/7b04570e. Source map URL: , module begin: 0, module end: 0, code offset: 34
+Session #2: Script #0 parsed. URL: wasm://wasm/7b04570e. Source map URL: , module begin: 0, module end: 0, code offset: 34
+Session #1: Script #1 parsed. URL: wasm://wasm/ba7c35be. Source map URL: wasm://dwarf, module begin: 0, module end: 0, code offset: 34
+Session #2: Script #1 parsed. URL: wasm://wasm/ba7c35be. Source map URL: wasm://dwarf, module begin: 0, module end: 0, code offset: 34
+Session #1: Script #2 parsed. URL: wasm://wasm/1baa71fe. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
+Session #2: Script #2 parsed. URL: wasm://wasm/1baa71fe. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
+Session #1: Script #3 parsed. URL: wasm://wasm/95e97206. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
+Session #2: Script #3 parsed. URL: wasm://wasm/95e97206. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
+Session #1: Script #4 parsed. URL: wasm://wasm/7ab47392. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
+Session #2: Script #4 parsed. URL: wasm://wasm/7ab47392. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
+Session #1: Source for wasm://wasm/7b04570e:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #2: Source for wasm://wasm/f608ae1e:
+Session #2: Source for wasm://wasm/7b04570e:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #1: Source for wasm://wasm/74f86b7e:
+Session #1: Source for wasm://wasm/ba7c35be:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #2: Source for wasm://wasm/74f86b7e:
+Session #2: Source for wasm://wasm/ba7c35be:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #1: Source for wasm://wasm/3754e3fe:
+Session #1: Source for wasm://wasm/1baa71fe:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #2: Source for wasm://wasm/3754e3fe:
+Session #2: Source for wasm://wasm/1baa71fe:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #1: Source for wasm://wasm/2bd2e40e:
+Session #1: Source for wasm://wasm/95e97206:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #2: Source for wasm://wasm/2bd2e40e:
+Session #2: Source for wasm://wasm/95e97206:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #1: Source for wasm://wasm/f568e726:
+Session #1: Source for wasm://wasm/7ab47392:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
-Session #2: Source for wasm://wasm/f568e726:
+Session #2: Source for wasm://wasm/7ab47392:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts-with-name-expected.txt b/deps/v8/test/inspector/debugger/wasm-scripts-with-name-expected.txt
index f655ff17e7..a5d8c16ad9 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts-with-name-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scripts-with-name-expected.txt
@@ -1,7 +1,7 @@
Tests how wasm scripts are reported with name
Check that the inspector gets four wasm scripts at module creation time.
-Session #1: Script #0 parsed. URL: wasm://wasm/49a8663e.
-Session #1: Script #1 parsed. URL: wasm://wasm/moduleName-aea4a206.
-Session #1: Source for wasm://wasm/49a8663e:
+Session #1: Script #0 parsed. URL: wasm://wasm/a4d4331e.
+Session #1: Script #1 parsed. URL: wasm://wasm/moduleName-d7525102.
+Session #1: Source for wasm://wasm/a4d4331e:
-Session #1: Source for wasm://wasm/moduleName-aea4a206:
+Session #1: Source for wasm://wasm/moduleName-d7525102:
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts.js b/deps/v8/test/inspector/debugger/wasm-scripts.js
index 6a342922a8..5ad5ff09de 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts.js
+++ b/deps/v8/test/inspector/debugger/wasm-scripts.js
@@ -85,9 +85,13 @@ function trackScripts(debuggerParams) {
Protocol.Debugger.enable(debuggerParams);
Protocol.Debugger.onScriptParsed(handleScriptParsed);
- async function loadScript({url, scriptId, sourceMapURL}) {
- InspectorTest.log(`Session #${sessionId}: Script #${scripts.length} parsed. URL: ${url}. Source map URL: ${sourceMapURL}`);
- let {result: {scriptSource, bytecode}} = await Protocol.Debugger.getScriptSource({scriptId});
+ async function loadScript(
+ {url, scriptId, sourceMapURL, startColumn, endColumn, codeOffset}) {
+ InspectorTest.log(`Session #${sessionId}: Script #${
+ scripts.length} parsed. URL: ${url}. Source map URL: ${
+ sourceMapURL}, module begin: ${startColumn}, module end: ${endColumn}, code offset: ${codeOffset}`);
+ let {result: {scriptSource, bytecode}} =
+ await Protocol.Debugger.getScriptSource({scriptId});
if (bytecode) {
if (scriptSource) {
InspectorTest.log('Unexpected scriptSource with bytecode: ');
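
The loadScript change above logs three fields that Debugger.scriptParsed now reports for wasm scripts: startColumn and endColumn (module begin/end) and codeOffset (the byte offset of the code section, 34 for these modules). A sketch of a handler reading them, assuming the same Protocol session; the field names are taken from the test above:

    // Sketch: handler for Debugger.scriptParsed events in the test harness.
    Protocol.Debugger.onScriptParsed(({params}) => {
      const {url, startColumn, endColumn, codeOffset} = params;
      if (!url.startsWith('wasm://')) return;  // these fields matter for wasm scripts
      InspectorTest.log(
          `${url}: module [${startColumn}, ${endColumn}], code offset ${codeOffset}`);
    });
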
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
index b53a17610b..bb65e4f47d 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
@@ -1,12 +1,12 @@
Tests stepping through wasm scripts.
Instantiating.
Waiting for wasm script (ignoring first non-wasm script).
-Setting breakpoint at offset 54 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 53 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 51 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 49 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 45 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 47 on script wasm://wasm/18214bfe
+Setting breakpoint at offset 54 on script wasm://wasm/0c10a5fe
+Setting breakpoint at offset 53 on script wasm://wasm/0c10a5fe
+Setting breakpoint at offset 51 on script wasm://wasm/0c10a5fe
+Setting breakpoint at offset 49 on script wasm://wasm/0c10a5fe
+Setting breakpoint at offset 45 on script wasm://wasm/0c10a5fe
+Setting breakpoint at offset 47 on script wasm://wasm/0c10a5fe
Calling main(4)
Breaking on byte offset 45
Breaking on byte offset 47
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
index 735fc49f44..43827fef3b 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
@@ -1,12 +1,391 @@
Tests stepping through wasm scripts.
Instantiating.
Waiting for wasm script (ignoring first non-wasm script).
-Setting breakpoint at offset 54 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 53 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 51 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 49 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 45 on script wasm://wasm/18214bfe
-Setting breakpoint at offset 47 on script wasm://wasm/18214bfe
+Setting breakpoint at offset 38 on script wasm://wasm/0c10a5fe
Calling main(4)
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
+Scope:
+at wasm_A (0:38):
+ - scope (global):
+ - scope (local):
+ stack:
+ - scope (wasm-expression-stack):
+at wasm_B (0:56):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 3 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Setting breakpoint at offset 39 on script v8://test/runWasm
+Setting breakpoint at offset 61 on script v8://test/runWasm
+Setting breakpoint at offset 60 on script v8://test/runWasm
+Setting breakpoint at offset 54 on script v8://test/runWasm
+Setting breakpoint at offset 53 on script v8://test/runWasm
+Setting breakpoint at offset 51 on script v8://test/runWasm
+Setting breakpoint at offset 49 on script v8://test/runWasm
+Setting breakpoint at offset 45 on script v8://test/runWasm
+Setting breakpoint at offset 47 on script v8://test/runWasm
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
+Scope:
+at wasm_A (0:39):
+ - scope (global):
+ - scope (local):
+ stack:
+ - scope (wasm-expression-stack):
+at wasm_B (0:56):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 3 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
+Scope:
+at wasm_B (0:45):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 3 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
+Scope:
+at wasm_B (0:47):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 3 (number)
+ stack: "0": 3 (number)
+ - scope (wasm-expression-stack):
+ 0: 3 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
+Scope:
+at wasm_B (0:49):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 3 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
+Scope:
+at wasm_B (0:51):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 3 (number)
+ stack: "0": 3 (number)
+ - scope (wasm-expression-stack):
+ 0: 3 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
+Scope:
+at wasm_B (0:53):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 3 (number)
+ stack: "0": 3 (number), "1": 1 (number)
+ - scope (wasm-expression-stack):
+ 0: 3 (number)
+ 1: 1 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
+Scope:
+at wasm_B (0:54):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 3 (number)
+ stack: "0": 2 (number)
+ - scope (wasm-expression-stack):
+ 0: 2 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
+Scope:
+at wasm_A (0:38):
+ - scope (global):
+ - scope (local):
+ stack:
+ - scope (wasm-expression-stack):
+at wasm_B (0:56):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 2 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
+Scope:
+at wasm_A (0:39):
+ - scope (global):
+ - scope (local):
+ stack:
+ - scope (wasm-expression-stack):
+at wasm_B (0:56):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 2 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
+Scope:
+at wasm_B (0:45):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 2 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
+Scope:
+at wasm_B (0:47):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 2 (number)
+ stack: "0": 2 (number)
+ - scope (wasm-expression-stack):
+ 0: 2 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
+Scope:
+at wasm_B (0:49):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 2 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
+Scope:
+at wasm_B (0:51):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 2 (number)
+ stack: "0": 2 (number)
+ - scope (wasm-expression-stack):
+ 0: 2 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
+Scope:
+at wasm_B (0:53):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 2 (number)
+ stack: "0": 2 (number), "1": 1 (number)
+ - scope (wasm-expression-stack):
+ 0: 2 (number)
+ 1: 1 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
+Scope:
+at wasm_B (0:54):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 2 (number)
+ stack: "0": 1 (number)
+ - scope (wasm-expression-stack):
+ 0: 1 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
+Scope:
+at wasm_A (0:38):
+ - scope (global):
+ - scope (local):
+ stack:
+ - scope (wasm-expression-stack):
+at wasm_B (0:56):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 1 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
+Scope:
+at wasm_A (0:39):
+ - scope (global):
+ - scope (local):
+ stack:
+ - scope (wasm-expression-stack):
+at wasm_B (0:56):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 1 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
+Scope:
+at wasm_B (0:45):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 1 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
+Scope:
+at wasm_B (0:47):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 1 (number)
+ stack: "0": 1 (number)
+ - scope (wasm-expression-stack):
+ 0: 1 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
+Scope:
+at wasm_B (0:49):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 1 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
+Scope:
+at wasm_B (0:51):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 1 (number)
+ stack: "0": 1 (number)
+ - scope (wasm-expression-stack):
+ 0: 1 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
+Scope:
+at wasm_B (0:53):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 1 (number)
+ stack: "0": 1 (number), "1": 1 (number)
+ - scope (wasm-expression-stack):
+ 0: 1 (number)
+ 1: 1 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
+Scope:
+at wasm_B (0:54):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 1 (number)
+ stack: "0": 0 (number)
+ - scope (wasm-expression-stack):
+ 0: 0 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
+Scope:
+at wasm_A (0:38):
+ - scope (global):
+ - scope (local):
+ stack:
+ - scope (wasm-expression-stack):
+at wasm_B (0:56):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 0 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
+Scope:
+at wasm_A (0:39):
+ - scope (global):
+ - scope (local):
+ stack:
+ - scope (wasm-expression-stack):
+at wasm_B (0:56):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 0 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
+Scope:
+at wasm_B (0:45):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 0 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
+Scope:
+at wasm_B (0:47):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 0 (number)
+ stack: "0": 0 (number)
+ - scope (wasm-expression-stack):
+ 0: 0 (number)
+at (anonymous) (0:17):
+ -- skipped
+Paused:
+Script wasm://wasm/0c10a5fe byte offset 61: Wasm opcode 0x0b
+Scope:
+at wasm_B (0:61):
+ - scope (global):
+ - scope (local):
+ locals: "var0": 0 (number)
+ stack:
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ -- skipped
exports.main returned!
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js
index ce70f91921..3e8d783af6 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js
@@ -6,29 +6,30 @@
const {session, contextGroup, Protocol} =
InspectorTest.start('Tests stepping through wasm scripts.');
+session.setupScriptMap();
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
-const func_a_idx =
- builder.addFunction('wasm_A', kSig_v_v).addBody([kExprNop, kExprNop]).index;
+const func_a =
+ builder.addFunction('wasm_A', kSig_v_v).addBody([kExprNop, kExprNop]);
// wasm_B calls wasm_A <param0> times.
const func_b = builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
- kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
- kExprLocalGet, 0, // -
- kExprI32Const, 1, // -
- kExprI32Sub, // -
- kExprLocalSet, 0, // decrease <param0>
- kExprCallFunction, func_a_idx, // -
- kExprBr, 1, // continue
- kExprEnd, // -
- kExprEnd, // break
+ kExprLoop, kWasmStmt, // while
+ kExprLocalGet, 0, // -
+ kExprIf, kWasmStmt, // if <param0> != 0
+ kExprLocalGet, 0, // -
+ kExprI32Const, 1, // -
+ kExprI32Sub, // -
+ kExprLocalSet, 0, // decrease <param0>
+ kExprCallFunction, func_a.index, // -
+ kExprBr, 1, // continue
+ kExprEnd, // -
+ kExprEnd, // break
// clang-format on
])
.exportAs('main');
@@ -53,24 +54,68 @@ const evalWithUrl = (code, url) =>
.evaluate({'expression': code + '\n//# sourceURL=v8://test/' + url})
.then(getResult);
-function setBreakpoint(offset, script) {
+function setBreakpoint(offset, scriptId, scriptUrl) {
InspectorTest.log(
- 'Setting breakpoint at offset ' + offset + ' on script ' + script.url);
+ 'Setting breakpoint at offset ' + offset + ' on script ' + scriptUrl);
return Protocol.Debugger
.setBreakpoint(
- {'location': {'scriptId': script.scriptId, 'lineNumber': 0, 'columnNumber': offset}})
+ {'location': {'scriptId': scriptId, 'lineNumber': 0, 'columnNumber': offset}})
.then(getResult);
}
-Protocol.Debugger.onPaused(pause_msg => {
- let loc = pause_msg.params.callFrames[0].location;
- if (loc.lineNumber != 0) {
- InspectorTest.log('Unexpected line number: ' + loc.lineNumber);
+// Only set breakpoints during the first loop iteration.
+var first_iteration = true;
+
+Protocol.Debugger.onPaused(async msg => {
+ let loc = msg.params.callFrames[0].location;
+ InspectorTest.log('Paused:');
+ await session.logSourceLocation(loc);
+ InspectorTest.log('Scope:');
+ for (var frame of msg.params.callFrames) {
+ var functionName = frame.functionName || '(anonymous)';
+ var lineNumber = frame.location.lineNumber;
+ var columnNumber = frame.location.columnNumber;
+ InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
+ if (!/^wasm/.test(frame.url)) {
+ InspectorTest.log(' -- skipped');
+ continue;
+ }
+ for (var scope of frame.scopeChain) {
+ InspectorTest.logObject(' - scope (' + scope.type + '):');
+ var properties = await Protocol.Runtime.getProperties(
+ {'objectId': scope.object.objectId});
+ for (var value of properties.result.result) {
+ var value_str = await getScopeValues(value.value);
+ InspectorTest.log(' ' + value.name + ': ' + value_str);
+ }
+ }
+ }
+
+ if (first_iteration && loc.columnNumber == func_a.body_offset) {
+ // Check that setting breakpoints on active instances of A and B takes
+ // effect immediately.
+ setBreakpoint(func_a.body_offset + 1, loc.scriptId, frame.url);
+ // All of the following breakpoints are in reachable code, except offset 17.
+ for (offset of [18, 17, 11, 10, 8, 6, 2, 4]) {
+ setBreakpoint(func_b.body_offset + offset, loc.scriptId, frame.url);
+ }
+ first_iteration = false;
}
- InspectorTest.log('Breaking on byte offset ' + loc.columnNumber);
+
Protocol.Debugger.resume();
});
+async function getScopeValues(value) {
+ if (value.type == 'object') {
+ let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
+ const printProperty = function(elem) {
+ return `"${elem.name}": ${elem.value.value} (${elem.value.type})`;
+ }
+ return msg.result.result.map(printProperty).join(', ');
+ }
+ return value.value + ' (' + value.type + ')';
+}
+
(async function test() {
await Protocol.Debugger.enable();
InspectorTest.log('Instantiating.');
@@ -82,9 +127,9 @@ Protocol.Debugger.onPaused(pause_msg => {
'Waiting for wasm script (ignoring first non-wasm script).');
// Ignore javascript and full module wasm script, get scripts for functions.
const [, {params: wasm_script}] = await Protocol.Debugger.onceScriptParsed(2);
- for (offset of [11, 10, 8, 6, 2, 4]) {
- await setBreakpoint(func_b.body_offset + offset, wasm_script);
- }
+ // Set a breakpoint in function A at offset 0. When the debugger hits this
+ // breakpoint, new ones will be added.
+ await setBreakpoint(func_a.body_offset, wasm_script.scriptId, wasm_script.url);
InspectorTest.log('Calling main(4)');
await evalWithUrl('instance.exports.main(4)', 'runWasm');
InspectorTest.log('exports.main returned!');
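
As the updated test shows, wasm breakpoints are addressed by byte offset: lineNumber is always 0 and columnNumber is the offset within the module, and setting them now also takes effect on already-active frames from inside the pause handler. A minimal sketch of the call, assuming a Protocol session and a known scriptId as in the harness:

    // Sketch: wasm locations use lineNumber 0; columnNumber is the byte offset.
    async function setWasmBreakpoint(scriptId, byteOffset) {
      const msg = await Protocol.Debugger.setBreakpoint(
          {location: {scriptId, lineNumber: 0, columnNumber: byteOffset}});
      // actualLocation may be adjusted forward to the next breakable opcode
      // (e.g. offset 59 propagating to 60 in the stepping test below).
      return msg.result.actualLocation;
    }
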
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
index 364a319b93..1ff78f3543 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
@@ -12,8 +12,8 @@ Result of evaluate (string):
Error: this is your stack trace:
-- skipped --
at call_debugger (<anonymous>:3:5)
- at call_func (wasm-function[1]:0x37)
- at main (wasm-function[2]:0x3e)
+ at call_func (<anonymous>:wasm-function[1]:0x37)
+ at main (<anonymous>:wasm-function[2]:0x3e)
at testFunction (<anonymous>:15:20)
at <anonymous>:1:1
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt
index 409e7cc002..54e567e65b 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt
@@ -1,297 +1,342 @@
Tests stepping through wasm scripts by byte offsets
Setting up global instance variable.
-Got wasm script: wasm://wasm/7dfc8356
-Setting breakpoint on offset 59 (should be propagated to 60, the offset of the call), url wasm://wasm/7dfc8356
+Got wasm script: wasm://wasm/befe41aa
+Setting breakpoint on offset 59 (should be propagated to 60, the offset of the call), url wasm://wasm/befe41aa
{
columnNumber : 60
lineNumber : 0
scriptId : <scriptId>
}
-Paused at wasm://wasm/7dfc8356:0:60
+Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {"0":1024}
+ - scope (wasm-expression-stack):
+ {"0":1024}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:39
+Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1024}
+ locals: {"var0":1024}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm://wasm/7dfc8356:0:40
+Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01
at wasm_A (0:40):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1024}
+ locals: {"var0":1024}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/7dfc8356:0:62
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
at wasm_B (0:62):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/7dfc8356:0:60
+Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":2}
+ locals: {"var0":2}
stack: {"0":1024}
+ - scope (wasm-expression-stack):
+ {"0":1024}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm://wasm/7dfc8356:0:62
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
at wasm_B (0:62):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":2}
+ locals: {"var0":2}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:44
-at wasm_B (0:44):
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+at wasm_B (0:46):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":2}
+ locals: {"var0":2}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.resume called
-Paused at wasm://wasm/7dfc8356:0:60
+Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":1024}
+ - scope (wasm-expression-stack):
+ {"0":1024}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:39
+Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1024}
+ locals: {"var0":1024}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/7dfc8356:0:62
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
at wasm_B (0:62):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
- stack: {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:44
-at wasm_B (0:44):
- - scope (global):
- -- skipped
- - scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:46
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
at wasm_B (0:46):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:48
+Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
at wasm_B (0:48):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":1}
+ - scope (wasm-expression-stack):
+ {"0":1}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:50
+Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20
at wasm_B (0:50):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:52
+Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41
at wasm_B (0:52):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":1}
+ - scope (wasm-expression-stack):
+ {"0":1}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:54
+Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b
at wasm_B (0:54):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":1,"1":1}
+ - scope (wasm-expression-stack):
+ {"0":1,"1":1}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:55
+Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21
at wasm_B (0:55):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":0}
+ - scope (wasm-expression-stack):
+ {"0":0}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:57
+Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41
at wasm_B (0:57):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:60
+Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {"0":1024}
+ - scope (wasm-expression-stack):
+ {"0":1024}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:39
+Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1024}
+ locals: {"var0":1024}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:40
+Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01
at wasm_A (0:40):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1024}
+ locals: {"var0":1024}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:41
+Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b
at wasm_A (0:41):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1024}
+ locals: {"var0":1024}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:60):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/7dfc8356:0:62
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
at wasm_B (0:62):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js
index 738544d418..d3c1932973 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js
@@ -4,6 +4,7 @@
let {session, contextGroup, Protocol} =
InspectorTest.start('Tests stepping through wasm scripts by byte offsets');
+session.setupScriptMap();
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -82,8 +83,8 @@ function instantiate(bytes) {
await waitForPauseAndStep('resume'); // to next breakpoint (3rd iteration)
await waitForPauseAndStep('stepInto'); // into wasm_A
await waitForPauseAndStep('stepOut'); // out to wasm_B
- // Now step 10 times, until we are in wasm_A again.
- for (let i = 0; i < 10; ++i) await waitForPauseAndStep('stepInto');
+ // Now step 9 times, until we are in wasm_A again.
+ for (let i = 0; i < 9; ++i) await waitForPauseAndStep('stepInto');
// 3 more times, back to wasm_B.
for (let i = 0; i < 3; ++i) await waitForPauseAndStep('stepInto');
// Then just resume.
@@ -95,9 +96,7 @@ function instantiate(bytes) {
async function waitForPauseAndStep(stepAction) {
const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
- const topFrame = callFrames[0];
- InspectorTest.log(
- `Paused at ${topFrame.url}:${topFrame.location.lineNumber}:${topFrame.location.columnNumber}`);
+ await session.logSourceLocation(callFrames[0].location);
for (var frame of callFrames) {
const functionName = frame.functionName || '(anonymous)';
const lineNumber = frame.location.lineNumber;
@@ -114,9 +113,13 @@ async function waitForPauseAndStep(stepAction) {
functionDeclaration: 'function() { return this; }',
returnByValue: true
});
- if (value.locals)
- InspectorTest.log(` locals: ${JSON.stringify(value.locals)}`);
- InspectorTest.log(` stack: ${JSON.stringify(value.stack)}`);
+ if (scope.type === 'local') {
+ if (value.locals)
+ InspectorTest.log(` locals: ${JSON.stringify(value.locals)}`);
+ InspectorTest.log(` stack: ${JSON.stringify(value.stack)}`);
+ } else {
+ InspectorTest.log(` ${JSON.stringify(value)}`);
+ }
}
}
}
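
The step-count change above (9 stepInto actions instead of 10) follows from the expected output earlier in this diff: with --debug-in-liftoff the debugger no longer pauses at offset 44, stepping from offset 62 directly to 46. The pause-then-step pattern the test relies on can be sketched as follows, assuming the harness's Protocol and session objects:

    // Sketch of the test's pause-then-step loop driver.
    async function waitForPauseAndStep(stepAction) {
      const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
      await session.logSourceLocation(callFrames[0].location);  // harness helper
      // stepAction is one of 'stepInto', 'stepOver', 'stepOut', 'resume'.
      await Protocol.Debugger[stepAction]();
    }
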
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt
index 422aa8b9f3..89d64b9513 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt
@@ -3,8 +3,8 @@ Installing code and global variable.
Calling instantiate function.
Waiting for wasm scripts to be parsed.
Ignoring script with url v8://test/callInstantiate
-Got wasm script: wasm://wasm/fa045c1e
-Setting breakpoint on line 3 of wasm function
+Got wasm script: wasm://wasm/7d022e0e
+Setting breakpoint on i32.const
{
columnNumber : 37
lineNumber : 0
@@ -23,10 +23,10 @@ paused
Debugger.stepInto
paused
-Script wasm://wasm/fa045c1e byte offset 35: Wasm opcode 0x20
+Script wasm://wasm/7d022e0e byte offset 35: Wasm opcode 0x20
Debugger.resume
paused
-Script wasm://wasm/fa045c1e byte offset 37: Wasm opcode 0x41
+Script wasm://wasm/7d022e0e byte offset 37: Wasm opcode 0x41
Debugger.resume
exports.main returned!
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js.js b/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js.js
index f8c0b9dbbb..510c1c0073 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js.js
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-let {session, contextGroup, Protocol} = InspectorTest.start('Tests stepping from javascript into wasm');
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests stepping from javascript into wasm');
session.setupScriptMap();
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -11,14 +12,12 @@ let builder = new WasmModuleBuilder();
// wasm_A
let func = builder.addFunction('wasm_A', kSig_i_i)
- .addBody([
- // clang-format off
- kExprLocalGet, 0, // Line 1: get input
- kExprI32Const, 1, // Line 2: get constant 1
- kExprI32Sub // Line 3: decrease
- // clang-format on
- ])
- .exportAs('main');
+ .addBody([
+ kExprLocalGet, 0, // push param 0
+ kExprI32Const, 1, // push constant 1
+ kExprI32Sub // subtract
+ ])
+ .exportAs('main');
let module_bytes = builder.toArray();
@@ -38,7 +37,7 @@ let evalWithUrl = (code, url) => Protocol.Runtime.evaluate(
{'expression': code + '\n//# sourceURL=v8://test/' + url});
Protocol.Debugger.onPaused(async message => {
- InspectorTest.log("paused");
+ InspectorTest.log('paused');
var frames = message.params.callFrames;
await session.logSourceLocation(frames[0].location);
let action = step_actions.shift() || 'resume';
@@ -50,8 +49,7 @@ let step_actions = [
'stepInto', // # debugger
'stepInto', // step into instance.exports.main(1)
'resume', // move to breakpoint
- // then just resume.
- 'resume',
+ 'resume', // then just resume.
];
contextGroup.addScript(`
@@ -69,13 +67,17 @@ function test() {
evalWithUrl(
'instantiate(' + JSON.stringify(module_bytes) + ')', 'callInstantiate');
const scriptId = await waitForWasmScript();
- InspectorTest.log(
- 'Setting breakpoint on line 3 of wasm function');
- let msg = await Protocol.Debugger.setBreakpoint(
- {'location': {'scriptId': scriptId, 'lineNumber': 0, 'columnNumber': 2 + func.body_offset}});
+ InspectorTest.log('Setting breakpoint on i32.const');
+ let msg = await Protocol.Debugger.setBreakpoint({
+ 'location': {
+ 'scriptId': scriptId,
+ 'lineNumber': 0,
+ 'columnNumber': 2 + func.body_offset
+ }
+ });
printFailure(msg);
InspectorTest.logMessage(msg.result.actualLocation);
- await Protocol.Runtime.evaluate({ expression: 'test()' });
+ await Protocol.Runtime.evaluate({expression: 'test()'});
InspectorTest.log('exports.main returned!');
InspectorTest.log('Finished!');
InspectorTest.completeTest();
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-liftoff-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-liftoff-expected.txt
new file mode 100644
index 0000000000..2f4194cd53
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-liftoff-expected.txt
@@ -0,0 +1,69 @@
+Tests stepping through wasm scripts by byte offsets
+Setting up global instance variable.
+Got wasm script: wasm://wasm/42af3c82
+Setting breakpoint on offset 72 (should be propagated to 73, the offset of the call), url wasm://wasm/42af3c82
+{
+ columnNumber : 73
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01
+Debugger.stepOver called
+Script wasm://wasm/42af3c82 byte offset 53: Wasm opcode 0x01
+Debugger.stepOut called
+Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c
+Debugger.stepOut called
+Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10
+Debugger.stepOver called
+Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20
+Debugger.resume called
+Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01
+Debugger.stepOut called
+Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 61: Wasm opcode 0x04
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 63: Wasm opcode 0x20
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 65: Wasm opcode 0x41
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 67: Wasm opcode 0x6b
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 68: Wasm opcode 0x21
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 70: Wasm opcode 0x41
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 53: Wasm opcode 0x01
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 54: Wasm opcode 0x0b
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20
+Debugger.resume called
+exports.main returned!
+Test stepping over a recursive call
+Setting breakpoint on the recursive call instruction @+93, url wasm://wasm/42af3c82
+{
+ columnNumber : 93
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+Script wasm://wasm/42af3c82 byte offset 93: Wasm opcode 0x10
+Removing breakpoint
+Debugger.stepOver called
+Script wasm://wasm/42af3c82 byte offset 95: Wasm opcode 0x20
+Debugger.resume called
+Finished!
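
For reading the traces above: the reported bytes are standard WebAssembly opcodes. A small lookup of the ones that appear (names per the wasm spec; the constant name is illustrative):

const kOpcodeNames = {
  0x01: 'nop',       0x04: 'if',        0x0b: 'end',
  0x0c: 'br',        0x10: 'call',      0x20: 'local.get',
  0x21: 'local.set', 0x41: 'i32.const', 0x6b: 'i32.sub',
};
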
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js b/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js
new file mode 100644
index 0000000000..78c65e01f1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js
@@ -0,0 +1,146 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --debug-in-liftoff
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests stepping through wasm scripts by byte offsets');
+session.setupScriptMap();
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+var func_a_idx =
+ builder.addFunction('wasm_A', kSig_v_i).addBody([kExprNop, kExprNop]).index;
+
+// wasm_B calls wasm_A <param0> times.
+var func_b = builder.addFunction('wasm_B', kSig_v_i)
+ .addBody([
+ // clang-format off
+ kExprLoop, kWasmStmt, // while
+ kExprLocalGet, 0, // -
+ kExprIf, kWasmStmt, // if <param0> != 0
+ kExprLocalGet, 0, // -
+ kExprI32Const, 1, // -
+ kExprI32Sub, // -
+ kExprLocalSet, 0, // decrease <param0>
+ ...wasmI32Const(1024), // some longer i32 const (2 byte imm)
+ kExprCallFunction, func_a_idx, // -
+ kExprBr, 1, // continue
+ kExprEnd, // -
+ kExprEnd, // break
+ // clang-format on
+ ])
+ .exportAs('main');
+
+let fact = builder.addFunction('fact', kSig_i_i)
+ .addLocals({i32_count: 1})
+ .addBody([
+ // clang-format off
+ kExprLocalGet, 0,
+ kExprIf, kWasmI32, // if <param0> != 0
+ kExprLocalGet, 0,
+ kExprI32Const, 1,
+ kExprI32Sub,
+ kExprCallFunction, 2,
+ kExprLocalGet, 0,
+ kExprI32Mul, // return fact(<param0> - 1) * <param0>
+ kExprElse, // else
+ kExprI32Const, 1, // return 1
+ kExprEnd,
+ // clang-format on
+ ])
+ .exportAs('fact');
+
+var module_bytes = builder.toArray();
+
+function instantiate(bytes) {
+ var buffer = new ArrayBuffer(bytes.length);
+ var view = new Uint8Array(buffer);
+ for (var i = 0; i < bytes.length; ++i) {
+ view[i] = bytes[i] | 0;
+ }
+
+ var module = new WebAssembly.Module(buffer);
+ // Set global variable.
+ instance = new WebAssembly.Instance(module);
+}
+
+(async function test() {
+ for (const action of ['stepInto', 'stepOver', 'stepOut', 'resume'])
+ InspectorTest.logProtocolCommandCalls('Debugger.' + action);
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting up global instance variable.');
+ Protocol.Runtime.evaluate({
+ expression: `var instance;` +
+ `(${instantiate.toString()})(${JSON.stringify(module_bytes)})`
+ });
+ const [, {params: wasmScript}] = await Protocol.Debugger.onceScriptParsed(2);
+
+ InspectorTest.log('Got wasm script: ' + wasmScript.url);
+
+ // Set the breakpoint on a non-breakable position. This should resolve to the
+ // next instruction.
+ var offset = func_b.body_offset + 15;
+ InspectorTest.log(
+ `Setting breakpoint on offset ` + offset + ` (should be propagated to ` +
+ (offset + 1) + `, the offset of the call), url ${wasmScript.url}`);
+ let bpmsg = await Protocol.Debugger.setBreakpoint({
+ location: {scriptId: wasmScript.scriptId, lineNumber: 0, columnNumber: offset}
+ });
+
+ InspectorTest.logMessage(bpmsg.result.actualLocation);
+ Protocol.Runtime.evaluate({ expression: 'instance.exports.main(4)' });
+ await waitForPauseAndStep('stepInto'); // into call to wasm_A
+ await waitForPauseAndStep('stepOver'); // over first nop
+ await waitForPauseAndStep('stepOut'); // out of wasm_A
+ await waitForPauseAndStep('stepOut'); // out of wasm_B, stop on breakpoint
+ await waitForPauseAndStep('stepOver'); // over call
+ await waitForPauseAndStep('stepInto'); // == stepOver br
+ await waitForPauseAndStep('resume'); // to next breakpoint (3rd iteration)
+ await waitForPauseAndStep('stepInto'); // into wasm_A
+ await waitForPauseAndStep('stepOut'); // out to wasm_B
+ // Now step 10 times, until we are in wasm_A again.
+ for (let i = 0; i < 10; ++i) await waitForPauseAndStep('stepInto');
+ // 3 more times, back to wasm_B.
+ for (let i = 0; i < 3; ++i) await waitForPauseAndStep('stepInto');
+ // Then just resume.
+ await waitForPauseAndStep('resume');
+ InspectorTest.log('exports.main returned!');
+
+ InspectorTest.log('Test stepping over a recursive call');
+ // Set a breakpoint at the recursive call and run.
+ offset = fact.body_offset + 9; // Offset of the recursive call instruction.
+ InspectorTest.log(
+ `Setting breakpoint on the recursive call instruction @+` + offset +
+ `, url ${wasmScript.url}`);
+ bpmsg = await Protocol.Debugger.setBreakpoint({
+ location: {scriptId: wasmScript.scriptId, lineNumber: 0, columnNumber: offset}
+ });
+  let actualLocation = bpmsg.result.actualLocation;
+ InspectorTest.logMessage(actualLocation);
+ Protocol.Runtime.evaluate({ expression: 'instance.exports.fact(4)' });
+ await waitForPause();
+
+ // Remove the breakpoint before stepping over.
+ InspectorTest.log('Removing breakpoint');
+ let breakpointId = bpmsg.result.breakpointId;
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ await Protocol.Debugger.stepOver();
+ await waitForPauseAndStep('resume');
+ InspectorTest.log('Finished!');
+})().catch(reason => InspectorTest.log(`Failed: ${reason}`))
+ .finally(InspectorTest.completeTest);
+
+async function waitForPauseAndStep(stepAction) {
+ await waitForPause();
+ Protocol.Debugger[stepAction]();
+}
+
+async function waitForPause() {
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ await session.logSourceLocation(callFrames[0].location);
+}
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt
index 962456cabf..29ea23e14e 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt
@@ -3,7 +3,7 @@ Installing code and global variable.
Calling instantiate function.
Waiting for wasm scripts to be parsed.
Ignoring script with url v8://test/callInstantiate
-Got wasm script: wasm://wasm/485e942e
+Got wasm script: wasm://wasm/242f4a16
Setting breakpoint at start of wasm function
{
columnNumber : 33
@@ -18,7 +18,7 @@ function test() {
Debugger.resume
paused
-Script wasm://wasm/485e942e byte offset 33: Wasm opcode 0x1
+Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01
Debugger.stepOut
paused
instance.exports.main();
@@ -37,10 +37,10 @@ function test() {
Debugger.resume
paused
-Script wasm://wasm/485e942e byte offset 33: Wasm opcode 0x1
+Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01
Debugger.stepOver
paused
-Script wasm://wasm/485e942e byte offset 34: Wasm opcode 0xb
+Script wasm://wasm/242f4a16 byte offset 34: Wasm opcode 0x0b
Debugger.resume
exports.main returned!
Finished run 2!
@@ -53,10 +53,11 @@ function test() {
Debugger.resume
paused
-Script wasm://wasm/485e942e byte offset 33: Wasm opcode 0x1
+Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01
Debugger.stepInto
paused
-Script wasm://wasm/485e942e byte offset 34: Wasm opcode 0xb
+Script wasm://wasm/242f4a16 byte offset 34: Wasm opcode 0x0b
Debugger.resume
exports.main returned!
Finished run 3!
+
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
index 7d4f3a71ed..6e4f8ffc6a 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
@@ -1,305 +1,352 @@
Tests stepping through wasm scripts with source maps
Installing code an global variable and instantiate.
-Got wasm script: wasm://wasm/3697f0fe
+Got wasm script: wasm://wasm/9b4bf87e
Script sourceMapURL: abc
-Requesting source for wasm://wasm/3697f0fe...
+Requesting source for wasm://wasm/9b4bf87e...
Source retrieved without error: true
-Setting breakpoint on offset 54 (on the setlocal before the call), url wasm://wasm/3697f0fe
+Setting breakpoint on offset 54 (on the setlocal before the call), url wasm://wasm/9b4bf87e
{
columnNumber : 54
lineNumber : 0
scriptId : <scriptId>
}
-Paused at wasm://wasm/3697f0fe:0:54
+Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
at wasm_B (0:54):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":4}
+ locals: {"var0":4}
stack: {"0":3}
+ - scope (wasm-expression-stack):
+ {"0":3}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:56
+Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:38
+Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
at wasm_A (0:38):
- scope (global):
-- skipped
- scope (local):
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm://wasm/3697f0fe:0:39
+Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- scope (global):
-- skipped
- scope (local):
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/3697f0fe:0:58
+Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
at wasm_B (0:58):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/3697f0fe:0:54
+Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
at wasm_B (0:54):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":3}
+ locals: {"var0":3}
stack: {"0":2}
+ - scope (wasm-expression-stack):
+ {"0":2}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm://wasm/3697f0fe:0:56
+Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":2}
+ locals: {"var0":2}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Paused at wasm://wasm/3697f0fe:0:58
+Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
at wasm_B (0:58):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":2}
+ locals: {"var0":2}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.resume called
-Paused at wasm://wasm/3697f0fe:0:54
+Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
at wasm_B (0:54):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":2}
+ locals: {"var0":2}
stack: {"0":1}
+ - scope (wasm-expression-stack):
+ {"0":1}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:56
+Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:38
+Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
at wasm_A (0:38):
- scope (global):
-- skipped
- scope (local):
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/3697f0fe:0:58
+Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
at wasm_B (0:58):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:43
-at wasm_B (0:43):
- - scope (global):
- -- skipped
- - scope (local):
- locals: {"arg#0":1}
- stack: {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:45
+Script wasm://wasm/9b4bf87e byte offset 45: Wasm opcode 0x20
at wasm_B (0:45):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:47
+Script wasm://wasm/9b4bf87e byte offset 47: Wasm opcode 0x04
at wasm_B (0:47):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":1}
+ - scope (wasm-expression-stack):
+ {"0":1}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:49
+Script wasm://wasm/9b4bf87e byte offset 49: Wasm opcode 0x20
at wasm_B (0:49):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:51
+Script wasm://wasm/9b4bf87e byte offset 51: Wasm opcode 0x41
at wasm_B (0:51):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":1}
+ - scope (wasm-expression-stack):
+ {"0":1}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:53
+Script wasm://wasm/9b4bf87e byte offset 53: Wasm opcode 0x6b
at wasm_B (0:53):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":1,"1":1}
+ - scope (wasm-expression-stack):
+ {"0":1,"1":1}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:54
+Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
at wasm_B (0:54):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":1}
+ locals: {"var0":1}
stack: {"0":0}
+ - scope (wasm-expression-stack):
+ {"0":0}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:56
+Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:38
+Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
at wasm_A (0:38):
- scope (global):
-- skipped
- scope (local):
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:39
+Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- scope (global):
-- skipped
- scope (local):
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:40
+Script wasm://wasm/9b4bf87e byte offset 40: Wasm opcode 0x0b
at wasm_A (0:40):
- scope (global):
-- skipped
- scope (local):
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at wasm_B (0:56):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/3697f0fe:0:58
+Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
at wasm_B (0:58):
- scope (global):
-- skipped
- scope (local):
- locals: {"arg#0":0}
+ locals: {"var0":0}
stack: {}
+ - scope (wasm-expression-stack):
+ {}
at (anonymous) (0:17):
- scope (global):
-- skipped
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
index 57b2fd581c..17b1d05153 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
@@ -4,6 +4,7 @@
let {session, contextGroup, Protocol} =
InspectorTest.start('Tests stepping through wasm scripts with source maps');
+session.setupScriptMap();
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -80,8 +81,8 @@ function instantiate(bytes) {
await waitForPauseAndStep('stepInto'); // to call
await waitForPauseAndStep('stepInto'); // into wasm_A
await waitForPauseAndStep('stepOut'); // out to wasm_B
- // now step 9 times, until we are in wasm_A again.
- for (let i = 0; i < 9; ++i) await waitForPauseAndStep('stepInto');
+ // Now step 8 times, until we are in wasm_A again.
+ for (let i = 0; i < 8; ++i) await waitForPauseAndStep('stepInto');
// 3 more times, back to wasm_B.
for (let i = 0; i < 3; ++i) await waitForPauseAndStep('stepInto');
// then just resume.
@@ -93,9 +94,7 @@ function instantiate(bytes) {
async function waitForPauseAndStep(stepAction) {
const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
- const topFrame = callFrames[0];
- InspectorTest.log(
- `Paused at ${topFrame.url}:${topFrame.location.lineNumber}:${topFrame.location.columnNumber}`);
+ await session.logSourceLocation(callFrames[0].location);
for (var frame of callFrames) {
const functionName = frame.functionName || '(anonymous)';
const lineNumber = frame.location.lineNumber;
@@ -112,9 +111,14 @@ async function waitForPauseAndStep(stepAction) {
functionDeclaration: 'function() { return this; }',
returnByValue: true
});
- if (value.locals)
- InspectorTest.log(` locals: ${JSON.stringify(value.locals)}`);
- InspectorTest.log(` stack: ${JSON.stringify(value.stack)}`);
+
+ if (scope.type === 'local') {
+ if (value.locals)
+ InspectorTest.log(` locals: ${JSON.stringify(value.locals)}`);
+ InspectorTest.log(` stack: ${JSON.stringify(value.stack)}`);
+ } else {
+ InspectorTest.log(` ${JSON.stringify(value)}`);
+ }
}
}
}
diff --git a/deps/v8/test/inspector/debugger/wasm-unnamed-function-names-expected.txt b/deps/v8/test/inspector/debugger/wasm-unnamed-function-names-expected.txt
new file mode 100644
index 0000000000..d7819588a5
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-unnamed-function-names-expected.txt
@@ -0,0 +1,10 @@
+Tests unnamed function in wasm scripts
+Running testFunction with generated wasm bytes...
+Paused on 'debugger;'
+Number of frames: 5
+ - [0] call_debugger
+ - [1] func1
+ - [2] main
+ - [3] testFunction
+ - [4]
+Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-unnamed-function-names.js b/deps/v8/test/inspector/debugger/wasm-unnamed-function-names.js
new file mode 100644
index 0000000000..fae8d66abc
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-unnamed-function-names.js
@@ -0,0 +1,69 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests unnamed function in wasm scripts');
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+var imported_idx = builder.addImport('mode', 'import_func', kSig_v_v);
+
+// Unnamed non-export function.
+var function_idx = builder.addFunction(undefined, kSig_v_v)
+ .addBody([kExprCallFunction, imported_idx])
+ .index;
+
+// Unnamed export function.
+builder.addFunction(undefined, kSig_v_v)
+ .addBody([kExprCallFunction, function_idx])
+ .exportAs('main');
+
+var module_bytes = builder.toArray();
+
+function testFunction(bytes) {
+ function call_debugger() {
+ debugger;
+ }
+
+ var buffer = new ArrayBuffer(bytes.length);
+ var view = new Uint8Array(buffer);
+ for (var i = 0; i < bytes.length; i++) {
+ view[i] = bytes[i] | 0;
+ }
+
+ var module = new WebAssembly.Module(buffer);
+ var instance =
+ new WebAssembly.Instance(module, {mode: {import_func: call_debugger}});
+
+ instance.exports.main();
+}
+
+contextGroup.addScript(testFunction.toString());
+
+(async function test() {
+ await Protocol.Debugger.enable();
+ Protocol.Debugger.onPaused(handleDebuggerPaused);
+ InspectorTest.log('Running testFunction with generated wasm bytes...');
+ await Protocol.Runtime.evaluate(
+ {'expression': 'testFunction(' + JSON.stringify(module_bytes) + ')'});
+
+ InspectorTest.log('Finished!');
+ InspectorTest.completeTest();
+})();
+
+function logStackTrace(messageObject) {
+ var frames = messageObject.params.callFrames;
+ InspectorTest.log('Number of frames: ' + frames.length);
+ for (var i = 0; i < frames.length; ++i) {
+ InspectorTest.log(' - [' + i + '] ' + frames[i].functionName);
+ }
+}
+
+function handleDebuggerPaused(messageObject) {
+ InspectorTest.log('Paused on \'debugger;\'');
+ logStackTrace(messageObject);
+ Protocol.Debugger.resume();
+}
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 075744a40e..4837f3caea 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -70,8 +70,7 @@ std::vector<uint8_t> ToBytes(v8::Isolate* isolate, v8::Local<v8::String> str) {
}
v8::Local<v8::String> ToV8String(v8::Isolate* isolate, const char* str) {
- return v8::String::NewFromUtf8(isolate, str, v8::NewStringType::kNormal)
- .ToLocalChecked();
+ return v8::String::NewFromUtf8(isolate, str).ToLocalChecked();
}
v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index 0b6d8abda2..ba06c9df3c 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -17,6 +17,14 @@
}], # ALWAYS
##############################################################################
+['variant == future', {
+ # https://crbug.com/v8/10356
+ # This test worked in the wasm interpreter, but fails when using Liftoff for
+ # debugging.
+ 'debugger/wasm-anyref-global': [FAIL],
+}],
+
+##############################################################################
['system == android', {
# https://crbug.com/v8/8197
'debugger/get-possible-breakpoints-class-fields': [SKIP],
@@ -70,8 +78,11 @@
##############################################################################
['arch == ppc or arch == ppc64', {
# Liftoff needs to be enabled before running these tests.
+ 'debugger/wasm-scope-info': [SKIP],
'debugger/wasm-scope-info-liftoff': [SKIP],
'debugger/wasm-set-breakpoint-liftoff': [SKIP],
+ 'debugger/wasm-stepping-liftoff': [SKIP],
+ 'debugger/wasm-inspect-many-registers': [SKIP],
}], # 'arch == ppc or arch == ppc64'
##############################################################################
@@ -79,16 +90,13 @@
# Stack manipulations in LiveEdit is not implemented for this arch.
'debugger/set-script-source-stack-padding': [SKIP],
# Liftoff needs to be enabled before running these tests.
+ 'debugger/wasm-scope-info': [SKIP],
'debugger/wasm-scope-info-liftoff': [SKIP],
'debugger/wasm-set-breakpoint-liftoff': [SKIP],
+ 'debugger/wasm-stepping-liftoff': [SKIP],
+ 'debugger/wasm-inspect-many-registers': [SKIP],
}], # 'arch == s390 or arch == s390x'
##############################################################################
-['variant != future', {
- # Wasm native module cache is temporarily disabled in non-future variant
- # (https://crbug.com/1070199)
- 'debugger/wasm-scripts': [SKIP],
-}], # variant != future
-
]
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 8011007e34..2d69ba4a43 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -76,10 +76,9 @@ IsolateData::IsolateData(TaskRunner* task_runner,
v8::HandleScope handle_scope(isolate_.get());
not_inspectable_private_.Reset(
isolate_.get(),
- v8::Private::ForApi(isolate_.get(), v8::String::NewFromUtf8(
- isolate_.get(), "notInspectable",
- v8::NewStringType::kNormal)
- .ToLocalChecked()));
+ v8::Private::ForApi(
+ isolate_.get(),
+ v8::String::NewFromUtf8Literal(isolate_.get(), "notInspectable")));
}
IsolateData* IsolateData::FromContext(v8::Local<v8::Context> context) {
@@ -310,9 +309,7 @@ void IsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
if (context.IsEmpty()) return;
v8::Local<v8::Promise> promise = data.GetPromise();
v8::Local<v8::Private> id_private = v8::Private::ForApi(
- isolate,
- v8::String::NewFromUtf8(isolate, "id", v8::NewStringType::kNormal)
- .ToLocalChecked());
+ isolate, v8::String::NewFromUtf8Literal(isolate, "id"));
if (data.GetEvent() == v8::kPromiseHandlerAddedAfterReject) {
v8::Local<v8::Value> id;
@@ -373,9 +370,8 @@ std::vector<int> IsolateData::GetSessionIds(int context_group_id) {
bool IsolateData::formatAccessorsAsProperties(v8::Local<v8::Value> object) {
v8::Local<v8::Context> context = isolate()->GetCurrentContext();
v8::Local<v8::Private> shouldFormatAccessorsPrivate = v8::Private::ForApi(
- isolate(), v8::String::NewFromUtf8(isolate(), "allowAccessorFormatting",
- v8::NewStringType::kNormal)
- .ToLocalChecked());
+ isolate(),
+ v8::String::NewFromUtf8Literal(isolate(), "allowAccessorFormatting"));
CHECK(object->IsObject());
return object.As<v8::Object>()
->HasPrivate(context, shouldFormatAccessorsPrivate)
@@ -461,13 +457,14 @@ namespace {
class StringBufferImpl : public v8_inspector::StringBuffer {
public:
StringBufferImpl(v8::Isolate* isolate, v8::Local<v8::String> string)
- : data_(ToVector(isolate, string)),
- view_(data_.begin(), data_.length()) {}
- const v8_inspector::StringView& string() override { return view_; }
+ : data_(ToVector(isolate, string)) {}
+
+ v8_inspector::StringView string() const override {
+ return v8_inspector::StringView(data_.begin(), data_.length());
+ }
private:
v8::internal::Vector<uint16_t> data_;
- v8_inspector::StringView view_;
};
} // anonymous namespace
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index 71e32f2f4e..3b514ab538 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -287,7 +287,9 @@ InspectorTest.Session = class {
if (location.lineNumber != 0) {
InspectorTest.log('Unexpected wasm line number: ' + location.lineNumber);
}
- InspectorTest.log(`Script ${script.url} byte offset ${location.columnNumber}: Wasm opcode 0x${script.bytecode[location.columnNumber].toString(16)}`);
+ let wasm_opcode = script.bytecode[location.columnNumber].toString(16);
+ if (wasm_opcode.length % 2) wasm_opcode = '0' + wasm_opcode;
+ InspectorTest.log(`Script ${script.url} byte offset ${location.columnNumber}: Wasm opcode 0x${wasm_opcode}`);
} else {
var lines = script.scriptSource.split('\n');
var line = lines[location.lineNumber];
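
The padding above guarantees that opcode bytes always print as two hex digits (0xb becomes 0x0b), which is exactly what the regenerated expectation files assert. A standalone sketch of the same formatting (the helper name is illustrative):

function formatOpcode(byte) {
  let hex = byte.toString(16);
  if (hex.length % 2) hex = '0' + hex;  // pad odd-length hex: 'b' -> '0b'
  return '0x' + hex;
}
formatOpcode(0x0b);  // '0x0b'
formatOpcode(0x20);  // '0x20'
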
diff --git a/deps/v8/test/inspector/runtime/es6-module-expected.txt b/deps/v8/test/inspector/runtime/es6-module-expected.txt
index 65c543e45f..63d6696cf8 100644
--- a/deps/v8/test/inspector/runtime/es6-module-expected.txt
+++ b/deps/v8/test/inspector/runtime/es6-module-expected.txt
@@ -11,6 +11,7 @@ Checks basic ES6 modules support.
isModule : true
length : 83
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -29,6 +30,7 @@ Checks basic ES6 modules support.
isModule : true
length : 84
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -47,6 +49,7 @@ Checks basic ES6 modules support.
isModule : true
length : 286
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
@@ -206,6 +209,7 @@ console.log(239)
isModule : true
length : 1
scriptId : <scriptId>
+ scriptLanguage : JavaScript
sourceMapURL :
startColumn : 0
startLine : 0
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index e07aa9e616..512bf54eb6 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -62,7 +62,8 @@ class TestCase(testcase.TestCase):
def output_proc(self):
return outproc.ExpectedOutProc(
self.expected_outcomes,
- os.path.join(self.suite.root, self.path) + EXPECTED_SUFFIX)
+ os.path.join(self.suite.root, self.path) + EXPECTED_SUFFIX,
+ self.suite.test_config.regenerate_expected_files)
def GetSuite(*args, **kwargs):
diff --git a/deps/v8/test/intl/regress-10248.js b/deps/v8/test/intl/regress-10248.js
new file mode 100644
index 0000000000..c1cdc7b42a
--- /dev/null
+++ b/deps/v8/test/intl/regress-10248.js
@@ -0,0 +1,70 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://tc39.es/ecma262/#sec-runtime-semantics-canonicalize-ch
+function Canonicalize(ch) {
+ var u = ch.toUpperCase();
+ if (u.length > 1) return ch;
+ var cu = u.charCodeAt(0);
+ if (ch.charCodeAt(0) >= 128 && cu < 128) return ch;
+ return cu;
+}
+
+function TestEquivalenceClass(eclass) {
+ for (var i = 0; i < eclass.length; i++) {
+ for (var j = 0; j < eclass.length; j++) {
+ if (i == j) continue;
+ var c1 = eclass[i];
+ var c2 = eclass[j];
+ var shouldMatch = Canonicalize(c1) === Canonicalize(c2);
+
+ var re1 = new RegExp(c1, 'i');
+ var re2 = new RegExp('[' + c1 + ']', 'i');
+
+ assertEquals(re1.test(c2), shouldMatch);
+ assertEquals(re2.test(c2), shouldMatch);
+ }
+ }
+}
+
+function TestAll() {
+ for (var eclass of equivalence_classes) {
+ TestEquivalenceClass(eclass);
+ }
+}
+
+// Interesting case-folding equivalence classes (as determined by
+// ICU's UnicodeSet::closeOver). A class is interesting if it contains
+// more than two characters, or if it contains any characters in
+// IgnoreSet or SpecialAddSet as defined in src/regexp/special-case.h.
+var equivalence_classes = [
+ '\u0041\u0061', // Aa (sanity check)
+ '\u004b\u006b\u212a', // KkK
+ '\u0053\u0073\u017f', // Ssſ
+ '\u00b5\u039c\u03bc', // µΜμ
+ '\u00c5\u00e5\u212b', // ÅåÅ
+ '\u00df\u1e9e', // ßẞ
+ '\u03a9\u03c9\u2126', // ΩωΩ
+ '\u0390\u1fd3', // ΐΐ
+ '\u0398\u03b8\u03d1\u03f4', // Θθϑϴ
+ '\u03b0\u1fe3', // ΰΰ
+ '\u1f80\u1f88', // ᾀᾈ
+ '\u1fb3\u1fbc', // ᾳᾼ
+ '\u1fc3\u1fcc', // ῃῌ
+ '\u1ff3\u1ffc', // ῳῼ
+ '\ufb05\ufb06', // ſtst
+
+  // The classes below this line are well-behaved case-folding
+  // equivalence classes with more than two characters but only one
+  // canonical case-folded character.
+ '\u01c4\u01c5\u01c6', '\u01c7\u01c8\u01c9', '\u01ca\u01cb\u01cc',
+ '\u01f1\u01f2\u01f3', '\u0345\u0399\u03b9\u1fbe', '\u0392\u03b2\u03d0',
+ '\u0395\u03b5\u03f5', '\u039a\u03ba\u03f0', '\u03a0\u03c0\u03d6',
+ '\u03a1\u03c1\u03f1', '\u03a3\u03c2\u03c3', '\u03a6\u03c6\u03d5',
+ '\u0412\u0432\u1c80', '\u0414\u0434\u1c81', '\u041e\u043e\u1c82',
+ '\u0421\u0441\u1c83', '\u0422\u0442\u1c84\u1c85', '\u042a\u044a\u1c86',
+ '\u0462\u0463\u1c87', '\u1c88\ua64a\ua64b', '\u1e60\u1e61\u1e9b'
+];
+
+TestAll();
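
For intuition on Canonicalize() above: both cases of an ASCII letter share one canonical value, but folding a non-ASCII character into ASCII is blocked, so KELVIN SIGN only matches 'K' under full Unicode case folding. A quick sketch (assumes the definitions above are in scope):

Canonicalize('K');       // 75
Canonicalize('k');       // 75: same canonical value, so /k/i matches 'K'
Canonicalize('\u212a');  // '\u212a': non-ASCII to ASCII folding is blocked
/K/i.test('\u212a');     // false, as shouldMatch predicts
/K/iu.test('\u212a');    // true: the 'u' flag uses full case folding
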
diff --git a/deps/v8/test/intl/regress-1030160.js b/deps/v8/test/intl/regress-1030160.js
new file mode 100644
index 0000000000..0f04677db2
--- /dev/null
+++ b/deps/v8/test/intl/regress-1030160.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals("NaN", (null/null).toLocaleString());
diff --git a/deps/v8/test/intl/regress-1041319.js b/deps/v8/test/intl/regress-1041319.js
new file mode 100644
index 0000000000..7092194876
--- /dev/null
+++ b/deps/v8/test/intl/regress-1041319.js
@@ -0,0 +1,46 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that "algorithmic" numbering systems, which are listed in UTS35 but
+// not mandated by ECMA402, won't crash.
+// These are the entries whose type is "algorithmic" in
+// https://github.com/unicode-org/cldr/blob/master/common/supplemental/numberingSystems.xml
+// Such numbering systems are not supported by ECMA402, but they must not
+// crash the implementation.
+let algorithmicNumberingSystems = [
+ "armn",
+ "armnlow",
+ "cyrl",
+ "ethi",
+ "geor",
+ "grek",
+ "greklow",
+ "hanidays",
+ "hans",
+ "hansfin",
+ "hant",
+ "hantfin",
+ "hebr",
+ "jpan",
+ "jpanfin",
+ "jpanyear",
+ "roman",
+ "romanlow",
+ "taml",
+];
+
+for (let numberingSystem of algorithmicNumberingSystems) {
+ let baseLocale = "en";
+ let locale = baseLocale + "-u-nu-" + numberingSystem;
+
+ // Ensure the creation won't crash
+ let rtf = new Intl.RelativeTimeFormat(locale);
+ let rtf2 = new Intl.RelativeTimeFormat(baseLocale, {numberingSystem});
+
+ let dtf = new Intl.DateTimeFormat(locale);
+ let dtf2 = new Intl.DateTimeFormat(baseLocale, {numberingSystem});
+
+ let nf = new Intl.NumberFormat(locale);
+ let nf2 = new Intl.NumberFormat(baseLocale, {numberingSystem});
+}
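
The expectation here is that the constructors fall back to a numbering system ECMA402 does support rather than throwing or crashing. A quick way to observe this (the exact fallback depends on the locale data):

let nf = new Intl.NumberFormat('en-u-nu-roman');
nf.resolvedOptions().numberingSystem;  // falls back, e.g. 'latn'
nf.format(2020);                       // '2,020', not a Roman numeral
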
diff --git a/deps/v8/test/js-perf-test/Scope/with.js b/deps/v8/test/js-perf-test/Scope/with.js
index 8ec2d30cf0..b4c1893c6e 100644
--- a/deps/v8/test/js-perf-test/Scope/with.js
+++ b/deps/v8/test/js-perf-test/Scope/with.js
@@ -2,18 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-new BenchmarkSuite( 'With', [1000], [
- new Benchmark('AccessOnSameLevel', false, false, 0,
- AccessOnSameLevel, AccessOnSameLevelSetup,
- AccessOnSameLevelTearDown),
- new Benchmark('SetOnSameLevel', false, false, 0,
- SetOnSameLevel, SetOnSameLevelSetup,
- SetOnSameLevelTearDown),
- new Benchmark('AccessOverPrototypeChain', false, false, 0,
- AccessOverPrototypeChainSetup, AccessOverPrototypeChainSetup,
- AccessOverPrototypeChainTearDown),
- new Benchmark('CompetingScope', false, false, 0,
- CompetingScope, CompetingScopeSetup, CompetingScopeTearDown)
+new BenchmarkSuite('With', [1000], [
+ new Benchmark(
+ 'AccessOnSameLevel', false, false, 0, AccessOnSameLevel,
+ AccessOnSameLevelSetup, AccessOnSameLevelTearDown),
+ new Benchmark(
+ 'SetOnSameLevel', false, false, 0, SetOnSameLevel, SetOnSameLevelSetup,
+ SetOnSameLevelTearDown),
+ new Benchmark(
+ 'AccessOverPrototypeChain', false, false, 0, AccessOverPrototypeChain,
+ AccessOverPrototypeChainSetup, AccessOverPrototypeChainTearDown),
+ new Benchmark(
+ 'CompetingScope', false, false, 0, CompetingScope, CompetingScopeSetup,
+ CompetingScopeTearDown)
]);
var objectUnderTest;
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.out b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.out
index ca3154a26b..66429501f6 100644
--- a/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.out
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-getter-nested.out
@@ -1,10 +1,10 @@
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:9: TypeError: Cannot read private member C from an object whose class did not declare it
+*%(basename)s:9: TypeError: Object must be an instance of class C
setA(obj, val) { obj.#a = val; }
^
-TypeError: Cannot read private member C from an object whose class did not declare it
+TypeError: Object must be an instance of class C
at C.setA (*%(basename)s:9:24)
at new C (*%(basename)s:15:10)
at *%(basename)s:19:1 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.out b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.out
index 5f22848692..ffdc328a95 100644
--- a/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.out
+++ b/deps/v8/test/message/fail/class-accessors-private-undefined-setter-nested.out
@@ -1,10 +1,10 @@
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:9: TypeError: Cannot read private member C from an object whose class did not declare it
+*%(basename)s:9: TypeError: Object must be an instance of class C
getA(obj) { return obj.#a; }
^
-TypeError: Cannot read private member C from an object whose class did not declare it
+TypeError: Object must be an instance of class C
at C.getA (*%(basename)s:9:26)
at new C (*%(basename)s:15:10)
at *%(basename)s:19:1
diff --git a/deps/v8/test/message/fail/class-methods-private-brand-check-anonymous.js b/deps/v8/test/message/fail/class-methods-private-brand-check-anonymous.js
new file mode 100644
index 0000000000..4f012e1b93
--- /dev/null
+++ b/deps/v8/test/message/fail/class-methods-private-brand-check-anonymous.js
@@ -0,0 +1,11 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+const C = class {
+ #a() {}
+ test(obj) { obj.#a(); }
+};
+(new C).test({});
diff --git a/deps/v8/test/message/fail/class-methods-private-brand-check-anonymous.out b/deps/v8/test/message/fail/class-methods-private-brand-check-anonymous.out
new file mode 100644
index 0000000000..e5efede21c
--- /dev/null
+++ b/deps/v8/test/message/fail/class-methods-private-brand-check-anonymous.out
@@ -0,0 +1,9 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: TypeError: Object must be an instance of class anonymous
+ test(obj) { obj.#a(); }
+ ^
+TypeError: Object must be an instance of class anonymous
+ at C.test (*%(basename)s:9:19)
+ at *%(basename)s:11:9
diff --git a/deps/v8/test/message/fail/class-methods-private-brand-check.js b/deps/v8/test/message/fail/class-methods-private-brand-check.js
new file mode 100644
index 0000000000..7d044b884e
--- /dev/null
+++ b/deps/v8/test/message/fail/class-methods-private-brand-check.js
@@ -0,0 +1,11 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+ #a() {}
+ test(obj) { obj.#a(); }
+}
+(new C).test({});
diff --git a/deps/v8/test/message/fail/class-methods-private-brand-check.out b/deps/v8/test/message/fail/class-methods-private-brand-check.out
new file mode 100644
index 0000000000..fe166fa359
--- /dev/null
+++ b/deps/v8/test/message/fail/class-methods-private-brand-check.out
@@ -0,0 +1,9 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: TypeError: Object must be an instance of class C
+ test(obj) { obj.#a(); }
+ ^
+TypeError: Object must be an instance of class C
+ at C.test (*%(basename)s:9:19)
+ at *%(basename)s:11:9 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/spread-call-2.js b/deps/v8/test/message/fail/spread-call-2.js
new file mode 100644
index 0000000000..c66c93c75b
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-call-2.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+x = undefined;
+console.log(1, ...x);
diff --git a/deps/v8/test/message/fail/spread-call-2.out b/deps/v8/test/message/fail/spread-call-2.out
new file mode 100644
index 0000000000..7d1a4a8177
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-call-2.out
@@ -0,0 +1,5 @@
+*%(basename)s:5: TypeError: x is not iterable (cannot read property undefined)
+console.log(1, ...x);
+ ^
+TypeError: x is not iterable (cannot read property undefined)
+ at *%(basename)s:5:9
diff --git a/deps/v8/test/message/fail/spread-call-3.js b/deps/v8/test/message/fail/spread-call-3.js
new file mode 100644
index 0000000000..4068528771
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-call-3.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+p = undefined;
+f = () => 3;
+var [x] = f(...p);
diff --git a/deps/v8/test/message/fail/spread-call-3.out b/deps/v8/test/message/fail/spread-call-3.out
new file mode 100644
index 0000000000..ad87ac0410
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-call-3.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: TypeError: p is not iterable (cannot read property undefined)
+var [x] = f(...p);
+ ^
+TypeError: p is not iterable (cannot read property undefined)
+ at *%(basename)s:6:11
diff --git a/deps/v8/test/message/fail/spread-call.js b/deps/v8/test/message/fail/spread-call.js
new file mode 100644
index 0000000000..8642d5c32f
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-call.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+x = null;
+console.log(1, ...x);
diff --git a/deps/v8/test/message/fail/spread-call.out b/deps/v8/test/message/fail/spread-call.out
new file mode 100644
index 0000000000..3f1515ab01
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-call.out
@@ -0,0 +1,5 @@
+*%(basename)s:5: TypeError: x is not iterable (cannot read property null)
+console.log(1, ...x);
+ ^
+TypeError: x is not iterable (cannot read property null)
+ at *%(basename)s:5:9
diff --git a/deps/v8/test/message/fail/spread-construct-2.js b/deps/v8/test/message/fail/spread-construct-2.js
new file mode 100644
index 0000000000..fd68a58b17
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-construct-2.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+x = undefined;
+function p() {};
+p(1, ...x);
diff --git a/deps/v8/test/message/fail/spread-construct-2.out b/deps/v8/test/message/fail/spread-construct-2.out
new file mode 100644
index 0000000000..018d5b46af
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-construct-2.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: TypeError: x is not iterable (cannot read property undefined)
+p(1, ...x);
+ ^
+TypeError: x is not iterable (cannot read property undefined)
+ at *%(basename)s:6:1
diff --git a/deps/v8/test/message/fail/spread-construct-3.js b/deps/v8/test/message/fail/spread-construct-3.js
new file mode 100644
index 0000000000..ef2e25bd7e
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-construct-3.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+p = undefined;
+f = () => 3;
+function t() {};
+var [x] = new t(...p);
diff --git a/deps/v8/test/message/fail/spread-construct-3.out b/deps/v8/test/message/fail/spread-construct-3.out
new file mode 100644
index 0000000000..64d4bfcdce
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-construct-3.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: TypeError: p is not iterable (cannot read property undefined)
+var [x] = new t(...p);
+ ^
+TypeError: p is not iterable (cannot read property undefined)
+ at *%(basename)s:7:11
diff --git a/deps/v8/test/message/fail/spread-construct.js b/deps/v8/test/message/fail/spread-construct.js
new file mode 100644
index 0000000000..e45847d69c
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-construct.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+x = null;
+function p() {};
+new p(1, ...x);
diff --git a/deps/v8/test/message/fail/spread-construct.out b/deps/v8/test/message/fail/spread-construct.out
new file mode 100644
index 0000000000..b8aa31de30
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-construct.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: TypeError: x is not iterable (cannot read property null)
+new p(1, ...x);
+ ^
+TypeError: x is not iterable (cannot read property null)
+ at *%(basename)s:6:1
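
These new spread tests pin down the improved diagnostics, which now name the spread operand and the offending value. A minimal reproduction under this V8 version:

x = null;  // as in spread-call.js above
try {
  console.log(1, ...x);
} catch (e) {
  console.log(e.message);  // x is not iterable (cannot read property null)
}
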
diff --git a/deps/v8/test/message/fail/wasm-exception-rethrow.out b/deps/v8/test/message/fail/wasm-exception-rethrow.out
index c4c68e5b71..28fd825866 100644
--- a/deps/v8/test/message/fail/wasm-exception-rethrow.out
+++ b/deps/v8/test/message/fail/wasm-exception-rethrow.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x32: RuntimeError: wasm exception
RuntimeError: wasm exception
- at rethrow0 (wasm-function[0]:0x32)
+ at rethrow0 (<anonymous>:wasm-function[0]:0x32)
at *%(basename)s:21:18
diff --git a/deps/v8/test/message/fail/wasm-exception-throw.out b/deps/v8/test/message/fail/wasm-exception-throw.out
index c94ae248c0..af45212f3f 100644
--- a/deps/v8/test/message/fail/wasm-exception-throw.out
+++ b/deps/v8/test/message/fail/wasm-exception-throw.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x2e: RuntimeError: wasm exception
RuntimeError: wasm exception
- at throw0 (wasm-function[0]:0x2e)
+ at throw0 (<anonymous>:wasm-function[0]:0x2e)
at *%(basename)s:17:18
diff --git a/deps/v8/test/message/fail/wasm-function-name.out b/deps/v8/test/message/fail/wasm-function-name.out
index 3f4956a26e..182b15dc87 100644
--- a/deps/v8/test/message/fail/wasm-function-name.out
+++ b/deps/v8/test/message/fail/wasm-function-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x22: RuntimeError: unreachable
RuntimeError: unreachable
- at main (wasm-function[0]:0x22)
+ at main (<anonymous>:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-module-and-function-name.out b/deps/v8/test/message/fail/wasm-module-and-function-name.out
index 1f25c57271..969ac55cbb 100644
--- a/deps/v8/test/message/fail/wasm-module-and-function-name.out
+++ b/deps/v8/test/message/fail/wasm-module-and-function-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x22: RuntimeError: unreachable
RuntimeError: unreachable
- at test-module.main (wasm-function[0]:0x22)
+ at test-module.main (<anonymous>:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-module-name.out b/deps/v8/test/message/fail/wasm-module-name.out
index 763bec5592..969ac55cbb 100644
--- a/deps/v8/test/message/fail/wasm-module-name.out
+++ b/deps/v8/test/message/fail/wasm-module-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x22: RuntimeError: unreachable
RuntimeError: unreachable
- at test-module (wasm-function[0]:0x22)
+ at test-module.main (<anonymous>:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-no-name.out b/deps/v8/test/message/fail/wasm-no-name.out
index 37836cc495..182b15dc87 100644
--- a/deps/v8/test/message/fail/wasm-no-name.out
+++ b/deps/v8/test/message/fail/wasm-no-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:0x22: RuntimeError: unreachable
RuntimeError: unreachable
- at wasm-function[0]:0x22
+ at main (<anonymous>:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-trap.out b/deps/v8/test/message/fail/wasm-trap.out
index 66048388e4..85eb9845b4 100644
--- a/deps/v8/test/message/fail/wasm-trap.out
+++ b/deps/v8/test/message/fail/wasm-trap.out
@@ -1,4 +1,4 @@
wasm-function[1]:0x30: RuntimeError: divide by zero
RuntimeError: divide by zero
- at main (wasm-function[1]:0x30)
+ at main (<anonymous>:wasm-function[1]:0x30)
at *%(basename)s:{NUMBER}:16
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationgroup1.out b/deps/v8/test/message/fail/weak-refs-finalizationgroup1.out
deleted file mode 100644
index ddaa32328f..0000000000
--- a/deps/v8/test/message/fail/weak-refs-finalizationgroup1.out
+++ /dev/null
@@ -1,6 +0,0 @@
-*%(basename)s:7: TypeError: FinalizationGroup: cleanup must be callable
-let fg = new FinalizationGroup();
- ^
-TypeError: FinalizationGroup: cleanup must be callable
- at new FinalizationGroup (<anonymous>)
- at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationgroup2.out b/deps/v8/test/message/fail/weak-refs-finalizationgroup2.out
deleted file mode 100644
index 799199aff8..0000000000
--- a/deps/v8/test/message/fail/weak-refs-finalizationgroup2.out
+++ /dev/null
@@ -1,6 +0,0 @@
-*%(basename)s:7: TypeError: FinalizationGroup: cleanup must be callable
-let fg = new FinalizationGroup({});
- ^
-TypeError: FinalizationGroup: cleanup must be callable
- at new FinalizationGroup (<anonymous>)
- at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationgroup2.js b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js
index 87a6183de9..f23cfa3a51 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationgroup2.js
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js
@@ -4,4 +4,4 @@
// Flags: --harmony-weak-refs
-let fg = new FinalizationGroup({});
+let fg = new FinalizationRegistry();
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out
new file mode 100644
index 0000000000..7775052c91
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: TypeError: FinalizationRegistry: cleanup must be callable
+let fg = new FinalizationRegistry();
+ ^
+TypeError: FinalizationRegistry: cleanup must be callable
+ at new FinalizationRegistry (<anonymous>)
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationgroup1.js b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js
index a97abb3f8b..599bfc6d05 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationgroup1.js
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js
@@ -4,4 +4,4 @@
// Flags: --harmony-weak-refs
-let fg = new FinalizationGroup();
+let fg = new FinalizationRegistry({});
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out
new file mode 100644
index 0000000000..278c3506bf
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: TypeError: FinalizationRegistry: cleanup must be callable
+let fg = new FinalizationRegistry({});
+ ^
+TypeError: FinalizationRegistry: cleanup must be callable
+ at new FinalizationRegistry (<anonymous>)
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-register1.js b/deps/v8/test/message/fail/weak-refs-register1.js
index a90e4aa47c..7110a25e6c 100644
--- a/deps/v8/test/message/fail/weak-refs-register1.js
+++ b/deps/v8/test/message/fail/weak-refs-register1.js
@@ -4,5 +4,5 @@
// Flags: --harmony-weak-refs
-let fg = new FinalizationGroup(() => {});
+let fg = new FinalizationRegistry(() => {});
fg.register(1);
diff --git a/deps/v8/test/message/fail/weak-refs-register1.out b/deps/v8/test/message/fail/weak-refs-register1.out
index 96983664c2..6a9b23ecf8 100644
--- a/deps/v8/test/message/fail/weak-refs-register1.out
+++ b/deps/v8/test/message/fail/weak-refs-register1.out
@@ -1,6 +1,6 @@
-*%(basename)s:8: TypeError: FinalizationGroup.prototype.register: target must be an object
+*%(basename)s:8: TypeError: FinalizationRegistry.prototype.register: target must be an object
fg.register(1);
^
-TypeError: FinalizationGroup.prototype.register: target must be an object
- at FinalizationGroup.register (<anonymous>)
+TypeError: FinalizationRegistry.prototype.register: target must be an object
+ at FinalizationRegistry.register (<anonymous>)
at *%(basename)s:8:4
diff --git a/deps/v8/test/message/fail/weak-refs-register2.js b/deps/v8/test/message/fail/weak-refs-register2.js
index 8934a46511..31df874585 100644
--- a/deps/v8/test/message/fail/weak-refs-register2.js
+++ b/deps/v8/test/message/fail/weak-refs-register2.js
@@ -4,6 +4,6 @@
// Flags: --harmony-weak-refs
-let fg = new FinalizationGroup(() => {});
+let fg = new FinalizationRegistry(() => {});
let o = {};
fg.register(o, o);
diff --git a/deps/v8/test/message/fail/weak-refs-register2.out b/deps/v8/test/message/fail/weak-refs-register2.out
index c7b9e10909..0f2c2f1ee2 100644
--- a/deps/v8/test/message/fail/weak-refs-register2.out
+++ b/deps/v8/test/message/fail/weak-refs-register2.out
@@ -1,6 +1,6 @@
-*%(basename)s:9: TypeError: FinalizationGroup.prototype.register: target and holdings must not be same
+*%(basename)s:9: TypeError: FinalizationRegistry.prototype.register: target and holdings must not be same
fg.register(o, o);
^
-TypeError: FinalizationGroup.prototype.register: target and holdings must not be same
- at FinalizationGroup.register (<anonymous>)
+TypeError: FinalizationRegistry.prototype.register: target and holdings must not be same
+ at FinalizationRegistry.register (<anonymous>)
at *%(basename)s:9:4
diff --git a/deps/v8/test/message/fail/weak-refs-unregister.js b/deps/v8/test/message/fail/weak-refs-unregister.js
index e0cf0282d2..0f41263cba 100644
--- a/deps/v8/test/message/fail/weak-refs-unregister.js
+++ b/deps/v8/test/message/fail/weak-refs-unregister.js
@@ -4,5 +4,5 @@
// Flags: --harmony-weak-refs
-let fg = new FinalizationGroup(() => {});
+let fg = new FinalizationRegistry(() => {});
fg.unregister(1);
diff --git a/deps/v8/test/message/fail/weak-refs-unregister.out b/deps/v8/test/message/fail/weak-refs-unregister.out
index 938665157d..766d04349f 100644
--- a/deps/v8/test/message/fail/weak-refs-unregister.out
+++ b/deps/v8/test/message/fail/weak-refs-unregister.out
@@ -2,5 +2,5 @@
fg.unregister(1);
^
TypeError: unregisterToken ('1') must be an object
- at FinalizationGroup.unregister (<anonymous>)
+ at FinalizationRegistry.unregister (<anonymous>)
at *%(basename)s:8:4
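
The expectation changes above all track the TC39 rename of the weak-refs proposal's FinalizationGroup to FinalizationRegistry. A minimal sketch of the renamed API as these tests exercise it; note that at this V8 version, behind --harmony-weak-refs, the cleanup callback still receives an iterator of holdings rather than being invoked once per holding:

// Sketch, assuming d8 with --harmony-weak-refs --expose-gc (as in the tests).
let cleanup_call_count = 0;
const fg = new FinalizationRegistry(function (iter) {
  for (const holdings of iter) ++cleanup_call_count;
});

const key = { k: 'unregister token must be an object' };
(function () {
  let target = {};
  fg.register(target, 'holdings', key);
  target = null;  // drop the only strong reference inside this closure
})();

gc();                                // schedules fg's cleanup as a later task
const success = fg.unregister(key);  // true if entries were removed before cleanup ran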
diff --git a/deps/v8/test/message/wasm-function-name-async.out b/deps/v8/test/message/wasm-function-name-async.out
index a298b07414..ad003ef227 100644
--- a/deps/v8/test/message/wasm-function-name-async.out
+++ b/deps/v8/test/message/wasm-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at main (wasm-function[0]:0x22)
+ at main (<anonymous>:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-function-name-streaming.out b/deps/v8/test/message/wasm-function-name-streaming.out
index e880c3c322..4e8b7d5a5a 100644
--- a/deps/v8/test/message/wasm-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at main (wasm-function[0]:0x22)
+ at main (<anonymous>:wasm-function[0]:0x22)
at test/message/wasm-function-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-async.out b/deps/v8/test/message/wasm-module-and-function-name-async.out
index 0cb61c4a6e..f2d044245b 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-async.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module.main (wasm-function[0]:0x22)
+ at test-module.main (<anonymous>:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-streaming.out b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
index 2367c6f3cf..8e6eebad7f 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module.main (wasm-function[0]:0x22)
+ at test-module.main (<anonymous>:wasm-function[0]:0x22)
at test/message/wasm-module-and-function-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-async.out b/deps/v8/test/message/wasm-module-name-async.out
index a1c9f69b2b..f2d044245b 100644
--- a/deps/v8/test/message/wasm-module-name-async.out
+++ b/deps/v8/test/message/wasm-module-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module (wasm-function[0]:0x22)
+ at test-module.main (<anonymous>:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-streaming.out b/deps/v8/test/message/wasm-module-name-streaming.out
index c9e029c250..e7435267e9 100644
--- a/deps/v8/test/message/wasm-module-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module (wasm-function[0]:0x22)
+ at test-module.main (<anonymous>:wasm-function[0]:0x22)
at test/message/wasm-module-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-async.out b/deps/v8/test/message/wasm-no-name-async.out
index 55a722dcb1..ad003ef227 100644
--- a/deps/v8/test/message/wasm-no-name-async.out
+++ b/deps/v8/test/message/wasm-no-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at wasm-function[0]:0x22
+ at main (<anonymous>:wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-streaming.out b/deps/v8/test/message/wasm-no-name-streaming.out
index b117074f78..987952db8b 100644
--- a/deps/v8/test/message/wasm-no-name-streaming.out
+++ b/deps/v8/test/message/wasm-no-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at wasm-function[0]:0x22
+ at main (<anonymous>:wasm-function[0]:0x22)
at test/message/wasm-no-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
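
Two stack-trace changes drive all of the wasm .out updates above: wasm frames now print in URL-position form, at name (script-url:wasm-function[index]:0xbyte-offset), and a function with no name-section entry falls back to its export name (here main) instead of a bare wasm-function[0]. A self-contained sketch that produces such a frame; the bytes are hand-assembled here and the 0x22 offsets in the expectations are specific to those test modules:

// Minimal module: (module (func (export "main") unreachable))
const bytes = new Uint8Array([
  0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00,              // "\0asm", version 1
  0x01, 0x04, 0x01, 0x60, 0x00, 0x00,                          // type: () -> ()
  0x03, 0x02, 0x01, 0x00,                                      // func 0 has type 0
  0x07, 0x08, 0x01, 0x04, 0x6d, 0x61, 0x69, 0x6e, 0x00, 0x00,  // export "main" = func 0
  0x0a, 0x05, 0x01, 0x03, 0x00, 0x00, 0x0b,                    // body: unreachable; end
]);
const instance = new WebAssembly.Instance(new WebAssembly.Module(bytes));
try {
  instance.exports.main();
} catch (e) {
  // A frame roughly of the form "at main (<anonymous>:wasm-function[0]:0x21)";
  // the hex offset is the module-relative byte position of the trapping instruction.
  console.log(e.stack);
}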
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.out b/deps/v8/test/message/wasm-trace-memory-interpreted.out
index e390f10fe3..e04e6964ca 100644
--- a/deps/v8/test/message/wasm-trace-memory-interpreted.out
+++ b/deps/v8/test/message/wasm-trace-memory-interpreted.out
@@ -9,3 +9,6 @@ interpreter func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab000
interpreter func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
interpreter func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
interpreter func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+interpreter func: 7+0x3 load from 00000004 val: i16:48879 / beef
+interpreter func: 8+0x3 load from 00000002 val: i64:-4688528683866062848 / beef0000beef0000
+interpreter func: 9+0x3 load from 00000002 val: f64:-0.000015 / beef0000beef0000
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.out b/deps/v8/test/message/wasm-trace-memory-liftoff.out
index 76364cdfaf..5682bca57a 100644
--- a/deps/v8/test/message/wasm-trace-memory-liftoff.out
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.out
@@ -7,5 +7,8 @@ liftoff func: 2+0x3 load from 00000002 val: f32:68169720922112.00000
liftoff func: 4+0x5 store to 00000004 val: i8:171 / ab
liftoff func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
liftoff func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
-turbofan func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+liftoff func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
liftoff func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+liftoff func: 7+0x3 load from 00000004 val: i16:48879 / beef
+liftoff func: 8+0x3 load from 00000002 val: i64:-4688528683866062848 / beef0000beef0000
+liftoff func: 9+0x3 load from 00000002 val: f64:-0.000015 / beef0000beef0000
diff --git a/deps/v8/test/message/wasm-trace-memory.js b/deps/v8/test/message/wasm-trace-memory.js
index 4102dad6f0..b268c63106 100644
--- a/deps/v8/test/message/wasm-trace-memory.js
+++ b/deps/v8/test/message/wasm-trace-memory.js
@@ -31,6 +31,17 @@ builder.addFunction('load128', kSig_v_i)
builder.addFunction('store128', kSig_v_ii)
.addBody([kExprLocalGet, 0, kExprLocalGet, 1, kSimdPrefix, kExprI32x4Splat, kSimdPrefix, kExprS128StoreMem, 0, 0])
.exportFunc();
+// We add functions after, rather than sorting in some order, so as to keep
+// the .out changes small (due to function index).
+builder.addFunction('load16', kSig_v_i)
+ .addBody([kExprLocalGet, 0, kExprI32LoadMem16U, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('load64', kSig_v_i)
+ .addBody([kExprLocalGet, 0, kExprI64LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
+builder.addFunction('loadf64', kSig_v_i)
+ .addBody([kExprLocalGet, 0, kExprF64LoadMem, 0, 0, kExprDrop])
+ .exportFunc();
var module = builder.instantiate();
module.exports.load(4);
@@ -44,3 +55,6 @@ module.exports.load(2);
module.exports.loadf(2);
module.exports.store128(4, 0xbeef);
module.exports.load128(2);
+module.exports.load16(4);
+module.exports.load64(2);
+module.exports.loadf64(2);
diff --git a/deps/v8/test/message/wasm-trace-memory.out b/deps/v8/test/message/wasm-trace-memory.out
index f41bc30fee..cfb707b543 100644
--- a/deps/v8/test/message/wasm-trace-memory.out
+++ b/deps/v8/test/message/wasm-trace-memory.out
@@ -9,3 +9,6 @@ turbofan func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab000
turbofan func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
turbofan func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
turbofan func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+turbofan func: 7+0x3 load from 00000004 val: i16:48879 / beef
+turbofan func: 8+0x3 load from 00000002 val: i64:-4688528683866062848 / beef0000beef0000
+turbofan func: 9+0x3 load from 00000002 val: f64:-0.000015 / beef0000beef0000
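
The new trace lines above are the stored 0xbeef splat pattern reinterpreted at the three freshly covered widths (16-bit load, 64-bit integer load, 64-bit float load). The printed values can be checked directly:

// i64: the bytes 0xbeef0000beef0000 as a signed 64-bit integer.
BigInt.asIntN(64, 0xbeef0000beef0000n);  // -4688528683866062848n

// f64: the same bytes reinterpreted as an IEEE-754 double.
const dv = new DataView(new ArrayBuffer(8));
dv.setBigUint64(0, 0xbeef0000beef0000n);
dv.getFloat64(0);  // ~ -1.5e-5, which the tracer prints as f64:-0.000015

// i16: an unsigned 16-bit load of the half-word 0xbeef.
0xbeef;  // 48879, matching "i16:48879 / beef"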
diff --git a/deps/v8/test/message/weakref-finalizationregistry-error.js b/deps/v8/test/message/weakref-finalizationregistry-error.js
new file mode 100644
index 0000000000..1db6bfeccd
--- /dev/null
+++ b/deps/v8/test/message/weakref-finalizationregistry-error.js
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+// Flags: --no-stress-opt
+
+// Since cleanup tasks are top-level tasks, errors thrown from them don't stop
+// future cleanup tasks from running.
+
+function callback(iter) {
+ [...iter];
+ throw new Error('callback');
+};
+
+const fg1 = new FinalizationRegistry(callback);
+const fg2 = new FinalizationRegistry(callback);
+
+(function() {
+let x = {};
+fg1.register(x, {});
+fg2.register(x, {});
+x = null;
+})();
+
+gc();
diff --git a/deps/v8/test/message/weakref-finalizationregistry-error.out b/deps/v8/test/message/weakref-finalizationregistry-error.out
new file mode 100644
index 0000000000..4682b63f23
--- /dev/null
+++ b/deps/v8/test/message/weakref-finalizationregistry-error.out
@@ -0,0 +1,12 @@
+*%(basename)s:{NUMBER}: Error: callback
+ throw new Error('callback');
+ ^
+Error: callback
+ at callback (*%(basename)s:{NUMBER}:{NUMBER})
+
+*%(basename)s:{NUMBER}: Error: callback
+ throw new Error('callback');
+ ^
+Error: callback
+ at callback (*%(basename)s:{NUMBER}:{NUMBER})
+
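
The expectation file above records two uncaught 'callback' errors on purpose: each registry's cleanup callback runs in its own top-level task, so the throw from the first callback is reported like any other uncaught exception and the second registry's callback still runs. The shape of the behavior, as a sketch (print is d8's builtin):

// Assumes d8 with --harmony-weak-refs --expose-gc, as in the test above.
const fg1 = new FinalizationRegistry(iter => { [...iter]; throw new Error('one'); });
const fg2 = new FinalizationRegistry(iter => { [...iter]; print('two still ran'); });
(function () {
  let x = {};
  fg1.register(x, {});
  fg2.register(x, {});
  x = null;
})();
gc();  // both cleanup tasks get queued; the throw in the first task
       // does not prevent the second task from running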
diff --git a/deps/v8/test/mjsunit/BUILD.gn b/deps/v8/test/mjsunit/BUILD.gn
index 847565f91a..f48c0bba5e 100644
--- a/deps/v8/test/mjsunit/BUILD.gn
+++ b/deps/v8/test/mjsunit/BUILD.gn
@@ -12,6 +12,9 @@ group("v8_mjsunit") {
data = [
"./",
+ "../../tools/clusterfuzz/v8_mock.js",
+ "../../tools/clusterfuzz/v8_mock_archs.js",
+ "../../tools/clusterfuzz/v8_mock_webassembly.js",
"../../tools/codemap.js",
"../../tools/consarray.js",
"../../tools/csvparser.js",
diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js
index 5d431b024d..b525406ad7 100644
--- a/deps/v8/test/mjsunit/array-reduce.js
+++ b/deps/v8/test/mjsunit/array-reduce.js
@@ -25,11 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --opt --no-lazy-feedback-allocation
-/**
- * @fileoverview Test reduce and reduceRight
- */
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
function clone(v) {
// Shallow-copies arrays, returns everything else verbatim.
diff --git a/deps/v8/test/mjsunit/asm/regress-674089.js b/deps/v8/test/mjsunit/asm/regress-674089.js
index edba023dec..7c0e89d3ee 100644
--- a/deps/v8/test/mjsunit/asm/regress-674089.js
+++ b/deps/v8/test/mjsunit/asm/regress-674089.js
@@ -7,10 +7,7 @@
function outer() {
"use asm";
function inner() {
- switch (1) {
- case 0:
- break foo;
- }
+ /f(/
}
}
outer();
diff --git a/deps/v8/test/mjsunit/call-intrinsic-differential-fuzzing.js b/deps/v8/test/mjsunit/call-intrinsic-differential-fuzzing.js
new file mode 100644
index 0000000000..867f5f7cee
--- /dev/null
+++ b/deps/v8/test/mjsunit/call-intrinsic-differential-fuzzing.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-for-differential-fuzzing
+
+// Test blacklisted intrinsics in the context of differential fuzzing.
+assertEquals(undefined, %IsBeingInterpreted());
diff --git a/deps/v8/test/mjsunit/call-intrinsic-fuzzing.js b/deps/v8/test/mjsunit/call-intrinsic-fuzzing.js
new file mode 100644
index 0000000000..3945c8d49d
--- /dev/null
+++ b/deps/v8/test/mjsunit/call-intrinsic-fuzzing.js
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-for-fuzzing
+
+// Test whitelisted/blacklisted intrinsics in the context of fuzzing.
+
+// Blacklisted intrinsics are replaced with undefined.
+assertEquals(undefined, %GetOptimizationStatus(function (){}));
+
+// Blacklisted intrinsics can have wrong arguments.
+assertEquals(undefined, %GetOptimizationStatus(1, 2, 3, 4));
+
+// We don't care if an intrinsic actually exists.
+assertEquals(undefined, %FooBar());
+
+// Check whitelisted intrinsic.
+assertNotEquals(undefined, %IsBeingInterpreted());
+
+// Whitelisted runtime functions with too few args are ignored.
+assertEquals(undefined, %DeoptimizeFunction());
+
+// Superfluous arguments are ignored.
+%DeoptimizeFunction(function() {}, undefined);
+assertNotEquals(undefined, %IsBeingInterpreted(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js b/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js
new file mode 100644
index 0000000000..f56cae9c4e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/inlined-call-polymorphic.js
@@ -0,0 +1,24 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --opt
+
+function make_closure() { return () => { return 42; } }
+%PrepareFunctionForOptimization(make_closure);
+%PrepareFunctionForOptimization(make_closure());
+
+function inline_polymorphic(f) {
+ let answer = f();
+ %TurbofanStaticAssert(answer == 42);
+}
+
+%PrepareFunctionForOptimization(inline_polymorphic);
+inline_polymorphic(make_closure());
+inline_polymorphic(make_closure());
+%OptimizeFunctionOnNextCall(inline_polymorphic);
+inline_polymorphic(make_closure());
+
+try {
+ inline_polymorphic(3);
+} catch(e) {}
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
index 2d7f558c0d..25649b3bbb 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
@@ -36,6 +36,7 @@ function check() {
}
assertEquals(0, r.length);
}
+%PrepareFunctionForOptimization(check);
check();
check();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1061678.js b/deps/v8/test/mjsunit/compiler/regress-1061678.js
new file mode 100644
index 0000000000..fac2326a0a
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1061678.js
@@ -0,0 +1,20 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+Array.prototype[10] = 2;
+
+function foo() {
+ try {
+ [].forEach();
+ } catch (e) {
+ }
+};
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
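
The regression tests above (and several below) follow the standard mjsunit optimization protocol: prepare, warm up with representative inputs, request optimization, then call again. In outline (requires --allow-natives-syntax):

function f(x) { return x + 1; }
%PrepareFunctionForOptimization(f);  // keep feedback around so TurboFan can use it
f(1);                                // warm-up calls populate type feedback
f(2);
%OptimizeFunctionOnNextCall(f);      // force TurboFan compilation on the next call
f(3);                                // executes the optimized code path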
diff --git a/deps/v8/test/mjsunit/compiler/regress-1061803.js b/deps/v8/test/mjsunit/compiler/regress-1061803.js
new file mode 100644
index 0000000000..b951ab70e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1061803.js
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return arguments[1][0] === arguments[0];
+}
+
+%PrepareFunctionForOptimization(foo);
+assertFalse(foo(0, 0));
+assertFalse(foo(0, 0));
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1062916.js b/deps/v8/test/mjsunit/compiler/regress-1062916.js
new file mode 100644
index 0000000000..f7064571ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1062916.js
@@ -0,0 +1,20 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-analyze-environment-liveness --no-use-ic
+
+function foo(x) {
+ var a = [];
+ for (var k1 in x) {
+ for (var k2 in x) {
+ a.k2;
+ }
+ }
+ return a.join();
+}
+
+%PrepareFunctionForOptimization(foo);
+foo({p: 42});
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1063661.js b/deps/v8/test/mjsunit/compiler/regress-1063661.js
new file mode 100644
index 0000000000..e2ff2cb0de
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1063661.js
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --interrupt-budget=1024
+
+function main() {
+ const v1 = [];
+ for (let v11 = 0; v11 < 7; v11++) {
+ for (let v16 = 0; v16 != 100; v16++) {}
+ for (let v18 = -0.0; v18 < 7; v18 = v18 || 13.37) {
+ const v21 = Math.max(-339,v18);
+ v1.fill();
+ undefined % v21;
+ }
+ }
+}
+main();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1072171.js b/deps/v8/test/mjsunit/compiler/regress-1072171.js
new file mode 100644
index 0000000000..6f6d063c8c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1072171.js
@@ -0,0 +1,45 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function testMax1(b) {
+ const max = Math.max(-1, b ? -0 : 1);
+ return Object.is(max, -0);
+}
+%PrepareFunctionForOptimization(testMax1);
+assertTrue(testMax1(true));
+assertTrue(testMax1(true));
+%OptimizeFunctionOnNextCall(testMax1);
+assertTrue(testMax1(true));
+
+function testMax2(b) {
+ const max = Math.max(b ? -0 : 1, -1);
+ return Object.is(max, -0);
+}
+%PrepareFunctionForOptimization(testMax2);
+assertTrue(testMax2(true));
+assertTrue(testMax2(true));
+%OptimizeFunctionOnNextCall(testMax2);
+assertTrue(testMax2(true));
+
+function testMin1(b) {
+ const min = Math.min(1, b ? -0 : -1);
+ return Object.is(min, -0);
+}
+%PrepareFunctionForOptimization(testMin1);
+assertTrue(testMin1(true));
+assertTrue(testMin1(true));
+%OptimizeFunctionOnNextCall(testMin1);
+assertTrue(testMin1(true));
+
+function testMin2(b) {
+ const min = Math.min(b ? -0 : -1, 1);
+ return Object.is(min, -0);
+}
+%PrepareFunctionForOptimization(testMin2);
+assertTrue(testMin2(true));
+assertTrue(testMin2(true));
+%OptimizeFunctionOnNextCall(testMin2);
+assertTrue(testMin2(true));
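
The four cases above pin down negative-zero handling: Math.max and Math.min must preserve the sign of -0, and since -0 === 0, only Object.is (or a 1/x check) can observe a regression where optimized code returns +0. For reference:

Object.is(Math.max(-1, -0), -0);  // true: -0 is the larger operand
Object.is(Math.min(1, -0), -0);   // true: -0 is the smaller operand
-0 === 0;                         // true, which is why the tests need Object.is
1 / -0;                           // -Infinity, the classic way to observe the sign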
diff --git a/deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case-noi18n.js b/deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case-noi18n.js
index a99894234a..8f110e1d21 100644
--- a/deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case-noi18n.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case-noi18n.js
@@ -29,11 +29,24 @@ assertFalse(/\u{10400}/ui.test("\u{10428}"));
assertFalse(/\ud801\udc00/ui.test("\u{10428}"));
assertFalse(/[\u{10428}]/ui.test("\u{10400}"));
assertFalse(/[\ud801\udc28]/ui.test("\u{10400}"));
-assertEquals(["\uff21\u{10400}"],
- /[\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc"));
-assertEquals(["abc"], /[^\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc\uff23"));
-assertEquals(["\uff53\u24bb"],
- /[\u24d5-\uff33]+/ui.exec("\uff54\uff53\u24bb\u24ba"));
+
+// TODO(v8:10120): Investigate why these don't behave as expected.
+{
+ // assertEquals(["\uff21\u{10400}"],
+ // /[\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc"));
+ assertEquals(["\u{10400}"],
+ /[\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc"));
+}
+{
+ // assertEquals(["abc"], /[^\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc\uff23"));
+ assertEquals(["\u{ff21}"], /[^\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc\uff23"));
+}
+{
+ // assertEquals(["\uff53\u24bb"],
+ // /[\u24d5-\uff33]+/ui.exec("\uff54\uff53\u24bb\u24ba"));
+ assertEquals(null,
+ /[\u24d5-\uff33]+/ui.exec("\uff54\uff53\u24bb\u24ba"));
+}
// Full mappings are ignored.
assertFalse(/\u00df/ui.test("SS"));
diff --git a/deps/v8/test/mjsunit/for-in-special-cases.js b/deps/v8/test/mjsunit/for-in-special-cases.js
index 744c948b94..5470985947 100644
--- a/deps/v8/test/mjsunit/for-in-special-cases.js
+++ b/deps/v8/test/mjsunit/for-in-special-cases.js
@@ -179,7 +179,7 @@ for_in_string_prototype();
}
})();
-(function for_in_prototype_change_element() {
+(function for_in_prototype_change_element1() {
let prototype1 = {prop: 0, prop1: 1};
let derived1 = {prop2: 2, prop3: 3};
@@ -199,6 +199,74 @@ for_in_string_prototype();
}
})();
+(function for_in_prototype_change_element2() {
+ Array.prototype.__proto__ = {'A': 1};
+ let array = ['a', 'b', 'c', 'd', 'e'];
+ for (let i = 0; i < 3; i++) {
+ assertEquals(['0', '1', '2', '3', '4', 'A'], Accumulate(array));
+ }
+ Array.prototype[10] = 'b';
+ for (let i = 0; i < 3; i++) {
+ assertEquals(['0', '1', '2', '3', '4', '10', 'A'], Accumulate(array));
+ }
+})();
+
+(function for_in_prototype_change_element3() {
+ let prototype = {prop: 0};
+ let holey_array = {
+ 1: 'a',
+ get 3() {
+ delete this[5];
+ return 'b';
+ },
+ 5: 'c'
+ };
+ Object.setPrototypeOf(holey_array, prototype);
+ for (let i = 0; i < 3; i++) {
+ assertEquals(['1', '3', '5', 'prop'], Accumulate(holey_array));
+ }
+ prototype[10] = 'b';
+ for (let i = 0; i < 3; i++) {
+ assertEquals(['1', '3', '5', '10', 'prop'], Accumulate(holey_array));
+ }
+ for (let i = 0; i < 3; i++) {
+ var accumulator = [];
+ for (var j in holey_array) {
+ accumulator.push(j);
+ holey_array[j];
+ }
+ assertEquals(['1', '3', '10', 'prop'], accumulator);
+ }
+})();
+
+(function for_in_prototype_change_element4() {
+ let prototype = {
+ 1: 'a',
+ get 3() {
+ delete this[5];
+ return 'b';
+ },
+ 5: 'c',
+ };
+ let holey_array = {7: 'd', 9: 'e'};
+ Object.setPrototypeOf(holey_array, prototype);
+ for (let i = 0; i < 3; i++) {
+ assertEquals(['7', '9', '1', '3', '5'], Accumulate(holey_array));
+ }
+ prototype.prop = 0;
+ for (let i = 0; i < 3; i++) {
+ assertEquals(['7', '9', '1', '3', '5', 'prop'], Accumulate(holey_array));
+ }
+ for (let i = 0; i < 3; i++) {
+ var accumulator = [];
+ for (var j in holey_array) {
+ accumulator.push(j);
+ prototype[j];
+ }
+ assertEquals(['7', '9', '1', '3', 'prop'], accumulator);
+ }
+})();
+
(function for_in_non_enumerable1() {
let prototype1 = {prop: 0};
let derived1 = Object.create(prototype1, {
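
The new cases above iterate receivers whose own or prototype properties are added or deleted mid-enumeration; per the spec's EnumerateObjectProperties, a key deleted before it is visited is skipped, which is why '5' disappears once the getter for '3' deletes it. The core rule in isolation:

const obj = { a: 1, b: 2, c: 3 };
const seen = [];
for (const k in obj) {
  seen.push(k);
  if (k === 'a') delete obj.c;
}
// seen is ['a', 'b']: 'c' was gone before the enumeration reached it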
diff --git a/deps/v8/test/mjsunit/frozen-array-reduce.js b/deps/v8/test/mjsunit/frozen-array-reduce.js
index 6f121ae5fe..217dade6f1 100644
--- a/deps/v8/test/mjsunit/frozen-array-reduce.js
+++ b/deps/v8/test/mjsunit/frozen-array-reduce.js
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt --no-lazy-feedback-allocation
-/**
- * @fileoverview Test reduce and reduceRight
- */
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
function clone(v) {
// Shallow-copies arrays, returns everything else verbatim.
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-throw-caught.js b/deps/v8/test/mjsunit/harmony/async-generators-throw-caught.js
new file mode 100644
index 0000000000..152a87cce1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-generators-throw-caught.js
@@ -0,0 +1,24 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let caught_in_gen = false;
+async function* catch_gen() {
+ try {
+ yield 42;
+ } catch (e) {
+ caught_in_gen = true;
+ }
+}
+
+(async () => {
+ const g = catch_gen();
+ await g.next();
+ try {
+ await g.throw(new Error()); // Should be caught in catch_gen, then catch_gen
+ // completes normally.
+ } catch (e) {
+ assertUnreachable();
+ }
+ assertTrue(caught_in_gen);
+})();
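
The test above checks that AsyncGenerator.prototype.throw resumes the generator as if the suspended yield itself threw, so a try/catch around the yield handles it and the throw() promise settles with the generator's next normal result instead of rejecting. Equivalently:

async function* gen() {
  try {
    yield 1;
  } catch (e) {
    yield 'caught: ' + e.message;
  }
}

(async () => {
  const g = gen();
  await g.next();                           // { value: 1, done: false }
  const r = await g.throw(new Error('x'));  // handled inside the generator
  console.log(r.value);                     // "caught: x"
})();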
diff --git a/deps/v8/test/mjsunit/harmony/bigint/property-names.js b/deps/v8/test/mjsunit/harmony/bigint/property-names.js
index 8c1763053c..ac65e1041d 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/property-names.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/property-names.js
@@ -3,5 +3,7 @@
// found in the LICENSE file.
var { 9007199254740991n: it } = { 9007199254740991n: 1 };
+assertEquals(it, 1);
+var { 999999999999999999n: it } = { 999999999999999999n: 1 }; // greater than max safe integer
assertEquals(it, 1);
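
The added case matters because a BigInt literal used as a property name is stringified from the BigInt itself, so 999999999999999999n names the property exactly, whereas the same digits as a Number literal would round to a different key. Concretely:

String(999999999999999999n);  // "999999999999999999" (exact)
String(999999999999999999);   // "1000000000000000000" (rounded to the nearest double)

const o = { 999999999999999999n: 1 };
o['999999999999999999'];      // 1: the BigInt's exact digits are the key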
diff --git a/deps/v8/test/mjsunit/harmony/private-methods-empty-inner.js b/deps/v8/test/mjsunit/harmony/private-methods-empty-inner.js
new file mode 100644
index 0000000000..20fb575694
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-methods-empty-inner.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+// This tests that empty inner classes don't assign private brands of outer
+// classes in their instances after scope chain deserialization.
+
+'use strict';
+
+class Outer {
+ constructor() {}
+#method(){}
+ factory() {
+ class Inner {
+ constructor() {}
+ }
+ return Inner;
+ }
+ run(obj) {
+ obj.#method();
+ }
+}
+
+const instance = new Outer();
+const Inner = instance.factory();
+// It should not pass the brand check.
+assertThrows(() => instance.run(new Inner()), TypeError);
+// It should pass the brand check.
+instance.run(new Outer());
diff --git a/deps/v8/test/mjsunit/harmony/private-methods.js b/deps/v8/test/mjsunit/harmony/private-methods.js
index b42e4f658c..7147c6d255 100644
--- a/deps/v8/test/mjsunit/harmony/private-methods.js
+++ b/deps/v8/test/mjsunit/harmony/private-methods.js
@@ -266,8 +266,10 @@
}
{
- // TODO(v8:9177): test extending a class expression that does not have
- // a private method.
+ class A extends class { } {
+ #a() {}
+ }
+
class D extends class {
#c() {}
} {
@@ -278,6 +280,7 @@
#e() {}
}
+ new A;
new D;
new E;
}
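
Both new classes drive the "private brand" machinery: a class with private methods stamps every instance it constructs with a brand, and obj.#m() first checks that brand, so instances of an unrelated inner class (even an empty one restored from a serialized scope chain, as in the new test above) must fail the check. The observable contract:

class A {
  #m() { return 'branded'; }
  static tryCall(obj) { return obj.#m(); }  // brand check happens here
}

A.tryCall(new A());                            // 'branded'
assertThrows(() => A.tryCall({}), TypeError);  // plain object lacks A's brand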
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
index df599ebd40..9a1a99efe4 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
@@ -4,52 +4,52 @@
// Flags: --harmony-weak-refs
-(function TestConstructFinalizationGroup() {
- let fg = new FinalizationGroup(() => {});
- assertEquals(fg.toString(), "[object FinalizationGroup]");
+(function TestConstructFinalizationRegistry() {
+ let fg = new FinalizationRegistry(() => {});
+ assertEquals(fg.toString(), "[object FinalizationRegistry]");
assertNotSame(fg.__proto__, Object.prototype);
assertSame(fg.__proto__.__proto__, Object.prototype);
})();
-(function TestFinalizationGroupConstructorCallAsFunction() {
+(function TestFinalizationRegistryConstructorCallAsFunction() {
let caught = false;
let message = "";
try {
- let f = FinalizationGroup(() => {});
+ let f = FinalizationRegistry(() => {});
} catch (e) {
message = e.message;
caught = true;
} finally {
assertTrue(caught);
- assertEquals(message, "Constructor FinalizationGroup requires 'new'");
+ assertEquals(message, "Constructor FinalizationRegistry requires 'new'");
}
})();
-(function TestConstructFinalizationGroupCleanupNotCallable() {
- let message = "FinalizationGroup: cleanup must be callable";
- assertThrows(() => { let fg = new FinalizationGroup(); }, TypeError, message);
- assertThrows(() => { let fg = new FinalizationGroup(1); }, TypeError, message);
- assertThrows(() => { let fg = new FinalizationGroup(null); }, TypeError, message);
+(function TestConstructFinalizationRegistryCleanupNotCallable() {
+ let message = "FinalizationRegistry: cleanup must be callable";
+ assertThrows(() => { let fg = new FinalizationRegistry(); }, TypeError, message);
+ assertThrows(() => { let fg = new FinalizationRegistry(1); }, TypeError, message);
+ assertThrows(() => { let fg = new FinalizationRegistry(null); }, TypeError, message);
})();
-(function TestConstructFinalizationGroupWithCallableProxyAsCleanup() {
+(function TestConstructFinalizationRegistryWithCallableProxyAsCleanup() {
let handler = {};
let obj = () => {};
let proxy = new Proxy(obj, handler);
- let fg = new FinalizationGroup(proxy);
+ let fg = new FinalizationRegistry(proxy);
})();
-(function TestConstructFinalizationGroupWithNonCallableProxyAsCleanup() {
- let message = "FinalizationGroup: cleanup must be callable";
+(function TestConstructFinalizationRegistryWithNonCallableProxyAsCleanup() {
+ let message = "FinalizationRegistry: cleanup must be callable";
let handler = {};
let obj = {};
let proxy = new Proxy(obj, handler);
- assertThrows(() => { let fg = new FinalizationGroup(proxy); }, TypeError, message);
+ assertThrows(() => { let fg = new FinalizationRegistry(proxy); }, TypeError, message);
})();
(function TestRegisterWithNonObjectTarget() {
- let fg = new FinalizationGroup(() => {});
- let message = "FinalizationGroup.prototype.register: target must be an object";
+ let fg = new FinalizationRegistry(() => {});
+ let message = "FinalizationRegistry.prototype.register: target must be an object";
assertThrows(() => fg.register(1, "holdings"), TypeError, message);
assertThrows(() => fg.register(false, "holdings"), TypeError, message);
assertThrows(() => fg.register("foo", "holdings"), TypeError, message);
@@ -62,40 +62,40 @@
let handler = {};
let obj = {};
let proxy = new Proxy(obj, handler);
- let fg = new FinalizationGroup(() => {});
+ let fg = new FinalizationRegistry(() => {});
fg.register(proxy);
})();
(function TestRegisterTargetAndHoldingsSameValue() {
- let fg = new FinalizationGroup(() => {});
+ let fg = new FinalizationRegistry(() => {});
let obj = {a: 1};
// SameValue(target, holdings) not ok
assertThrows(() => fg.register(obj, obj), TypeError,
- "FinalizationGroup.prototype.register: target and holdings must not be same");
+ "FinalizationRegistry.prototype.register: target and holdings must not be same");
let holdings = {a: 1};
fg.register(obj, holdings);
})();
-(function TestRegisterWithoutFinalizationGroup() {
- assertThrows(() => FinalizationGroup.prototype.register.call({}, {}, "holdings"), TypeError);
+(function TestRegisterWithoutFinalizationRegistry() {
+ assertThrows(() => FinalizationRegistry.prototype.register.call({}, {}, "holdings"), TypeError);
// Does not throw:
- let fg = new FinalizationGroup(() => {});
- FinalizationGroup.prototype.register.call(fg, {}, "holdings");
+ let fg = new FinalizationRegistry(() => {});
+ FinalizationRegistry.prototype.register.call(fg, {}, "holdings");
})();
(function TestUnregisterWithNonExistentKey() {
- let fg = new FinalizationGroup(() => {});
+ let fg = new FinalizationRegistry(() => {});
let success = fg.unregister({"k": "whatever"});
assertFalse(success);
})();
-(function TestUnregisterWithNonFinalizationGroup() {
- assertThrows(() => FinalizationGroup.prototype.unregister.call({}, {}),
+(function TestUnregisterWithNonFinalizationRegistry() {
+ assertThrows(() => FinalizationRegistry.prototype.unregister.call({}, {}),
TypeError);
})();
(function TestUnregisterWithNonObjectUnregisterToken() {
- let fg = new FinalizationGroup(() => {});
+ let fg = new FinalizationRegistry(() => {});
assertThrows(() => fg.unregister(1), TypeError);
assertThrows(() => fg.unregister(1n), TypeError);
assertThrows(() => fg.unregister('one'), TypeError);
@@ -149,16 +149,16 @@
let wr = new WeakRef(proxy);
})();
-(function TestCleanupSomeWithoutFinalizationGroup() {
- assertThrows(() => FinalizationGroup.prototype.cleanupSome.call({}), TypeError);
+(function TestCleanupSomeWithoutFinalizationRegistry() {
+ assertThrows(() => FinalizationRegistry.prototype.cleanupSome.call({}), TypeError);
// Does not throw:
- let fg = new FinalizationGroup(() => {});
- let rv = FinalizationGroup.prototype.cleanupSome.call(fg);
+ let fg = new FinalizationRegistry(() => {});
+ let rv = FinalizationRegistry.prototype.cleanupSome.call(fg);
assertEquals(undefined, rv);
})();
(function TestCleanupSomeWithNonCallableCallback() {
- let fg = new FinalizationGroup(() => {});
+ let fg = new FinalizationRegistry(() => {});
assertThrows(() => fg.cleanupSome(1), TypeError);
assertThrows(() => fg.cleanupSome(1n), TypeError);
assertThrows(() => fg.cleanupSome(Symbol()), TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
index ebc4ebf933..3f5133a87c 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
@@ -39,8 +39,8 @@ let cleanup = function(iter) {
}
}
-let fg = new FinalizationGroup(cleanup);
-// Create 3 objects and register them in the FinalizationGroup. The objects need
+let fg = new FinalizationRegistry(cleanup);
+// Create 3 objects and register them in the FinalizationRegistry. The objects need
// to be inside a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
index 97ab1dbd80..eac92486a0 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
@@ -9,9 +9,9 @@ let r = Realm.create();
let cleanup = Realm.eval(r, "var stored_global; function cleanup() { stored_global = globalThis; } cleanup");
let realm_global_this = Realm.eval(r, "globalThis");
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
-// Create an object and a register it in the FinalizationGroup. The object needs
+// Create an object and a register it in the FinalizationRegistry. The object needs
// to be inside a closure so that we can reliably kill them!
let weak_cell;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
index 077bc21e82..12282f7486 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
@@ -24,7 +24,7 @@ let cleanup = (iter) => {
for (holdings of iter) { }
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let o = null;
(function() {
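
As the renamed comment above says, finalization cleanup is scheduled as an ordinary task, never as a microtask, so every microtask queued in the current turn runs before any cleanup callback. A sketch of the observable ordering; the relative order of the cleanup task and the setTimeout task is an engine scheduling detail, so only the microtask-first property is asserted:

// Assumes d8 with --harmony-weak-refs --expose-gc.
const order = [];
const fg = new FinalizationRegistry(iter => { [...iter]; order.push('cleanup'); });
(function () { let o = {}; fg.register(o, 1); o = null; })();
gc();                                                  // queues cleanup as a task
Promise.resolve().then(() => order.push('microtask')); // queues a microtask
setTimeout(() => {
  assertEquals('microtask', order[0]);  // microtasks drain before any task runs
}, 0);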
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
index 9cc548920c..3513c8f211 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
@@ -6,18 +6,31 @@
let cleanedUp = false;
let r = Realm.create();
-let FG = Realm.eval(r, "FinalizationGroup");
+let FG = Realm.eval(r, "FinalizationRegistry");
Realm.detachGlobal(r);
+let fg_not_run = new FG(() => {
+ assertUnreachable();
+});
+(() => {
+ fg_not_run.register({});
+})();
+
+gc();
+
+// Disposing the realm cancels the already scheduled fg_not_run's finalizer.
+Realm.dispose(r);
+
let fg = new FG(()=> {
cleanedUp = true;
});
+// FGs that are alive after disposal can still schedule tasks.
(() => {
let object = {};
fg.register(object, {});
- // object goes out of scope.
+ // object becomes unreachable.
})();
gc();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
index 1d275a19aa..f2374efc88 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
@@ -9,9 +9,9 @@ let r = Realm.create();
let cleanup = Realm.eval(r, "var stored_global; let cleanup = new Proxy(function() { stored_global = globalThis;}, {}); cleanup");
let realm_global_this = Realm.eval(r, "globalThis");
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
-// Create an object and register it in the FinalizationGroup. The object needs
+// Create an object and register it in the FinalizationRegistry. The object needs
// to be inside a closure so that we can reliably kill them!
let weak_cell;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
index 67ed64e85a..8e40fd6bbd 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
@@ -13,7 +13,7 @@ let cleanup = function(iter) {
++cleanup_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
(function() {
let o = {};
fg.register(o, "holdings");
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
index 363fc4a524..697c926a82 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
@@ -13,7 +13,7 @@ let cleanup = function(iter) {
++cleanup_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let key = {"k": "this is the key"};
(function() {
let o = {};
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
index 06eb292dac..f757bdbf7e 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
@@ -17,7 +17,7 @@ let cleanup2 = function(iter) {
++cleanup_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
(function() {
let o = {};
fg.register(o, "holdings");
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
index 83de3a838b..144b56fce7 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-and-weakref.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
@@ -16,7 +16,7 @@ let cleanup = function(iter) {
cleanup_called = true;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let weak_ref;
(function() {
let o = {};
@@ -25,7 +25,7 @@ let weak_ref;
})();
// Since the WeakRef was created during this turn, it is not cleared by GC. The
-// pointer inside the FinalizationGroup is not cleared either, since the WeakRef
+// pointer inside the FinalizationRegistry is not cleared either, since the WeakRef
// keeps the target object alive.
gc();
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
new file mode 100644
index 0000000000..732740f293
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_called = false;
+function cleanup(iter) {
+ [...iter];
+ cleanup_called = true;
+};
+(function() {
+ let fg = new FinalizationRegistry(cleanup);
+ (function() {
+ let x = {};
+ fg.register(x, {});
+ x = null;
+ })();
+ // Schedule fg for cleanup.
+ gc();
+})();
+
+// Collect fg, which should result in cleanup not called.
+gc();
+
+setTimeout(function() { assertFalse(cleanup_called); }, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalization-group-keeps-holdings-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
index ea35a2e63f..d8c00fcff8 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalization-group-keeps-holdings-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
@@ -16,18 +16,23 @@ let cleanup = function(iter) {
cleanup_called = true;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let o1 = {};
let holdings = {'a': 'this is the holdings object'};
-fg.register(o1, holdings);
+
+// Ignition holds references to objects in temporary registers. These will be
+// released when the function exits. So only access o inside a function to
+// prevent any references to objects in temporary registers when a gc is
+// triggered.
+(() => {fg.register(o1, holdings);})()
gc();
assertFalse(cleanup_called);
// Drop the last references to o1.
-o1 = null;
+(() => {o1 = null;})()
-// Drop the last reference to the holdings. The FinalizationGroup keeps it
+// Drop the last reference to the holdings. The FinalizationRegistry keeps it
// alive, so the cleanup function will be called as normal.
holdings = null;
gc();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-scheduled-for-cleanup-multiple-times.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
index a1cff3aaa0..de25bbe425 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationgroup-scheduled-for-cleanup-multiple-times.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
@@ -25,10 +25,10 @@ let cleanup1 = function(iter) {
++cleanup1_call_count;
}
-let fg0 = new FinalizationGroup(cleanup0);
-let fg1 = new FinalizationGroup(cleanup1);
+let fg0 = new FinalizationRegistry(cleanup0);
+let fg1 = new FinalizationRegistry(cleanup1);
-// Register 1 weak reference for each FinalizationGroup and kill the objects they point to.
+// Register 1 weak reference for each FinalizationRegistry and kill the objects they point to.
(function() {
// The objects need to be inside a closure so that we can reliably kill them.
let objects = [];
@@ -40,13 +40,13 @@ let fg1 = new FinalizationGroup(cleanup1);
// Drop the references to the objects.
objects = [];
-
- // Will schedule both fg0 and fg1 for cleanup.
- gc();
})();
+// Will schedule both fg0 and fg1 for cleanup.
+gc();
+
// Before the cleanup task has a chance to run, do the same thing again, so both
-// FinalizationGroups are (again) scheduled for cleanup. This has to be a IIFE function
+// FinalizationRegistries are (again) scheduled for cleanup. This has to be a IIFE function
// (so that we can reliably kill the objects) so we cannot use the same function
// as before.
(function() {
@@ -56,9 +56,10 @@ let fg1 = new FinalizationGroup(cleanup1);
fg0.register(objects[0], "holdings0-1");
fg1.register(objects[1], "holdings1-1");
objects = [];
- gc();
})();
+gc();
+
let timeout_func = function() {
assertEquals(1, cleanup0_call_count);
assertEquals(2, cleanup0_holdings_count);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js
index 73aac76378..c591b44a54 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js
@@ -21,18 +21,27 @@ let cleanup = function(iter) {
cleanup_called = true;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let o1 = {};
let o2 = {};
-fg.register(o1, 1);
-fg.register(o2, 2);
+
+// Ignition holds references to objects in temporary registers. These will be
+// released when the function exits. So only access o inside a function to
+// prevent any references to objects in temporary registers when a gc is
+(function() {
+ fg.register(o1, 1);
+ fg.register(o2, 2);
+})();
gc();
assertFalse(cleanup_called);
// Drop the last references to o1 and o2.
-o1 = null;
-o2 = null;
+(function() {
+ o1 = null;
+ o2 = null;
+})();
+
// GC will reclaim the target objects; the cleanup function will be called the
// next time we enter the event loop.
gc();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
index 51e721401a..fec0ab5b57 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
@@ -13,10 +13,10 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg1 = new FinalizationGroup(cleanup);
-let fg2 = new FinalizationGroup(cleanup);
+let fg1 = new FinalizationRegistry(cleanup);
+let fg2 = new FinalizationRegistry(cleanup);
-// Create two objects and register them in FinalizationGroups. The objects need
+// Create two objects and register them in FinalizationRegistries. The objects need
// to be inside a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
new file mode 100644
index 0000000000..09854f0556
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let call_count = 0;
+let reentrant_gc =
+ function(iter) {
+ [...iter];
+ gc();
+ call_count++;
+}
+
+let fg = new FinalizationRegistry(reentrant_gc);
+
+(function() {
+fg.register({}, 42);
+})();
+
+gc();
+
+setTimeout(function() {
+ assertEquals(1, call_count);
+}, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
index ac3dc6041a..7c09cf5985 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
@@ -14,9 +14,9 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
-// Create an object and register it in the FinalizationGroup. The object needs to be inside
+// Create an object and register it in the FinalizationRegistry. The object needs to be inside
// a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
index 3db18e016e..e8a7843c71 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
@@ -14,9 +14,9 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let key = {"k": "this is the key"};
-// Create an object and register it in the FinalizationGroup. The object needs
+// Create an object and register it in the FinalizationRegistry. The object needs
// to be inside a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
index c54dbb25de..ff576b4dfe 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
@@ -9,9 +9,9 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let key = {"k": "this is the key"};
-// Create an object and register it in the FinalizationGroup. The object needs
+// Create an object and register it in the FinalizationRegistry. The object needs
// to be inside a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
index d1dd8f8b75..e7604eecec 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
@@ -9,9 +9,9 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let key = {"k": "this is the key"};
-// Create an object and register it in the FinalizationGroup. The object needs
+// Create an object and register it in the FinalizationRegistry. The object needs
// to be inside a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
index 80ca85f619..a62e6ed923 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
@@ -17,9 +17,9 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let key = {"k": "the key"};
-// Create an object and register it in the FinalizationGroup. The object needs
+// Create an object and register it in the FinalizationRegistry. The object needs
// to be inside a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
index 038f5093e1..e26d9a1921 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
@@ -17,8 +17,8 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
-// Create an object and register it in the FinalizationGroup. The object needs to be inside
+let fg = new FinalizationRegistry(cleanup);
+// Create an object and register it in the FinalizationRegistry. The object needs to be inside
// a closure so that we can reliably kill them!
let key = {"k": "this is the key"};
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
index af6b5c13ed..8f28673205 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
@@ -17,10 +17,10 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let key = {"k": "this is the key"};
-// Create an object and register it in the FinalizationGroup. The object needs to be inside
+// Create an object and register it in the FinalizationRegistry. The object needs to be inside
// a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
index 8a0a5d5707..a7ab9d18df 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
@@ -22,10 +22,10 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let key1 = {"k": "first key"};
let key2 = {"k": "second key"};
-// Create two objects and register them in the FinalizationGroup. The objects
+// Create two objects and register them in the FinalizationRegistry. The objects
// need to be inside a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js
index 974485e9cb..30926d1d56 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js
@@ -21,8 +21,8 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
-// Create an object and register it in the FinalizationGroup. The object needs to be inside
+let fg = new FinalizationRegistry(cleanup);
+// Create an object and register it in the FinalizationRegistry. The object needs to be inside
// a closure so that we can reliably kill them!
let key = {"k": "this is the key"};
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
index 8be0db1444..3512fc9217 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
@@ -14,10 +14,10 @@ let cleanup = function(iter) {
++cleanup_call_count;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let key1 = {"k": "key1"};
let key2 = {"k": "key2"};
-// Create three objects and register them in the FinalizationGroup. The objects
+// Create three objects and register them in the FinalizationRegistry. The objects
// need to be inside a closure so that we can reliably kill them!
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
index 4ad4425a4c..5117997965 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
@@ -10,8 +10,8 @@ let cleanup = function(iter) {
}
let key = {"k": "this is my key"};
-let fg = new FinalizationGroup(cleanup);
-// Create an object and register it in the FinalizationGroup. The object needs to be inside
+let fg = new FinalizationRegistry(cleanup);
+// Create an object and register it in the FinalizationRegistry. The object needs to be inside
// a closure so that we can reliably kill them!
(function() {
@@ -25,7 +25,7 @@ let fg = new FinalizationGroup(cleanup);
gc();
assertEquals(0, cleanup_call_count);
-// Unregister the object from the FinalizationGroup before cleanup has ran.
+// Unregister the object from the FinalizationRegistry before cleanup has run.
let success = fg.unregister(key);
assertTrue(success);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
index 170a52df10..f3480f78d1 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
@@ -15,16 +15,22 @@ let cleanup = function(iter) {
cleanup_called = true;
}
-let fg = new FinalizationGroup(cleanup);
+let fg = new FinalizationRegistry(cleanup);
let o = {};
let holdings = {'h': 55};
-fg.register(o, holdings);
+
+// Ignition holds references to objects in temporary registers. These are
+// released when the function exits, so only access o inside a function to
+// avoid leaving references to it in temporary registers at the point where
+// a GC is triggered.
+(() => { fg.register(o, holdings); })()
gc();
assertFalse(cleanup_called);
// Drop the last reference to o.
-o = null;
+(() => { o = null; })()
+
// GC will clear the WeakCell; the cleanup function will be called the next time
// we enter the event loop.
gc();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
index 8fdc803c56..94f5ce6a90 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
@@ -4,7 +4,7 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
-var FG = new FinalizationGroup (function (iter) { globalThis.FGRan = true; });
+var FG = new FinalizationRegistry (function (iter) { globalThis.FRRan = true; });
{
let obj = {};
// obj is its own unregister token and becomes unreachable after this
@@ -14,7 +14,7 @@ var FG = new FinalizationGroup (function (iter) { globalThis.FGRan = true; });
}
function tryAgain() {
gc();
- if (globalThis.FGRan || FG.cleanupSome()) {
+ if (globalThis.FRRan || FG.cleanupSome()) {
return;
}
setTimeout(tryAgain, 0);
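The test above relies on the third `register` argument, the unregister token. A plain-JS sketch of that part of the renamed API, independent of the mjsunit harness:

```js
const registry = new FinalizationRegistry(held => print('finalized: ' + held));
let obj = {};
// obj serves as its own unregister token, as in the test above; the token
// is held weakly, so it does not keep obj alive.
registry.register(obj, 'held-value', obj);
// While obj is still reachable, its entry can be removed by token, which
// suppresses the cleanup callback for that registration.
registry.unregister(obj);
```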
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index fd45ebacbd..58dcd6c9ed 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -548,13 +548,14 @@ var prettyPrinted;
assertInstanceof = function assertInstanceof(obj, type) {
if (!(obj instanceof type)) {
var actualTypeName = null;
- var actualConstructor = Object.getPrototypeOf(obj).constructor;
- if (typeof actualConstructor === "function") {
+ var actualConstructor = obj && Object.getPrototypeOf(obj).constructor;
+ if (typeof actualConstructor === 'function') {
actualTypeName = actualConstructor.name || String(actualConstructor);
}
- failWithMessage("Object <" + prettyPrinted(obj) + "> is not an instance of <" +
- (type.name || type) + ">" +
- (actualTypeName ? " but of <" + actualTypeName + ">" : ""));
+ failWithMessage(
+ 'Object <' + prettyPrinted(obj) + '> is not an instance of <' +
+ (type.name || type) + '>' +
+ (actualTypeName ? ' but of <' + actualTypeName + '>' : ''));
}
};
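The newly added `obj &&` guard matters because `Object.getPrototypeOf` throws for null and undefined, which would turn the failure report itself into an unrelated TypeError:

```js
// What the guard protects against:
try {
  Object.getPrototypeOf(null).constructor;
} catch (e) {
  print(e instanceof TypeError);  // true
}
// With the guard, actualConstructor is simply null/undefined and the
// assertion message falls back to omitting the actual type name.
```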
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index dd3b2dcb87..989c097908 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -62,11 +62,8 @@
# Issue 9380: Memory leaks of shared WebAssembly.Memory objects
'wasm/shared-memory-worker-gc': [SKIP],
- # crbug.com/v8/10120: This test is meant to pass (only) in no_i18n builds,
- # but currently doesn't:
- 'es6/unicode-regexp-ignore-case-noi18n': [FAIL],
- # Once the bug is fixed, the expectations should be:
- # 'es6/unicode-regexp-ignore-case-noi18n': [PASS, ['no_i18n == False', FAIL]],
+ # BUG(v8:10197)
+ 'regress/regress-748069': [SKIP],
##############################################################################
# Tests where variants make no sense.
@@ -158,6 +155,10 @@
# Predictable tests fail due to race between postMessage and GrowMemory
'regress/wasm/regress-1010272': [PASS, NO_VARIANTS, ['system == android', SKIP], ['predictable', SKIP]],
+ # Only makes sense in the no_i18n variant.
+ 'es6/unicode-regexp-ignore-case-noi18n':
+ [['no_i18n == True', PASS], ['no_i18n == False', FAIL]],
+
# Needs to be adapted after changes to Function constructor. chromium:1065094
'cross-realm-filtering': [SKIP],
}], # ALWAYS
@@ -309,6 +310,7 @@
'regress/regress-crbug-772056': [SKIP],
'regress/regress-crbug-816961': [SKIP],
'regress/regress-crbug-969498': [SKIP],
+ 'regress/regress-crbug-1047368': [SKIP],
'regress/wasm/*': [SKIP],
'regress/regress-8947': [SKIP],
'regress/regress-9165': [SKIP],
@@ -382,16 +384,17 @@
##############################################################################
['no_i18n', {
- # case-insensitive unicode regexp relies on case mapping provided by ICU.
+ # Case-insensitive unicode regexp relies on case mapping provided by ICU.
'es6/unicode-regexp-ignore-case': [FAIL],
'regress/regress-5036': [FAIL],
'es7/regexp-ui-word': [FAIL],
'regexp-modifiers-i18n': [FAIL],
'regexp-modifiers-autogenerated-i18n': [FAIL],
- # desugaring regexp property class relies on ICU.
- 'harmony/regexp-property-*': [FAIL],
- 'harmony/regexp-property-invalid': [PASS],
- 'regress/regress-793588': [FAIL],
+
+ # Desugaring regexp property class relies on ICU. Anything goes as long as we
+ # don't crash.
+ 'harmony/regexp-property-*': [PASS,FAIL],
+ 'regress/regress-793588': [PASS,FAIL],
# noi18n build cannot parse characters in supplementary plane.
'harmony/regexp-named-captures': [FAIL],
@@ -431,6 +434,7 @@
# 32-bit platforms
['arch in (ia32, arm, mips, mipsel)', {
# Needs >2GB of available contiguous memory.
+ 'wasm/grow-huge-memory': [SKIP],
'wasm/huge-memory': [SKIP],
'wasm/huge-typedarray': [SKIP],
}], # 'arch in (ia32, arm, mips, mipsel)'
@@ -796,6 +800,7 @@
# OOM:
'regress/regress-752764': [FAIL],
# Flaky OOM:
+ 'regress/regress-748069': [SKIP],
'regress/regress-779407': [SKIP],
'regress/regress-852258': [SKIP],
}], # 'system == android'
@@ -848,6 +853,8 @@
'compiler/serializer-dead-after-jump': [SKIP],
'compiler/serializer-dead-after-return': [SKIP],
'compiler/serializer-transition-propagation': [SKIP],
+ 'regress/regress-1049982-1': [SKIP],
+ 'regress/regress-1049982-2': [SKIP],
# Bounds check triggers forced deopt for array constructors.
'array-constructor-feedback': [SKIP],
@@ -858,9 +865,6 @@
# Forced optimisation path tests.
'shared-function-tier-up-turbo': [SKIP],
- # Fails deopt_fuzzer due to --deopt_every_n_times
- 'es6/array-iterator-turbo': [SKIP],
-
# Too slow tests.
'regress/regress-740784': [SKIP],
@@ -983,7 +987,6 @@
['variant == stress', {
# Slow tests.
'array-natives-elements': [SKIP],
- 'es6/array-iterator-turbo': [SKIP],
'ignition/regress-599001-verifyheap': [SKIP],
'unicode-test': [SKIP],
@@ -1127,9 +1130,13 @@
'compiler/serializer-transition-propagation': [SKIP],
# Some tests rely on inlining.
+ 'compiler/inlined-call-polymorphic': [SKIP],
'compiler/opt-higher-order-functions': [SKIP],
'regress/regress-1049982-1': [SKIP],
'regress/regress-1049982-2': [SKIP],
+
+ # interrupt_budget overrides don't work with TurboProp.
+ 'interrupt-budget-override': [SKIP],
}], # variant == turboprop
##############################################################################
@@ -1145,4 +1152,12 @@
'wasm/many-modules': [SKIP],
}], # variant == stress_js_bg_compile_wasm_code_gc
+##############################################################################
+['variant == assert_types', {
+ # Type assertions can lead to differences in representation selection,
+ # which in turn can lead to different deopt behavior.
+ 'compiler/number-abs': [SKIP],
+ 'compiler/number-toboolean': [SKIP],
+}], # variant == assert_types
+
]
diff --git a/deps/v8/test/mjsunit/non-extensible-array-reduce.js b/deps/v8/test/mjsunit/non-extensible-array-reduce.js
index 1bc87b7add..c90407fa4c 100644
--- a/deps/v8/test/mjsunit/non-extensible-array-reduce.js
+++ b/deps/v8/test/mjsunit/non-extensible-array-reduce.js
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt --no-lazy-feedback-allocation
-/**
- * @fileoverview Test reduce and reduceRight
- */
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
function clone(v) {
// Shallow-copies arrays, returns everything else verbatim.
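The TODO above, repeated across several tests in this patch, asks for an explicit deopt assertion instead of the extra flag. Under `--allow-natives-syntax`, such a check might look roughly like this sketch (using the mjsunit `assertUnoptimized` helper):

```js
function f(x) {
  %DeoptimizeNow();
  return x + 1;
}
%PrepareFunctionForOptimization(f);
f(1); f(2);
%OptimizeFunctionOnNextCall(f);
f(3);                  // triggers the forced deopt
assertUnoptimized(f);  // fails if the deopt did not actually happen
```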
diff --git a/deps/v8/test/mjsunit/optimized-array-every.js b/deps/v8/test/mjsunit/optimized-array-every.js
index 30578c55f0..57f2fa6f19 100644
--- a/deps/v8/test/mjsunit/optimized-array-every.js
+++ b/deps/v8/test/mjsunit/optimized-array-every.js
@@ -3,7 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
-// Flags: --no-always-opt
+// Flags: --no-always-opt --no-lazy-feedback-allocation
+
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
// Early exit from every functions properly.
(() => {
diff --git a/deps/v8/test/mjsunit/optimized-array-find.js b/deps/v8/test/mjsunit/optimized-array-find.js
index b8f3baa28c..9ca2f09b22 100644
--- a/deps/v8/test/mjsunit/optimized-array-find.js
+++ b/deps/v8/test/mjsunit/optimized-array-find.js
@@ -3,7 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
-// Flags: --no-always-opt
+// Flags: --no-always-opt --no-lazy-feedback-allocation
+
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
// Unknown field access leads to soft-deopt unrelated to find, should still
// lead to correct result.
diff --git a/deps/v8/test/mjsunit/optimized-array-findindex.js b/deps/v8/test/mjsunit/optimized-array-findindex.js
index 299ae0a2bf..d0b69e646e 100644
--- a/deps/v8/test/mjsunit/optimized-array-findindex.js
+++ b/deps/v8/test/mjsunit/optimized-array-findindex.js
@@ -3,7 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
-// Flags: --no-always-opt
+// Flags: --no-always-opt --no-lazy-feedback-allocation
+
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
// Unknown field access leads to soft-deopt unrelated to findIndex, should still
// lead to correct result.
diff --git a/deps/v8/test/mjsunit/optimized-array-some.js b/deps/v8/test/mjsunit/optimized-array-some.js
index 73862702a2..ede0449244 100644
--- a/deps/v8/test/mjsunit/optimized-array-some.js
+++ b/deps/v8/test/mjsunit/optimized-array-some.js
@@ -3,7 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --turbo-inline-array-builtins --opt
-// Flags: --no-always-opt
+// Flags: --no-always-opt --no-lazy-feedback-allocation
+
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
// Early exit from some functions properly.
(() => {
diff --git a/deps/v8/test/mjsunit/optimized-filter.js b/deps/v8/test/mjsunit/optimized-filter.js
index 97eb1f2378..e14ed69e6f 100644
--- a/deps/v8/test/mjsunit/optimized-filter.js
+++ b/deps/v8/test/mjsunit/optimized-filter.js
@@ -3,7 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
-// Flags: --opt --no-always-opt
+// Flags: --opt --no-always-opt --no-lazy-feedback-allocation
+
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
// Unknown field access leads to soft-deopt unrelated to filter, should still
// lead to correct result.
diff --git a/deps/v8/test/mjsunit/optimized-foreach.js b/deps/v8/test/mjsunit/optimized-foreach.js
index 3f39d3cd2e..c114f54f55 100644
--- a/deps/v8/test/mjsunit/optimized-foreach.js
+++ b/deps/v8/test/mjsunit/optimized-foreach.js
@@ -3,6 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
+// Flags: --no-lazy-feedback-allocation
+
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
var a = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
diff --git a/deps/v8/test/mjsunit/optimized-map.js b/deps/v8/test/mjsunit/optimized-map.js
index 1095f7baf2..9893428576 100644
--- a/deps/v8/test/mjsunit/optimized-map.js
+++ b/deps/v8/test/mjsunit/optimized-map.js
@@ -3,7 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
-// Flags: --opt --no-always-opt
+// Flags: --opt --no-always-opt --no-lazy-feedback-allocation
+
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
var a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,0,0];
var b = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
diff --git a/deps/v8/test/mjsunit/prepare-missing-label-syntax-error.js b/deps/v8/test/mjsunit/prepare-missing-label-syntax-error.js
new file mode 100644
index 0000000000..9aa13aa8fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/prepare-missing-label-syntax-error.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("function f() { break }", SyntaxError);
+assertThrows("function f() { break a }", SyntaxError);
+assertThrows("function f() { continue }", SyntaxError);
+assertThrows("function f() { continue a }", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index 0cad7e7164..e652525914 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
function testEscape(str, regex) {
assertEquals("foo:bar:baz", str.split(regex).join(":"));
}
@@ -835,3 +837,13 @@ assertEquals("[/]]", /[/]]/.source);
assertEquals("[[/]]", /[[/]]/.source);
assertEquals("[[\\/]", /[[\/]/.source);
assertEquals("[[\\/]]", /[[\/]]/.source);
+assertEquals("\\n", new RegExp("\\\n").source);
+assertEquals("\\r", new RegExp("\\\r").source);
+assertEquals("\\u2028", new RegExp("\\\u2028").source);
+assertEquals("\\u2029", new RegExp("\\\u2029").source);
+
+{
+ // No escapes needed, the original string should be reused as `.source`.
+ const pattern = "\\n";
+ assertTrue(%ReferenceEqual(pattern, new RegExp(pattern).source));
+}
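The new assertions pin down how line terminators in a pattern are re-escaped in `.source`, keeping it round-trippable through `eval`. A plain-JS illustration of the invariant:

```js
const re = new RegExp('\\\n');   // pattern: backslash + a literal newline
print(re.source);                // "\n" -- the terminator is re-escaped
// Round-trip: embedding .source between slashes yields an equivalent regexp.
const re2 = eval('/' + re.source + '/');
print(re2.test('\n'));           // true
```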
diff --git a/deps/v8/test/mjsunit/regress/regress-1049982-1.js b/deps/v8/test/mjsunit/regress/regress-1049982-1.js
index dd170386db..8400646ecb 100644
--- a/deps/v8/test/mjsunit/regress/regress-1049982-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-1049982-1.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --opt
const xs = [1,2,3,4,5,6,7,8,9];
let deopt = false;
diff --git a/deps/v8/test/mjsunit/regress/regress-1049982-2.js b/deps/v8/test/mjsunit/regress/regress-1049982-2.js
index 90e96b8d5b..d7fe4a5f3a 100644
--- a/deps/v8/test/mjsunit/regress/regress-1049982-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-1049982-2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --opt
const xs = [1,2,3,4,5,6,7,8,9];
let deopt = false;
diff --git a/deps/v8/test/mjsunit/regress-906893.js b/deps/v8/test/mjsunit/regress/regress-906893.js
index 981b5824cb..981b5824cb 100644
--- a/deps/v8/test/mjsunit/regress-906893.js
+++ b/deps/v8/test/mjsunit/regress/regress-906893.js
diff --git a/deps/v8/test/mjsunit/regress-918763.js b/deps/v8/test/mjsunit/regress/regress-918763.js
index 2e70536a18..2e70536a18 100644
--- a/deps/v8/test/mjsunit/regress-918763.js
+++ b/deps/v8/test/mjsunit/regress/regress-918763.js
diff --git a/deps/v8/test/mjsunit/regress-930045.js b/deps/v8/test/mjsunit/regress/regress-930045.js
index 8983c2014a..8983c2014a 100644
--- a/deps/v8/test/mjsunit/regress-930045.js
+++ b/deps/v8/test/mjsunit/regress/regress-930045.js
diff --git a/deps/v8/test/mjsunit/regress-932101.js b/deps/v8/test/mjsunit/regress/regress-932101.js
index 720ee11fef..720ee11fef 100644
--- a/deps/v8/test/mjsunit/regress-932101.js
+++ b/deps/v8/test/mjsunit/regress/regress-932101.js
diff --git a/deps/v8/test/mjsunit/regress-952682.js b/deps/v8/test/mjsunit/regress/regress-952682.js
index dd0e687bf0..dd0e687bf0 100644
--- a/deps/v8/test/mjsunit/regress-952682.js
+++ b/deps/v8/test/mjsunit/regress/regress-952682.js
diff --git a/deps/v8/test/mjsunit/regress-956426.js b/deps/v8/test/mjsunit/regress/regress-956426.js
index 93ccd7d36d..93ccd7d36d 100644
--- a/deps/v8/test/mjsunit/regress-956426.js
+++ b/deps/v8/test/mjsunit/regress/regress-956426.js
diff --git a/deps/v8/test/mjsunit/regress-958725.js b/deps/v8/test/mjsunit/regress/regress-958725.js
index bb0bd11055..bb0bd11055 100644
--- a/deps/v8/test/mjsunit/regress-958725.js
+++ b/deps/v8/test/mjsunit/regress/regress-958725.js
diff --git a/deps/v8/test/mjsunit/regress-963346.js b/deps/v8/test/mjsunit/regress/regress-963346.js
index 8f29556210..8f29556210 100644
--- a/deps/v8/test/mjsunit/regress-963346.js
+++ b/deps/v8/test/mjsunit/regress/regress-963346.js
diff --git a/deps/v8/test/mjsunit/regress-966460.js b/deps/v8/test/mjsunit/regress/regress-966460.js
index 8acf49b5a5..8acf49b5a5 100644
--- a/deps/v8/test/mjsunit/regress-966460.js
+++ b/deps/v8/test/mjsunit/regress/regress-966460.js
diff --git a/deps/v8/test/mjsunit/regress-crbug-1025468.js b/deps/v8/test/mjsunit/regress/regress-crbug-1025468.js
index bbfe72c4c0..bbfe72c4c0 100644
--- a/deps/v8/test/mjsunit/regress-crbug-1025468.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1025468.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1047368.js b/deps/v8/test/mjsunit/regress/regress-crbug-1047368.js
new file mode 100644
index 0000000000..800cf61879
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1047368.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --wasm-staging
+
+new WebAssembly.Function({
+ parameters: [],
+ results: []
+ }, x => x);
+const long_variable = {
+ toString: () => {
+ }
+};
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1057653.js b/deps/v8/test/mjsunit/regress/regress-crbug-1057653.js
new file mode 100644
index 0000000000..343e72367a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1057653.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.prototype.length = 3642395160;
+const array = new Float32Array(2**27);
+
+assertThrows(() => {for (const key in array) {}}, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1059738.js b/deps/v8/test/mjsunit/regress/regress-crbug-1059738.js
new file mode 100644
index 0000000000..1b10bb4213
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1059738.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = {4294967295: ({4294967295: NaN}) + "foo"};
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1072947.js b/deps/v8/test/mjsunit/regress/regress-crbug-1072947.js
new file mode 100644
index 0000000000..6a7518a46c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1072947.js
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ class reg extends RegExp {}
+
+ let r;
+ function trigger() {
+ try {
+ trigger();
+ } catch {
+ Reflect.construct(RegExp,[],reg);
+ }
+ }
+ trigger();
+})();
+
+(function() {
+ class reg extends Function {}
+
+ let r;
+ function trigger() {
+ try {
+ trigger();
+ } catch {
+ Reflect.construct(RegExp,[],reg);
+ }
+ }
+ trigger();
+})();
diff --git a/deps/v8/test/mjsunit/regress-regexp-functional-replace-slow.js b/deps/v8/test/mjsunit/regress/regress-regexp-functional-replace-slow.js
index 033bcee1ef..033bcee1ef 100644
--- a/deps/v8/test/mjsunit/regress-regexp-functional-replace-slow.js
+++ b/deps/v8/test/mjsunit/regress/regress-regexp-functional-replace-slow.js
diff --git a/deps/v8/test/mjsunit/regress-v8-8445-2.js b/deps/v8/test/mjsunit/regress/regress-v8-8445-2.js
index a6145805d3..a6145805d3 100644
--- a/deps/v8/test/mjsunit/regress-v8-8445-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-8445-2.js
diff --git a/deps/v8/test/mjsunit/regress-v8-8445.js b/deps/v8/test/mjsunit/regress/regress-v8-8445.js
index 1bed026d42..1bed026d42 100644
--- a/deps/v8/test/mjsunit/regress-v8-8445.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-8445.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-10126-streaming.js b/deps/v8/test/mjsunit/regress/wasm/regress-10126-streaming.js
new file mode 100644
index 0000000000..bb6df82f55
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-10126-streaming.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-test-streaming
+
+load('test/mjsunit/regress/wasm/regress-10126.js')
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-10126.js b/deps/v8/test/mjsunit/regress/wasm/regress-10126.js
new file mode 100644
index 0000000000..4985dfedb7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-10126.js
@@ -0,0 +1,32 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js')
+
+let binary = new Binary;
+binary.emit_bytes([
+ kWasmH0, // 0 header
+ kWasmH1, // 1 -
+ kWasmH2, // 2 -
+ kWasmH3, // 3 -
+ kWasmV0, // 4 version
+ kWasmV1, // 5 -
+ kWasmV2, // 6 -
+ kWasmV3, // 7 -
+ kUnknownSectionCode, // 8 custom section
+ 0x5, // 9 length
+ 0x6, // 10 invalid name length
+ 'a', // 11 payload
+ 'b', // 12 -
+ 'c', // 13 -
+ 'd', // 14 -
+ kCodeSectionCode, // 15 code section start
+ 0x1, // 16 code section length
+ 19, // 17 invalid number of functions
+]);
+
+const buffer = binary.trunc_buffer();
+assertThrowsAsync(
+ WebAssembly.compile(buffer), WebAssembly.CompileError,
+ 'WebAssembly.compile(): expected 6 bytes, fell off end @+11');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-10309.js b/deps/v8/test/mjsunit/regress/wasm/regress-10309.js
new file mode 100644
index 0000000000..ce73b783ef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-10309.js
@@ -0,0 +1,64 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-simd
+
+let registry = {};
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+let f32 = Math.fround;
+
+// simple.wast:1
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x09\x02\x60\x00\x00\x60\x01\x7f\x01\x7d\x03\x04\x03\x00\x00\x01\x05\x03\x01\x00\x01\x07\x1c\x02\x11\x72\x65\x70\x6c\x61\x63\x65\x5f\x6c\x61\x6e\x65\x5f\x74\x65\x73\x74\x00\x01\x04\x72\x65\x61\x64\x00\x02\x08\x01\x00\x0a\x6e\x03\x2a\x00\x41\x10\x43\x00\x00\x80\x3f\x38\x02\x00\x41\x14\x43\x00\x00\x00\x40\x38\x02\x00\x41\x18\x43\x00\x00\x40\x40\x38\x02\x00\x41\x1c\x43\x00\x00\x80\x40\x38\x02\x00\x0b\x39\x01\x01\x7b\x41\x10\x2a\x02\x00\xfd\x12\x21\x00\x20\x00\x41\x10\x2a\x01\x04\xfd\x14\x01\x21\x00\x20\x00\x41\x10\x2a\x01\x08\xfd\x14\x02\x21\x00\x20\x00\x41\x10\x2a\x01\x0c\xfd\x14\x03\x21\x00\x41\x00\x20\x00\xfd\x01\x02\x00\x0b\x07\x00\x20\x00\x2a\x02\x00\x0b");
+
+// simple.wast:49
+call($1, "replace_lane_test", []);
+
+// simple.wast:50
+assert_return(() => call($1, "read", [0]), f32(1.0));
+
+// simple.wast:51
+assert_return(() => call($1, "read", [4]), f32(2.0));
+
+// simple.wast:52
+assert_return(() => call($1, "read", [8]), f32(3.0));
+
+// simple.wast:53
+assert_return(() => call($1, "read", [12]), f32(4.0));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1045225.js b/deps/v8/test/mjsunit/regress/wasm/regress-1045225.js
new file mode 100644
index 0000000000..3ac81f5485
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1045225.js
@@ -0,0 +1,28 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function() {
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(16, 32, false, true);
+ builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+ // Generate function 1 (out of 1).
+ builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprI32Const, 0x80, 0x01,
+kExprI32Clz,
+kExprI32Const, 0x00,
+kExprI64Const, 0x00,
+kAtomicPrefix, kExprI64AtomicStore8U, 0x00, 0x00,
+kExprEnd, // @13
+ ]);
+ builder.addExport('main', 0);
+ const instance = builder.instantiate();
+ print(instance.exports.main(1, 2, 3));
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1046472.js b/deps/v8/test/mjsunit/regress/wasm/regress-1046472.js
new file mode 100644
index 0000000000..056006b926
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1046472.js
@@ -0,0 +1,33 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --liftoff --no-wasm-tier-up --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function() {
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(16, 32, false);
+ builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+ // Generate function 1 (out of 1).
+ builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprI32Const, 0x20,
+kExprI64LoadMem, 0x00, 0xce, 0xf2, 0xff, 0x01,
+kExprBlock, kWasmF32, // @9 f32
+ kExprI32Const, 0x04,
+ kExprI32Const, 0x01,
+ kExprBrTable, 0x01, 0x01, 0x00, // entries=1
+ kExprEnd, // @19
+kExprUnreachable,
+kExprEnd, // @21
+ ]);
+ builder.addExport('main', 0);
+ assertThrows(
+ () => {builder.toModule()}, WebAssembly.CompileError,
+ 'WebAssembly.Module(): Compiling function #0:\"main\" failed: type ' +
+ 'error in merge[0] (expected <bot>, got i32) @+57');
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1048241.js b/deps/v8/test/mjsunit/regress/wasm/regress-1048241.js
new file mode 100644
index 0000000000..5e0e43c9ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1048241.js
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false, true);
+const sig = makeSig([kWasmF64, kWasmI64, kWasmI32, kWasmF64], []);
+builder.addFunction(undefined, sig).addBody([
+ kExprI32Const, 0x00, // -
+ kExprI32Const, 0x00, // -
+ kExprI32Const, 0x00, // -
+ kAtomicPrefix, kExprI32AtomicXor16U, 0x01, 0x00, // -
+ kAtomicPrefix, kExprI32AtomicStore8U, 0x00, 0x00, // -
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1051912.js b/deps/v8/test/mjsunit/regress/wasm/regress-1051912.js
new file mode 100644
index 0000000000..49ffa8cf8e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1051912.js
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js')
+
+let binary = new Binary();
+binary.emit_header();
+binary.emit_bytes([kTypeSectionCode, 4, 1, kWasmFunctionTypeForm, 0, 0]);
+binary.emit_bytes([kFunctionSectionCode, 2, 1, 0]);
+binary.emit_bytes([kCodeSectionCode, 6, 1, 4]);
+binary.emit_bytes([kUnknownSectionCode, 2, 1, 0]);
+binary.emit_bytes([kUnknownSectionCode, 2, 1, 0]);
+binary.emit_bytes([kUnknownSectionCode, 2, 1, 0]);
+binary.emit_bytes([ kExprEnd]);
+let buffer = binary.trunc_buffer();
+WebAssembly.compile(buffer);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js b/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js
new file mode 100644
index 0000000000..85ee43732b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js
@@ -0,0 +1,52 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-simd
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addLocals({i32_count: 2}).addLocals({f32_count: 2})
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0xf9, 0x00, // i32.const
+kExprI32Ior, // i32.or
+kExprI32Eqz, // i32.eqz
+kExprI32Add, // i32.Add
+kSimdPrefix, kExprI32x4Splat, // i32x4.splat
+kExprF32Const, 0x46, 0x5d, 0x00, 0x00, // f32.const
+kExprI32Const, 0x83, 0x01, // i32.const
+kExprI32Const, 0x83, 0x01, // i32.const
+kExprI32Const, 0x83, 0x01, // i32.const
+kExprI32Add, // i32.Add
+kExprI32Add, // i32.Add
+kExprIf, kWasmI32, // if @33 i32
+ kExprI32Const, 0x00, // i32.const
+kExprElse, // else @37
+ kExprI32Const, 0x00, // i32.const
+ kExprEnd, // end @40
+kExprIf, kWasmI32, // if @41 i32
+ kExprI32Const, 0x00, // i32.const
+kExprElse, // else @45
+ kExprI32Const, 0x00, // i32.const
+ kExprEnd, // end @48
+kExprF32ReinterpretI32, // f32.reinterpret_i32
+kExprF32Max, // f32.max
+kSimdPrefix, kExprF32x4Splat, // f32x4.splat
+kExprI32Const, 0x83, 0x01, // i32.const
+kSimdPrefix, kExprI32x4Splat, // i32x4.splat
+kSimdPrefix, kExprI32x4Add, // i32x4.add
+kSimdPrefix, kExprI32x4Add, // i32x4.add
+kSimdPrefix, kExprS1x8AnyTrue, // s1x8.any_true
+kExprEnd, // end @64
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+print(instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1055692.js b/deps/v8/test/mjsunit/regress/wasm/regress-1055692.js
new file mode 100644
index 0000000000..a16180ab5c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1055692.js
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging --wasm-interpret-all --experimental-wasm-simd
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprI32Const, 0x75, // i32.const
+kExprI32Const, 0x74, // i32.const
+kExprI32Const, 0x18, // i32.const
+kSimdPrefix, kExprS8x16LoadSplat, // s8x16.load_splat
+kExprUnreachable, // unreachable
+kExprUnreachable, // unreachable
+kExprI32Const, 0x6f, // i32.const
+kExprI32Const, 0x7f, // i32.const
+kExprI32Const, 0x6f, // i32.const
+kExprDrop,
+kExprDrop,
+kExprDrop,
+kExprDrop,
+kExprDrop,
+kExprEnd, // end @18
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+print(instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-724846.js b/deps/v8/test/mjsunit/regress/wasm/regress-724846.js
index 93220a1c9c..62235bf517 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-724846.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-724846.js
@@ -10,4 +10,4 @@ let builder = new WasmModuleBuilder();
const num_pages = 49152;
builder.addMemory(num_pages, num_pages);
// num_pages * 64k (page size) > kMaxInt.
-assertThrows(() => builder.instantiate(), RangeError);
+assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-789952.js b/deps/v8/test/mjsunit/regress/wasm/regress-789952.js
deleted file mode 100644
index f73d8dc471..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-789952.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let module_size = 19;
-let string_len = 0x00fffff0 - module_size;
-
-print("Allocating backing store: " + (string_len + module_size));
-let backing = new ArrayBuffer(string_len + module_size);
-
-print("Allocating typed array buffer");
-let buffer = new Uint8Array(backing);
-
-print("Filling...");
-buffer.fill(0x41);
-
-print("Setting up array buffer");
-// Magic
-buffer.set([0x00, 0x61, 0x73, 0x6D], 0);
-// Version
-buffer.set([0x01, 0x00, 0x00, 0x00], 4);
-// kUnknownSection (0)
-buffer.set([0], 8);
-// Section length
-buffer.set([0x80, 0x80, 0x80, 0x80, 0x00], 9);
-// Name length
-let x = string_len + 1;
-let b1 = ((x >> 0) & 0x7F) | 0x80;
-let b2 = ((x >> 7) & 0x7F) | 0x80;
-let b3 = ((x >> 14) & 0x7F) | 0x80;
-let b4 = ((x >> 21) & 0x7F);
-//buffer.set([0xDE, 0xFF, 0xFF, 0x7F], 14);
- buffer.set([b1, b2, b3, b4], 14);
-
-print("Parsing module...");
-let m = new WebAssembly.Module(buffer);
-
-print("Triggering!");
-let c = WebAssembly.Module.customSections(m, "A".repeat(string_len + 1));
-assertEquals(0, c.length);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1057094.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1057094.js
new file mode 100644
index 0000000000..dc1703178c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1057094.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-max-mem-pages=65536
+
+try {
+ var __v_50189 = new WebAssembly.Memory({
+ initial: 65536
+ });
+} catch (e) {
+  // 32-bit builds will throw a RangeError; that's okay.
+ assertTrue(e instanceof RangeError);
+}
diff --git a/deps/v8/test/mjsunit/sealed-array-reduce.js b/deps/v8/test/mjsunit/sealed-array-reduce.js
index a572aa2cc4..2b59c44028 100644
--- a/deps/v8/test/mjsunit/sealed-array-reduce.js
+++ b/deps/v8/test/mjsunit/sealed-array-reduce.js
@@ -3,10 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --no-lazy-feedback-allocation
-/**
- * @fileoverview Test reduce and reduceRight
- */
+// TODO(v8:10195): Fix these tests s.t. we assert deoptimization occurs when
+// expected (e.g. in a %DeoptimizeNow call), then remove
+// --no-lazy-feedback-allocation.
function clone(v) {
// Shallow-copies arrays, returns everything else verbatim.
diff --git a/deps/v8/test/mjsunit/tools/foozzie.js b/deps/v8/test/mjsunit/tools/foozzie.js
new file mode 100644
index 0000000000..30faf46116
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/foozzie.js
@@ -0,0 +1,79 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Files: tools/clusterfuzz/v8_mock.js
+
+// Test foozzie mocks for differential fuzzing.
+
+// Deterministic Math.random.
+assertEquals(0.1, Math.random());
+assertEquals(0.2, Math.random());
+assertEquals(0.3, Math.random());
+
+// Deterministic date.
+assertEquals(1477662728698, Date.now());
+assertEquals(1477662728701, Date.now());
+assertEquals(1477662728705, new Date().getTime());
+assertEquals(710, new Date.prototype.constructor().getUTCMilliseconds());
+
+// Deterministic arguments in constructor keep working.
+assertEquals(819134640000,
+ new Date('December 17, 1995 03:24:00 GMT+1000').getTime());
+
+// Dummy performance methods.
+assertEquals(1.2, performance.now());
+assertEquals([], performance.measureMemory());
+
+// Worker messages follow a predefined deterministic pattern.
+const worker = new Worker(``, {type: 'string'});
+assertEquals(0, worker.getMessage());
+assertEquals(-1, worker.getMessage());
+
+// NaN patterns in typed arrays are mocked out. Test that we get no
+// difference between unoptimized and optimized code.
+function testSameOptimized(pattern, create_fun) {
+ const expected = new Uint32Array(pattern);
+ %PrepareFunctionForOptimization(create_fun);
+ assertEquals(expected, create_fun());
+ %OptimizeFunctionOnNextCall(create_fun);
+ assertEquals(expected, create_fun());
+}
+
+function testArrayType(arrayType, pattern) {
+ // Test passing NaNs to constructor with array.
+ let create = function() {
+ return new Uint32Array(new arrayType([-NaN]).buffer);
+ };
+ testSameOptimized(pattern, create);
+ // Test passing NaNs to constructor with iterator.
+ create = function() {
+ const iter = function*(){ yield* [-NaN]; }();
+ return new Uint32Array(new arrayType(iter).buffer);
+ };
+ testSameOptimized(pattern, create);
+ // Test setting NaN property.
+ create = function() {
+ const arr = new arrayType(1);
+ arr[0] = -NaN;
+ return new Uint32Array(arr.buffer);
+ };
+ // Test passing NaN using set.
+ testSameOptimized(pattern, create);
+ create = function() {
+ const arr = new arrayType(1);
+ arr.set([-NaN], 0);
+ return new Uint32Array(arr.buffer);
+ };
+ testSameOptimized(pattern, create);
+}
+
+var isBigEndian = new Uint8Array(new Uint16Array([0xABCD]).buffer)[0] === 0xAB;
+testArrayType(Float32Array, [1065353216]);
+if (isBigEndian){
+ testArrayType(Float64Array, [1072693248, 0]);
+}
+else {
+ testArrayType(Float64Array, [0, 1072693248]);
+}
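These asserts pin down the contract of the mocks without showing them; a deterministic `Math.random` of the kind being tested could be sketched as follows (illustrative only, not the actual tools/clusterfuzz/v8_mock.js code):

```js
(function mockMathRandom() {
  // Cycle a fixed sequence so two differential-fuzzing runs observe
  // identical "random" values.
  const sequence = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9];
  let index = 0;
  Math.random = function() {
    const value = sequence[index];
    index = (index + 1) % sequence.length;
    return value;
  };
})();
```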
diff --git a/deps/v8/test/mjsunit/tools/foozzie_archs.js b/deps/v8/test/mjsunit/tools/foozzie_archs.js
new file mode 100644
index 0000000000..9023428324
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/foozzie_archs.js
@@ -0,0 +1,84 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Files: tools/clusterfuzz/v8_mock.js
+// Files: tools/clusterfuzz/v8_mock_archs.js
+
+// Test foozzie architecture-specific mocks for differential fuzzing.
+
+// Max typed array length is mocked and restricted to 1MiB buffer.
+const maxBytes = 1048576;
+
+// The maximum also holds for array buffer and shared array buffer.
+assertEquals(maxBytes, new ArrayBuffer(maxBytes + 1).byteLength);
+assertEquals(maxBytes, new SharedArrayBuffer(maxBytes + 1).byteLength);
+
+function testArrayType(type) {
+ const name = type.name;
+ const bytesPerElem = type.BYTES_PER_ELEMENT;
+ const maxElem = maxBytes / bytesPerElem;
+
+ function testLength(expectedLength, arr) {
+ const expectedBytes = expectedLength * bytesPerElem;
+ assertEquals(expectedBytes, arr.byteLength, name);
+ assertEquals(expectedLength, arr.length, name);
+ }
+
+ // Test length argument in constructor.
+ testLength(maxElem - 1, new type(maxElem - 1));
+ testLength(maxElem, new type(maxElem));
+ testLength(maxElem, new type(maxElem + 1));
+
+ // Test buffer argument in constructor.
+ // Unaligned offsets don't throw.
+ const buffer = new ArrayBuffer(maxBytes);
+ new type(buffer, 1);
+ new type(buffer, 3);
+
+ // Offsets work or are capped.
+ function bytes(elements) {
+ return elements * bytesPerElem;
+ }
+ testLength(maxElem, new type(buffer, 0));
+ testLength(maxElem - 1, new type(buffer, bytes(1)));
+ testLength(1, new type(buffer, bytes(maxElem - 1)));
+ testLength(0, new type(buffer, bytes(maxElem)));
+ testLength(0, new type(buffer, bytes(maxElem + 1)));
+
+ // Offset and length work or are capped.
+ testLength(1, new type(buffer, 0, 1));
+ testLength(1, new type(buffer, bytesPerElem, 1));
+ testLength(maxElem - 2, new type(buffer, bytes(1), maxElem - 2));
+ testLength(maxElem - 1, new type(buffer, bytes(1), maxElem - 1));
+ testLength(maxElem - 1, new type(buffer, bytes(1), maxElem));
+ testLength(0, new type(buffer, bytes(maxElem - 1), 0));
+ testLength(1, new type(buffer, bytes(maxElem - 1), 1));
+ testLength(1, new type(buffer, bytes(maxElem - 1), 2));
+
+ // Insertion with "set" works or is capped.
+ let set0 = 0;
+ let set1 = 1;
+ if (name.startsWith("Big")) {
+ set0 = 0n;
+ set1 = 1n;
+ }
+ arr = new type(4);
+ arr.set([set1], 1);
+ assertEquals(new type([set0, set1, set0, set0]), arr, name);
+ arr.set([set1, set1], 3); // Capped to 2.
+ assertEquals(new type([set0, set1, set1, set1]), arr, name);
+}
+
+testArrayType(Int8Array);
+testArrayType(Uint8Array);
+testArrayType(Uint8ClampedArray);
+testArrayType(Int16Array);
+testArrayType(Uint16Array);
+testArrayType(Int32Array);
+testArrayType(Uint32Array);
+testArrayType(BigInt64Array);
+testArrayType(BigUint64Array);
+testArrayType(Float32Array);
+testArrayType(Float64Array);
diff --git a/deps/v8/test/mjsunit/tools/foozzie_webassembly.js b/deps/v8/test/mjsunit/tools/foozzie_webassembly.js
new file mode 100644
index 0000000000..d5130a393b
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/foozzie_webassembly.js
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Files: tools/clusterfuzz/v8_mock.js
+// Files: tools/clusterfuzz/v8_mock_webassembly.js
+
+// Test foozzie WebAssembly-specific mocks for differential fuzzing.
+
+// No reference errors when accessing WebAssembly.
+WebAssembly[0];
+WebAssembly[" "];
+WebAssembly.foo;
+WebAssembly.foo();
+WebAssembly.foo().bar;
+WebAssembly.foo().bar();
+WebAssembly.foo().bar[0];
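Making every chained access and call succeed, as exercised above, is naturally expressed with a self-returning Proxy. A sketch of the technique (an assumption about the approach, not the literal v8_mock_webassembly.js implementation):

```js
// A callable whose every property access, call, and construction yields
// itself, so chains like WebAssembly.foo().bar[0] can never throw.
const absorb = new Proxy(function() {}, {
  get: () => absorb,
  apply: () => absorb,
  construct: () => absorb,
});
globalThis.WebAssembly = absorb;
WebAssembly.foo().bar[0];  // no TypeError
```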
diff --git a/deps/v8/test/mjsunit/wasm/anyfunc.js b/deps/v8/test/mjsunit/wasm/anyfunc.js
index 4a53a04468..6f6384cd61 100644
--- a/deps/v8/test/mjsunit/wasm/anyfunc.js
+++ b/deps/v8/test/mjsunit/wasm/anyfunc.js
@@ -222,6 +222,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const function_index = builder.addFunction('hidden', kSig_i_v)
.addBody([kExprI32Const, expected])
.index;
+ builder.addDeclarativeElementSegment([function_index]);
builder.addFunction('main', kSig_a_v)
.addBody([kExprRefFunc, function_index])
.exportFunc();
@@ -237,6 +238,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const foo = builder.addFunction('foo', kSig_i_v)
.addBody([kExprI32Const, expected])
.exportFunc();
+ builder.addDeclarativeElementSegment([foo.index]);
builder.addFunction('main', kSig_a_v)
.addBody([kExprRefFunc, foo.index])
.exportFunc();
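Both hunks track the WebAssembly reference-types spec: `ref.func` now validates only for functions declared in an element segment, which is what the new declarative segment provides. The resulting builder pattern, sketched with the same WasmModuleBuilder API and flags as the surrounding tests:

```js
const builder = new WasmModuleBuilder();
const callee = builder.addFunction('callee', kSig_i_v)
    .addBody([kExprI32Const, 42]);
// Without this declaration, validating kExprRefFunc below fails with
// "undeclared reference to function" (see the anyref-globals test below).
builder.addDeclarativeElementSegment([callee.index]);
builder.addFunction('main', kSig_a_v)
    .addBody([kExprRefFunc, callee.index])
    .exportFunc();
builder.instantiate();
```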
diff --git a/deps/v8/test/mjsunit/wasm/anyref-globals.js b/deps/v8/test/mjsunit/wasm/anyref-globals.js
index d243e37486..ae99044a9c 100644
--- a/deps/v8/test/mjsunit/wasm/anyref-globals.js
+++ b/deps/v8/test/mjsunit/wasm/anyref-globals.js
@@ -568,7 +568,7 @@ function dummy_func() {
const f_func = builder.addFunction('get_anyfunc_global', kSig_a_v)
.addBody([kExprGlobalGet, g_func.index])
.exportAs('get_anyfunc_global');
-
+ builder.addDeclarativeElementSegment([f_ref.index, f_func.index]);
g_ref.function_index = f_ref.index;
g_func.function_index = f_func.index;
@@ -580,6 +580,18 @@ function dummy_func() {
instance.exports.get_anyfunc_global());
})();
+(function TestRefFuncGlobalInitUndeclared() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const global_func = builder.addGlobal(kWasmAnyFunc, true);
+ const func = builder.addFunction('get_anyfunc_global', kSig_v_v).addBody([]);
+ global_func.function_index = func.index;
+
+ assertThrows(
+ () => builder.toModule(), WebAssembly.CompileError,
+ /undeclared reference to function/);
+})();
+
(function TestRefFuncGlobalInitWithImport() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
@@ -588,6 +600,7 @@ function dummy_func() {
const import_js = builder.addImport('m', 'js', sig_index);
const g_wasm = builder.addGlobal(kWasmAnyFunc, true);
const g_js = builder.addGlobal(kWasmAnyFunc, true);
+ builder.addDeclarativeElementSegment([import_wasm, import_js]);
g_wasm.function_index = import_wasm;
g_js.function_index = import_js;
builder.addFunction('get_global_wasm', kSig_a_v)
diff --git a/deps/v8/test/mjsunit/wasm/anyref-table.js b/deps/v8/test/mjsunit/wasm/anyref-table.js
index de35dc8fb8..a7a82099d6 100644
--- a/deps/v8/test/mjsunit/wasm/anyref-table.js
+++ b/deps/v8/test/mjsunit/wasm/anyref-table.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-anyref
+// Flags: --experimental-wasm-anyref --experimental-wasm-bulk-memory
load("test/mjsunit/wasm/wasm-module-builder.js");
@@ -45,3 +45,35 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertThrows(() => builder.instantiate({ imp: { table: table_func } }),
WebAssembly.LinkError, /imported table does not match the expected type/);
})();
+
+(function TestAnyRefDropDeclarativeElementSegment() {
+ print(arguments.callee.name);
+
+ const builder = new WasmModuleBuilder();
+ builder.addDeclarativeElementSegment([null]);
+ builder.addFunction('drop', kSig_v_v)
+ .addBody([kNumericPrefix, kExprElemDrop, 0])
+ .exportFunc();
+ const instance = builder.instantiate();
+
+  // This counts as a double drop because declarative segments are dropped on
+  // initialization; it is therefore not expected to throw.
+ instance.exports.drop();
+})();
+
+(function TestAnyRefTableInitFromDeclarativeElementSegment() {
+ print(arguments.callee.name);
+
+ const builder = new WasmModuleBuilder();
+ const table = builder.addTable(kWasmAnyFunc, 10);
+ builder.addDeclarativeElementSegment([null]);
+ builder.addFunction('init', kSig_v_v)
+ .addBody([
+ kExprI32Const, 0, kExprI32Const, 0, kExprI32Const, 1, kNumericPrefix,
+ kExprTableInit, table.index, 0
+ ])
+ .exportFunc();
+ const instance = builder.instantiate();
+
+ assertTraps(kTrapTableOutOfBounds, () => instance.exports.init());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
index 1a89c3ef1a..6f4fa06dff 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
@@ -166,36 +166,24 @@ function f32_neg(a) {
var inputs = [
- 0, 1, 2, 3, 4,
+ 0, 1,
NaN,
Infinity,
-Infinity,
- 10, 20, 30, 31, 32, 33, 100, 2000,
- 30000, 400000, 5000000,
- 100000000, 2000000000,
2147483646,
2147483647,
2147483648,
2147483649,
- 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
- 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
- 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
- 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
- -0,
- -1, -2, -3, -4,
- -10, -20, -30, -31, -32, -33, -100, -2000,
- -30000, -400000, -5000000,
- -100000000, -2000000000,
+ 4026531840, // 0xf0000000
+ 4294967293, // 0xfffffffd
+ 4294967295, // 0xffffffff
+ -0, -1,
-2147483646,
-2147483647,
-2147483648,
-2147483649,
0.1,
1.1e-2,
- 1.2e-4,
- 1.3e-8,
- 1.4e-11,
- 1.5e-12,
1.6e-13
];
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
index a07de98558..55b4cc4dda 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
@@ -228,36 +228,24 @@ function f64_neg(a) {
var inputs = [
- 0, 1, 2, 3, 4,
+ 0, 1,
NaN,
Infinity,
-Infinity,
- 10, 20, 30, 31, 32, 33, 100, 2000,
- 30000, 400000, 5000000,
- 100000000, 2000000000,
2147483646,
2147483647,
2147483648,
2147483649,
- 0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
- 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
- 0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
- 0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
- -0,
- -1, -2, -3, -4,
- -10, -20, -30, -31, -32, -33, -100, -2000,
- -30000, -400000, -5000000,
- -100000000, -2000000000,
+ 4026531840, // 0xf0000000
+ 4294967293, // 0xfffffffd
+ 4294967295, // 0xffffffff
+ -0, -1,
-2147483646,
-2147483647,
-2147483648,
-2147483649,
0.1,
1.1e-2,
- 1.2e-4,
- 1.3e-8,
- 1.4e-11,
- 1.5e-12,
1.6e-13
];
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory.js b/deps/v8/test/mjsunit/wasm/bulk-memory.js
index 065cbfd22f..2e41fa9ee0 100644
--- a/deps/v8/test/mjsunit/wasm/bulk-memory.js
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory.js
@@ -214,3 +214,20 @@ function getMemoryFill(mem) {
assertEquals(0, view[0]);
})();
+
+(function TestPassiveDataSegmentNoMemory() {
+ const builder = new WasmModuleBuilder();
+ builder.addPassiveDataSegment([0, 1, 2]);
+
+ // Should not throw.
+ builder.instantiate();
+})();
+
+(function TestPassiveElementSegmentNoMemory() {
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('f', kSig_v_v).addBody([]);
+ builder.addPassiveElementSegment([0, 0, 0]);
+
+ // Should not throw.
+ builder.instantiate();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
index b18b11aac0..93bb68d6f6 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
@@ -146,6 +146,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
// Test storing and loading to/from an exception type table.
(function TestTableExnRef() {
+ print(arguments.callee.name);
let kSig_e_i = makeSig([kWasmI32], [kWasmExnRef]);
let kSig_v_ie = makeSig([kWasmI32, kWasmExnRef], []);
let builder = new WasmModuleBuilder();
@@ -182,3 +183,97 @@ load("test/mjsunit/wasm/exceptions-utils.js");
assertSame(e0, instance.exports.table.get(0));
assertSame(e1, instance.exports.table.get(1));
})();
+
+// 'br_on_exn' on a null-ref value should trap.
+(function TestBrOnExnNullRefSimple() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_r);
+ builder.addFunction('br_on_exn_nullref', kSig_v_v)
+ .addBody([
+ kExprRefNull,
+ kExprBrOnExn, 0, except,
+ kExprDrop
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertTraps(kTrapBrOnExnNullRef, () => instance.exports.br_on_exn_nullref());
+})();
+
+(function TestBrOnExnNullRefFromJS() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_i);
+ let imp = builder.addImport('imp', 'ort', kSig_i_i);
+ let kConstant0 = 11;
+ let kNoMatch = 13;
+ builder.addFunction('call_import', kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprLocalGet, 0,
+ kExprCallFunction, imp,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprDrop,
+ kExprI32Const, kNoMatch,
+ kExprEnd
+ ]).exportFunc();
+ let instance;
+ function js_import(i) {
+ if (i == 0) return kConstant0; // Will return kConstant0.
+ if (i == 1) throw new Error('1'); // Will not match.
+ if (i == 2) throw null; // Will trap.
+ throw undefined; // Will not match.
+ }
+ instance = builder.instantiate({imp: {ort: js_import}});
+
+ assertEquals(kConstant0, instance.exports.call_import(0));
+ assertEquals(kNoMatch, instance.exports.call_import(1));
+ assertTraps(kTrapBrOnExnNullRef, () => instance.exports.call_import(2));
+ assertEquals(kNoMatch, instance.exports.call_import(3));
+})();
+
+// 'rethrow' on a null-ref value should trap.
+(function TestRethrowNullRefSimple() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_r);
+ builder.addFunction('rethrow_nullref', kSig_v_v)
+ .addBody([
+ kExprRefNull,
+ kExprRethrow
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertTraps(kTrapRethrowNullRef, () => instance.exports.rethrow_nullref());
+})();
+
+(function TestRethrowNullRefFromJS() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_i);
+ let imp = builder.addImport('imp', 'ort', kSig_i_i);
+ let kSuccess = 11;
+ builder.addFunction('call_import', kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprLocalGet, 0,
+ kExprCallFunction, imp,
+ kExprCatch,
+ kExprRethrow,
+ kExprEnd
+ ]).exportFunc();
+ let instance;
+ function js_import(i) {
+ if (i == 0) return kSuccess; // Will return kSuccess.
+ if (i == 1) throw new Error('1'); // Will rethrow.
+ if (i == 2) throw null; // Will trap.
+ throw undefined; // Will rethrow.
+ }
+ instance = builder.instantiate({imp: {ort: js_import}});
+
+ assertEquals(kSuccess, instance.exports.call_import(0));
+ assertThrows(() => instance.exports.call_import(1), Error, '1');
+ assertTraps(kTrapRethrowNullRef, () => instance.exports.call_import(2));
+ assertThrowsEquals(() => instance.exports.call_import(3), undefined);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index ecaf0d06c9..fc82455ca8 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -85,6 +85,110 @@ load("test/mjsunit/wasm/exceptions-utils.js");
assertEquals(42, instance.exports.simple_throw_catch_to_0_1(1));
})();
+(function TestTrapNotCaught() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('unreachable_in_try', kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprUnreachable,
+ kExprCatch,
+ kExprDrop,
+ kExprEnd
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertTraps(kTrapUnreachable, () => instance.exports.unreachable_in_try());
+})();
+
+(function TestTrapInCalleeNotCaught() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let func_div = builder.addFunction('div', kSig_i_ii).addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprI32DivU
+ ]);
+ builder.addFunction('trap_in_callee', kSig_i_ii)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprCallFunction, func_div.index,
+ kExprCatch,
+ kExprDrop,
+ kExprI32Const, 11,
+ kExprEnd
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertEquals(3, instance.exports.trap_in_callee(7, 2));
+ assertTraps(kTrapDivByZero, () => instance.exports.trap_in_callee(1, 0));
+})();
+
+(function TestTrapViaJSNotCaught() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let imp = builder.addImport('imp', 'ort', kSig_i_v);
+ builder.addFunction('div', kSig_i_ii)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprI32DivU
+ ]).exportFunc();
+ builder.addFunction('call_import', kSig_i_v)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprCallFunction, imp,
+ kExprCatch,
+ kExprDrop,
+ kExprI32Const, 11,
+ kExprEnd
+ ]).exportFunc();
+ let exception = undefined;
+ let instance;
+ function js_import() {
+ try {
+ instance.exports.div(1, 0);
+ } catch (e) {
+ exception = e;
+ }
+ throw exception;
+ }
+ instance = builder.instantiate({imp: {ort: js_import}});
+ let caught = undefined;
+ try {
+ let res = instance.exports.call_import();
+ assertUnreachable('call_import should trap, but returned with ' + res);
+ } catch (e) {
+ caught = e;
+ }
+ assertSame(exception, caught);
+ assertInstanceof(exception, WebAssembly.RuntimeError);
+ assertEquals(exception.message, kTrapMsgs[kTrapDivByZero]);
+})();
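+// Note: a wasm 'catch' only sees exceptions created by 'throw'; engine traps
+// remain uncatchable even after round-tripping through a JS import and being
+// re-thrown there, which is exactly what the asserts above verify.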
+
+(function TestManuallyThrownRuntimeErrorCaught() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let imp = builder.addImport('imp', 'ort', kSig_i_v);
+ builder.addFunction('call_import', kSig_i_v)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprCallFunction, imp,
+ kExprCatch,
+ kExprDrop,
+ kExprI32Const, 11,
+ kExprEnd
+ ]).exportFunc();
+ function throw_exc() {
+ throw exception = new WebAssembly.RuntimeError('My user text');
+ }
+ let instance = builder.instantiate({imp: {ort: throw_exc}});
+
+ assertEquals(11, instance.exports.call_import());
+})();
+
// Test that we can distinguish which exception was thrown by using a cascaded
// sequence of nested try blocks with a single handler in each catch block.
(function TestCatchComplex1() {
diff --git a/deps/v8/test/mjsunit/wasm/grow-huge-memory.js b/deps/v8/test/mjsunit/wasm/grow-huge-memory.js
new file mode 100644
index 0000000000..5fe8f5dccf
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/grow-huge-memory.js
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Save some memory on Linux; other platforms ignore this flag.
+// Flags: --multi-mapped-mock-allocator
+
+// Test that we can grow memories to sizes beyond 2GB.
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function GetMemoryPages(memory) {
+ return memory.buffer.byteLength >>> 16;
+}
+
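+// A wasm page is 64 KiB (2^16 bytes), so the shift above divides the byte
+// length by the page size. For illustration only (not used by the test), the
+// inverse mapping:
+function GetMemoryBytes(pages) {
+  return pages * 65536;  // 40200 pages => ~2.63 GB, beyond the old 2 GB cap.
+}
+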
+(function TestGrowFromJS() {
+ let mem = new WebAssembly.Memory({initial: 200});
+ mem.grow(40000);
+ assertEquals(40200, GetMemoryPages(mem));
+})();
+
+(function TestGrowFromWasm() {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(200, 50000, true);
+ builder.addFunction("grow", kSig_i_v)
+ .addBody([
+ ...wasmI32Const(40000), // Number of pages to grow by.
+ kExprMemoryGrow, kMemoryZero, // Grow memory.
+ kExprDrop, // Drop result of grow (old pages).
+ kExprMemorySize, kMemoryZero // Get the memory size.
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ assertEquals(40200, instance.exports.grow());
+ assertEquals(40200, GetMemoryPages(instance.exports.memory));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory.js b/deps/v8/test/mjsunit/wasm/grow-memory.js
index 6d0e7e5c5f..2984b1a6b6 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory.js
@@ -37,151 +37,86 @@ function genMemoryGrowBuilder() {
}
// V8 internal memory size limit.
-var kV8MaxPages = 32767;
+var kV8MaxPages = 65536;
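+// (65536 pages * 64 KiB per page = 4 GiB, the full wasm32 address space;
+// the previous limit of 32767 pages was just under 2 GiB.)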
-// TODO(gdeepti): Generate tests programatically for all the sizes instead of
-// current implementation.
-function testMemoryGrowReadWrite32() {
+function testMemoryGrowReadWriteBase(size, load_fn, store_fn) {
+  // {size} is the number of bytes per load and store.
var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
var offset;
- function peek() { return module.exports.load(offset); }
- function poke(value) { return module.exports.store(offset, value); }
+ var load = module.exports[load_fn];
+ var store = module.exports[store_fn];
+ function peek() { return load(offset); }
+ function poke(value) { return store(offset, value); }
function growMem(pages) { return module.exports.grow_memory(pages); }
- for(offset = 0; offset <= (kPageSize - 4); offset+=4) {
+ // Instead of checking every n-th offset, check the first 5.
+ for(offset = 0; offset <= (4*size); offset+=size) {
poke(20);
assertEquals(20, peek());
}
- for (offset = kPageSize - 3; offset < kPageSize + 4; offset++) {
+ for (offset = kPageSize - (size - 1); offset < kPageSize + size; offset++) {
assertTraps(kTrapMemOutOfBounds, poke);
assertTraps(kTrapMemOutOfBounds, peek);
}
assertEquals(1, growMem(3));
- for (offset = kPageSize; offset <= 4*kPageSize -4; offset+=4) {
- poke(20);
- assertEquals(20, peek());
- }
- for (offset = 4*kPageSize - 3; offset < 4*kPageSize + 4; offset++) {
- assertTraps(kTrapMemOutOfBounds, poke);
- assertTraps(kTrapMemOutOfBounds, peek);
+ for (let n = 1; n <= 3; n++) {
+ for (offset = n * kPageSize - 5 * size; offset <= n * kPageSize + 4 * size;
+ offset += size) {
+      // Check the 5 offsets just before and just after the n-th page.
+ // page n-1 page n page n+1
+ // +---- ... ------------+---------- ... +------ ...
+ // | | | ... | | | | | | | | | | | | ... | | | | ...
+ // <+> ^ ^
+ // | first offset last offset
+ // +-> size bytes
+ poke(20);
+ assertEquals(20, peek());
+ }
}
- assertEquals(4, growMem(15));
-
- for (offset = 4*kPageSize - 3; offset <= 4*kPageSize + 4; offset+=4) {
+ // Check the last 5 valid offsets of the last page.
+  for (offset = 4 * kPageSize - 5 * size; offset <= 4 * kPageSize - size;
+       offset += size) {
poke(20);
assertEquals(20, peek());
}
- for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - 4; offset+=4) {
- poke(20);
- assertEquals(20, peek());
- }
- for (offset = 19*kPageSize - 3; offset < 19*kPageSize + 5; offset++) {
- assertTraps(kTrapMemOutOfBounds, poke);
- assertTraps(kTrapMemOutOfBounds, peek);
- }
-}
-
-testMemoryGrowReadWrite32();
-function testMemoryGrowReadWrite16() {
- var builder = genMemoryGrowBuilder();
- builder.addMemory(1, undefined, false);
- var module = builder.instantiate();
- var offset;
- function peek() { return module.exports.load16(offset); }
- function poke(value) { return module.exports.store16(offset, value); }
- function growMem(pages) { return module.exports.grow_memory(pages); }
-
- for(offset = 0; offset <= (kPageSize - 2); offset+=2) {
- poke(20);
- assertEquals(20, peek());
- }
- for (offset = kPageSize - 1; offset < kPageSize + 4; offset++) {
- assertTraps(kTrapMemOutOfBounds, poke);
- assertTraps(kTrapMemOutOfBounds, peek);
- }
-
- assertEquals(1, growMem(3));
-
- for (offset = kPageSize; offset <= 4*kPageSize -2; offset+=2) {
- poke(20);
- assertEquals(20, peek());
- }
- for (offset = 4*kPageSize - 1; offset < 4*kPageSize + 4; offset++) {
+ for (offset = 4*kPageSize - (size-1); offset < 4*kPageSize + size; offset++) {
assertTraps(kTrapMemOutOfBounds, poke);
assertTraps(kTrapMemOutOfBounds, peek);
}
assertEquals(4, growMem(15));
- for (offset = 4*kPageSize - 2; offset <= 4*kPageSize + 4; offset+=2) {
+ for (offset = 4*kPageSize - (size-1); offset <= 4*kPageSize + size; offset+=size) {
poke(20);
assertEquals(20, peek());
}
- for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - 2; offset+=2) {
+ for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - size; offset+=size) {
poke(20);
assertEquals(20, peek());
}
- for (offset = 19*kPageSize - 1; offset < 19*kPageSize + 5; offset++) {
+ for (offset = 19*kPageSize - (size-1); offset < 19*kPageSize + 5; offset++) {
assertTraps(kTrapMemOutOfBounds, poke);
assertTraps(kTrapMemOutOfBounds, peek);
}
}
-testMemoryGrowReadWrite16();
-
-function testMemoryGrowReadWrite8() {
- var builder = genMemoryGrowBuilder();
- builder.addMemory(1, undefined, false);
- var module = builder.instantiate();
- var offset;
- function peek() { return module.exports.load8(offset); }
- function poke(value) { return module.exports.store8(offset, value); }
- function growMem(pages) { return module.exports.grow_memory(pages); }
-
- for(offset = 0; offset <= kPageSize - 1; offset++) {
- poke(20);
- assertEquals(20, peek());
- }
- for (offset = kPageSize; offset < kPageSize + 4; offset++) {
- assertTraps(kTrapMemOutOfBounds, poke);
- assertTraps(kTrapMemOutOfBounds, peek);
- }
+(function testMemoryGrowReadWrite32() {
+ testMemoryGrowReadWriteBase(4, "load", "store");
+})();
- assertEquals(1, growMem(3));
+(function testMemoryGrowReadWrite16() {
+ testMemoryGrowReadWriteBase(2, "load16", "store16");
+})();
- for (offset = kPageSize; offset <= 4*kPageSize -1; offset++) {
- poke(20);
- assertEquals(20, peek());
- }
- for (offset = 4*kPageSize; offset < 4*kPageSize + 4; offset++) {
- assertTraps(kTrapMemOutOfBounds, poke);
- assertTraps(kTrapMemOutOfBounds, peek);
- }
-
- assertEquals(4, growMem(15));
-
- for (offset = 4*kPageSize; offset <= 4*kPageSize + 4; offset++) {
- poke(20);
- assertEquals(20, peek());
- }
- for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - 1; offset++) {
- poke(20);
- assertEquals(20, peek());
- }
- for (offset = 19*kPageSize; offset < 19*kPageSize + 5; offset++) {
- assertTraps(kTrapMemOutOfBounds, poke);
- assertTraps(kTrapMemOutOfBounds, peek);
- }
-}
-
-testMemoryGrowReadWrite8();
+(function testMemoryGrowReadWrite8() {
+ testMemoryGrowReadWriteBase(1, "load8", "store8");
+})();
function testMemoryGrowZeroInitialSize() {
var builder = genMemoryGrowBuilder();
@@ -197,7 +132,14 @@ function testMemoryGrowZeroInitialSize() {
assertEquals(0, growMem(1));
- for(offset = 0; offset <= kPageSize - 4; offset++) {
+ // Check first 5 offsets.
+ for(offset = 0; offset <= 5; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+
+ // Check last 5 offsets.
+ for(offset = kPageSize - 5*4; offset <= kPageSize - 4; offset++) {
poke(20);
assertEquals(20, peek());
}
@@ -217,13 +159,15 @@ function testMemoryGrowZeroInitialSize() {
testMemoryGrowZeroInitialSize();
-function testMemoryGrowZeroInitialSize32() {
+function testMemoryGrowZeroInitialSizeBase(size, load_fn, store_fn) {
var builder = genMemoryGrowBuilder();
builder.addMemory(0, undefined, false);
var module = builder.instantiate();
var offset;
- function peek() { return module.exports.load(offset); }
- function poke(value) { return module.exports.store(offset, value); }
+ var load = module.exports[load_fn];
+ var store = module.exports[store_fn];
+ function peek() { return load(offset); }
+ function poke(value) { return store(offset, value); }
function growMem(pages) { return module.exports.grow_memory(pages); }
assertTraps(kTrapMemOutOfBounds, peek);
@@ -231,69 +175,34 @@ function testMemoryGrowZeroInitialSize32() {
assertEquals(0, growMem(1));
- for(offset = 0; offset <= kPageSize - 4; offset++) {
+ // Instead of checking every offset, check the first 5.
+ for(offset = 0; offset <= 4; offset++) {
poke(20);
assertEquals(20, peek());
}
- for(offset = kPageSize - 3; offset <= kPageSize + 5; offset++) {
- assertTraps(kTrapMemOutOfBounds, peek);
- }
-}
-
-testMemoryGrowZeroInitialSize32();
-
-function testMemoryGrowZeroInitialSize16() {
- var builder = genMemoryGrowBuilder();
- builder.addMemory(0, undefined, false);
- var module = builder.instantiate();
- var offset;
- function peek() { return module.exports.load16(offset); }
- function poke(value) { return module.exports.store16(offset, value); }
- function growMem(pages) { return module.exports.grow_memory(pages); }
-
- assertTraps(kTrapMemOutOfBounds, peek);
- assertTraps(kTrapMemOutOfBounds, poke);
-
- assertEquals(0, growMem(1));
-
- for(offset = 0; offset <= kPageSize - 2; offset++) {
+ // Check the last 5 valid ones.
+ for(offset = kPageSize - (size * 4); offset <= kPageSize - size; offset++) {
poke(20);
assertEquals(20, peek());
}
- for(offset = kPageSize - 1; offset <= kPageSize + 5; offset++) {
+ for(offset = kPageSize - (size - 1); offset <= kPageSize + 5; offset++) {
assertTraps(kTrapMemOutOfBounds, peek);
}
}
-testMemoryGrowZeroInitialSize16();
+(function testMemoryGrowZeroInitialSize32() {
+ testMemoryGrowZeroInitialSizeBase(4, "load", "store");
+})();
-function testMemoryGrowZeroInitialSize8() {
- var builder = genMemoryGrowBuilder();
- builder.addMemory(0, undefined, false);
- var module = builder.instantiate();
- var offset;
- function peek() { return module.exports.load8(offset); }
- function poke(value) { return module.exports.store8(offset, value); }
- function growMem(pages) { return module.exports.grow_memory(pages); }
+(function testMemoryGrowZeroInitialSize16() {
+ testMemoryGrowZeroInitialSizeBase(2, "load16", "store16");
+})();
- assertTraps(kTrapMemOutOfBounds, peek);
- assertTraps(kTrapMemOutOfBounds, poke);
-
- assertEquals(0, growMem(1));
-
- for(offset = 0; offset <= kPageSize - 1; offset++) {
- poke(20);
- assertEquals(20, peek());
- }
-
- for(offset = kPageSize; offset <= kPageSize + 5; offset++) {
- assertTraps(kTrapMemOutOfBounds, peek);
- }
-}
-
-testMemoryGrowZeroInitialSize8();
+(function testMemoryGrowZeroInitialSize8() {
+ testMemoryGrowZeroInitialSizeBase(1, "load8", "store8");
+})();
function testMemoryGrowTrapMaxPagesZeroInitialMemory() {
var builder = genMemoryGrowBuilder();
@@ -343,78 +252,55 @@ function testMemoryGrowCurrentMemory() {
testMemoryGrowCurrentMemory();
-function testMemoryGrowPreservesDataMemOp32() {
+function testMemoryGrowPreservesDataMemOpBase(size, load_fn, store_fn) {
var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
- var offset, val;
- function peek() { return module.exports.load(offset); }
- function poke(value) { return module.exports.store(offset, value); }
+ var offset;
+ var load = module.exports[load_fn];
+ var store = module.exports[store_fn];
+ function peek() { return load(offset); }
+ function poke(value) { return store(offset, value); }
function growMem(pages) { return module.exports.grow_memory(pages); }
+  // Maximum unsigned integer representable in {size} bytes.
+ const max = Math.pow(2, (size * 8)) - 1;
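+  // E.g. {size} = 1 -> max 255, 2 -> 65535, 4 -> 4294967295.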
- for(offset = 0; offset <= (kPageSize - 4); offset+=4) {
- poke(100000 - offset);
- assertEquals(100000 - offset, peek());
- }
-
- assertEquals(1, growMem(3));
-
- for(offset = 0; offset <= (kPageSize - 4); offset+=4) {
- assertEquals(100000 - offset, peek());
+ // Check the first 5 offsets.
+ for(offset = 0; offset <= (4*size); offset+=size) {
+ poke(offset % max);
+ assertEquals(offset % max, peek());
}
-}
-testMemoryGrowPreservesDataMemOp32();
-
-function testMemoryGrowPreservesDataMemOp16() {
- var builder = genMemoryGrowBuilder();
- builder.addMemory(1, undefined, false);
- var module = builder.instantiate();
- var offset, val;
- function peek() { return module.exports.load16(offset); }
- function poke(value) { return module.exports.store16(offset, value); }
- function growMem(pages) { return module.exports.grow_memory(pages); }
-
- for(offset = 0; offset <= (kPageSize - 2); offset+=2) {
- poke(65535 - offset);
- assertEquals(65535 - offset, peek());
+ // Check the last 5 valid offsets.
+ for(offset = kPageSize - 5*size; offset <= (kPageSize - size); offset+=size) {
+ poke(offset % max);
+ assertEquals(offset % max, peek());
}
assertEquals(1, growMem(3));
- for(offset = 0; offset <= (kPageSize - 2); offset+=2) {
- assertEquals(65535 - offset, peek());
+ // Check the first 5 offsets are preserved by growMem.
+ for(offset = 0; offset <= (4*size); offset+=size) {
+ assertEquals(offset % max, peek());
}
-}
-
-testMemoryGrowPreservesDataMemOp16();
-function testMemoryGrowPreservesDataMemOp8() {
- var builder = genMemoryGrowBuilder();
- builder.addMemory(1, undefined, false);
- var module = builder.instantiate();
- var offset, val = 0;
- function peek() { return module.exports.load8(offset); }
- function poke(value) { return module.exports.store8(offset, value); }
- function growMem(pages) { return module.exports.grow_memory(pages); }
-
- for(offset = 0; offset <= (kPageSize - 1); offset++, val++) {
- poke(val);
- assertEquals(val, peek());
- if (val == 255) val = 0;
+ // Check the last 5 valid offsets are preserved by growMem.
+ for(offset = kPageSize - 5*size; offset <= (kPageSize - size); offset+=size) {
+ assertEquals(offset % max, peek());
}
+}
- assertEquals(1, growMem(3));
-
- val = 0;
+(function testMemoryGrowPreservesDataMemOp32() {
+ testMemoryGrowPreservesDataMemOpBase(4, "load", "store");
+})();
- for(offset = 0; offset <= (kPageSize - 1); offset++, val++) {
- assertEquals(val, peek());
- if (val == 255) val = 0;
- }
-}
+(function testMemoryGrowPreservesDataMemOp16() {
+ testMemoryGrowPreservesDataMemOpBase(2, "load16", "store16");
+})();
-testMemoryGrowPreservesDataMemOp8();
+(function testMemoryGrowPreservesDataMemOp8() {
+ testMemoryGrowPreservesDataMemOpBase(1, "load8", "store8");
+})();
function testMemoryGrowOutOfBoundsOffset() {
var builder = genMemoryGrowBuilder();
@@ -436,7 +322,12 @@ function testMemoryGrowOutOfBoundsOffset() {
assertEquals(3, growMem(1));
- for (offset = 3*kPageSize; offset <= 4*kPageSize - 4; offset++) {
+ for (offset = 3*kPageSize; offset <= 3*kPageSize + 4; offset++) {
+ poke(0xaced);
+ assertEquals(0xaced, peek());
+ }
+
+ for (offset = 4*kPageSize-8; offset <= 4*kPageSize - 4; offset++) {
poke(0xaced);
assertEquals(0xaced, peek());
}
@@ -477,23 +368,18 @@ function testMemoryGrowDeclaredMaxTraps() {
testMemoryGrowDeclaredMaxTraps();
-function testMemoryGrowDeclaredSpecMaxTraps() {
- // The spec maximum is higher than the internal V8 maximum. This test only
- // checks that grow_memory does not grow past the internally defined maximum
- // to reflect the current implementation.
+(function testMemoryGrowInternalMaxTraps() {
+ // This test checks that grow_memory does not grow past the internally
+ // defined maximum memory size.
var builder = genMemoryGrowBuilder();
builder.addMemory(1, kSpecMaxPages, false);
var module = builder.instantiate();
- function poke(value) { return module.exports.store(offset, value); }
function growMem(pages) { return module.exports.grow_memory(pages); }
assertEquals(1, growMem(20));
assertEquals(-1, growMem(kV8MaxPages - 20));
-}
-
-testMemoryGrowDeclaredSpecMaxTraps();
+})();
-function testMemoryGrow2Gb() {
- print("testMemoryGrow2Gb");
+(function testMemoryGrow4Gb() {
var builder = genMemoryGrowBuilder();
builder.addMemory(1, undefined, false);
var module = builder.instantiate();
@@ -502,36 +388,46 @@ function testMemoryGrow2Gb() {
function poke(value) { return module.exports.store(offset, value); }
function growMem(pages) { return module.exports.grow_memory(pages); }
- for(offset = 0; offset <= (kPageSize - 4); offset+=4) {
+ // Check first 5 offsets.
+ for (offset = 0; offset <= 4 * 4; offset += 4) {
+ poke(100000 - offset);
+ assertEquals(100000 - offset, peek());
+ }
+
+ // Check last 5 offsets.
+ for (offset = (kPageSize - 5 * 4); offset <= (kPageSize - 4); offset += 4) {
poke(100000 - offset);
assertEquals(100000 - offset, peek());
}
let result = growMem(kV8MaxPages - 1);
- if (result == 1 ){
- for(offset = 0; offset <= (kPageSize - 4); offset+=4) {
+ if (result == 1) {
+ // Check first 5 offsets.
+ for (offset = 0; offset <= 4 * 4; offset += 4) {
+ assertEquals(100000 - offset, peek());
+ }
+
+ // Check last 5 offsets.
+ for (offset = (kPageSize - 5 * 4); offset <= (kPageSize - 4); offset += 4) {
assertEquals(100000 - offset, peek());
}
- // Bounds check for large mem size
- for(offset = (kV8MaxPages - 1) * kPageSize;
- offset <= (kV8MaxPages * kPageSize - 4); offset+=4) {
+ // Bounds check for large mem size.
+ let kMemSize = (kV8MaxPages * kPageSize);
+ let kLastValidOffset = kMemSize - 4; // Accommodate a 4-byte read/write.
+ // Check first 5 offsets of last page.
+ for (offset = kMemSize - kPageSize; offset <= kMemSize - kPageSize + 4 * 4;
+ offset += 4) {
poke(0xaced);
assertEquals(0xaced, peek());
}
-
- for (offset = kV8MaxPages * kPageSize - 3;
- offset <= kV8MaxPages * kPageSize + 4; offset++) {
- assertTraps(kTrapMemOutOfBounds, poke);
+ for (offset = kLastValidOffset - 5 * 4; offset <= kLastValidOffset;
+ offset += 4) {
+ poke(0xaced);
+ assertEquals(0xaced, peek());
}
- // Check traps around 3GB/4GB boundaries
- let offset_3gb = 49152 * kPageSize;
- let offset_4gb = 2 * kV8MaxPages * kPageSize;
- for (offset = offset_3gb - 5; offset < offset_3gb + 4; offset++) {
- assertTraps(kTrapMemOutOfBounds, poke);
- }
- for (offset = offset_4gb - 5; offset < offset_4gb; offset++) {
+ for (offset = kLastValidOffset + 1; offset < kMemSize; offset++) {
assertTraps(kTrapMemOutOfBounds, poke);
}
} else {
@@ -539,6 +435,4 @@ function testMemoryGrow2Gb() {
// bit platforms. When grow_memory fails, expected result is -1.
assertEquals(-1, result);
}
-}
-
-testMemoryGrow2Gb();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/import-memory.js b/deps/v8/test/mjsunit/wasm/import-memory.js
index 08100efabd..bac6d8f624 100644
--- a/deps/v8/test/mjsunit/wasm/import-memory.js
+++ b/deps/v8/test/mjsunit/wasm/import-memory.js
@@ -7,7 +7,7 @@
load("test/mjsunit/wasm/wasm-module-builder.js");
// V8 internal memory size limit.
-var kV8MaxPages = 32767;
+var kV8MaxPages = 65536;
(function TestOne() {
print("TestOne");
diff --git a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
index 2d15e03c79..2e72a88839 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
@@ -186,11 +186,11 @@ function redirectToInterpreter(
assertUnreachable('should trap because of unreachable instruction');
} catch (e) {
checkStack(stripPath(e.stack), [
- 'Error: i=8', // -
- /^ at imp \(file:\d+:29\)$/, // -
- ' at plus_one (wasm-function[1]:0x3b)', // -
- ' at plus_two (wasm-function[1]:0x3e)', // -
- /^ at testStackTraceThroughCWasmEntry \(file:\d+:25\)$/, // -
+ 'Error: i=8', // -
+ /^ at imp \(file:\d+:29\)$/, // -
+ ' at plus_one (<anonymous>:wasm-function[1]:0x3b)', // -
+ ' at plus_two (<anonymous>:wasm-function[1]:0x3e)', // -
+ /^ at testStackTraceThroughCWasmEntry \(file:\d+:25\)$/, // -
/^ at file:\d+:3$/
]);
}
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index 43ecc4a33a..99debff846 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -37,10 +37,10 @@ function checkStack(stack, expected_lines) {
instance.exports.main();
assertEquals(interpreted_before + 1, %WasmNumInterpretedCalls(instance));
checkStack(stripPath(stack), [
- 'Error: test imported stack', // -
- /^ at func \(interpreter.js:\d+:28\)$/, // -
- ' at main (wasm-function[1]:0x32)', // -
- /^ at testCallImported \(interpreter.js:\d+:22\)$/, // -
+ 'Error: test imported stack', // -
+ /^ at func \(interpreter.js:\d+:28\)$/, // -
+ ' at main (<anonymous>:wasm-function[1]:0x32)', // -
+ /^ at testCallImported \(interpreter.js:\d+:22\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
}
@@ -102,10 +102,10 @@ function checkStack(stack, expected_lines) {
}
assertEquals(interpreted_before + 2, %WasmNumInterpretedCalls(instance));
checkStack(stripPath(stack), [
- 'RuntimeError: unreachable', // -
- ' at foo (wasm-function[0]:0x27)', // -
- ' at main (wasm-function[1]:0x2c)', // -
- /^ at testTrap \(interpreter.js:\d+:24\)$/, // -
+ 'RuntimeError: unreachable', // -
+ ' at foo (<anonymous>:wasm-function[0]:0x27)', // -
+ ' at main (<anonymous>:wasm-function[1]:0x2c)', // -
+ /^ at testTrap \(interpreter.js:\d+:24\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
}
@@ -134,10 +134,10 @@ function checkStack(stack, expected_lines) {
}
assertEquals(interpreted_before + 1, %WasmNumInterpretedCalls(instance));
checkStack(stripPath(stack), [
- 'Error: thrown from imported function', // -
- /^ at func \(interpreter.js:\d+:11\)$/, // -
- ' at main (wasm-function[1]:0x32)', // -
- /^ at testThrowFromImport \(interpreter.js:\d+:24\)$/, // -
+ 'Error: thrown from imported function', // -
+ /^ at func \(interpreter.js:\d+:11\)$/, // -
+ ' at main (<anonymous>:wasm-function[1]:0x32)', // -
+ /^ at testThrowFromImport \(interpreter.js:\d+:24\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
}
@@ -218,10 +218,10 @@ function checkStack(stack, expected_lines) {
for (var e = 0; e < stacks.length; ++e) {
expected = ['Error: reentrant interpreter test #' + e];
expected.push(/^ at func \(interpreter.js:\d+:17\)$/);
- expected.push(' at main (wasm-function[1]:0x36)');
+ expected.push(' at main (<anonymous>:wasm-function[1]:0x36)');
for (var k = e; k > 0; --k) {
expected.push(/^ at func \(interpreter.js:\d+:33\)$/);
- expected.push(' at main (wasm-function[1]:0x36)');
+ expected.push(' at main (<anonymous>:wasm-function[1]:0x36)');
}
expected.push(
/^ at testReentrantInterpreter \(interpreter.js:\d+:22\)$/);
@@ -295,10 +295,10 @@ function checkStack(stack, expected_lines) {
} catch (e) {
if (!(e instanceof TypeError)) throw e;
checkStack(stripPath(e.stack), [
- 'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
- ' at direct (wasm-function[1]:0x55)', // -
- ' at main (wasm-function[3]:0x64)', // -
- /^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
+ 'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
+ ' at direct (<anonymous>:wasm-function[1]:0x55)', // -
+ ' at main (<anonymous>:wasm-function[3]:0x64)', // -
+ /^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
}
@@ -308,10 +308,10 @@ function checkStack(stack, expected_lines) {
} catch (e) {
if (!(e instanceof TypeError)) throw e;
checkStack(stripPath(e.stack), [
- 'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
- ' at indirect (wasm-function[2]:0x5c)', // -
- ' at main (wasm-function[3]:0x64)', // -
- /^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
+ 'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
+ ' at indirect (<anonymous>:wasm-function[2]:0x5c)', // -
+ ' at main (<anonymous>:wasm-function[3]:0x64)', // -
+ /^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
}
@@ -358,8 +358,8 @@ function checkStack(stack, expected_lines) {
if (!(e instanceof RangeError)) throw e;
checkStack(stripPath(e.stack), [
'RangeError: Maximum call stack size exceeded',
- ' at main (wasm-function[0]:0x20)'
- ].concat(Array(9).fill(' at main (wasm-function[0]:0x22)')));
+ ' at main (<anonymous>:wasm-function[0]:0x20)'
+ ].concat(Array(9).fill(' at main (<anonymous>:wasm-function[0]:0x22)')));
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/liftoff-simd-params.js b/deps/v8/test/mjsunit/wasm/liftoff-simd-params.js
new file mode 100644
index 0000000000..1ee7f74a32
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/liftoff-simd-params.js
@@ -0,0 +1,77 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-simd
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// This test case tries to exercise SIMD stack to stack movements by creating
+// a function that has many parameters.
+(function() {
+const builder = new WasmModuleBuilder();
+// At this point we have limited support for SIMD operations, but we can load
+// and store s128 values. So this memory will be used for reading and writing
+// s128 values and we will assert expected values from JS.
+builder.addImportedMemory('m', 'imported_mem', 1, 2);
+builder.addType(makeSig(new Array(18).fill(kWasmS128), []));
+
+builder.addFunction(undefined, makeSig([], []))
+ .addLocals({s128_count: 9})
+ .addBodyWithEnd([
+ // These will all be args to the callee.
+ // Load first arg from memory, this was written with values from JS.
+ kExprI32Const, 0, // i32.const
+ kSimdPrefix, kExprS128LoadMem, 0, 0, // s128.load
+ kExprLocalGet, 0, // local.get
+ kExprLocalGet, 1, // local.get
+ kExprLocalGet, 2, // local.get
+ kExprLocalGet, 3, // local.get
+ kExprLocalGet, 4, // local.get
+ kExprLocalGet, 5, // local.get
+ kExprLocalGet, 6, // local.get
+ kExprLocalGet, 7, // local.get
+ kExprLocalGet, 8, // local.get
+ kExprLocalGet, 0, // local.get
+ kExprLocalGet, 1, // local.get
+ kExprLocalGet, 2, // local.get
+ kExprLocalGet, 3, // local.get
+ kExprLocalGet, 4, // local.get
+ kExprLocalGet, 5, // local.get
+ kExprLocalGet, 6, // local.get
+ // Load last s128 from memory, this was written with values from JS.
+ kExprI32Const, 16, // i32.const
+ kSimdPrefix, kExprS128LoadMem, 0, 0, // s128.load
+ kExprCallFunction, 0x01, // call
+ kExprEnd, // end
+ ]);
+
+builder.addFunction(undefined, 0 /* sig */).addBodyWithEnd([
+ kExprI32Const, 32, // i32.const
+ kExprLocalGet, 0, // local.get
+ kSimdPrefix, kExprS128StoreMem, 0, 0, // s128.store
+ kExprI32Const, 48, // i32.const
+ kExprLocalGet, 17, // local.get
+ kSimdPrefix, kExprS128StoreMem, 0, 0, // s128.store
+ kExprEnd, // end
+]);
+
+builder.addExport('main', 0);
+var memory = new WebAssembly.Memory({initial: 1, maximum: 2});
+const instance = builder.instantiate({m: {imported_mem: memory}});
+
+// We write sentinel values to two s128 values at the start of the memory.
+// Function 1 will read these values from memory, and pass them as the first
+// and last arg to function 2. Function 2 then writes these values to memory
+// just after those two s128 values.
+const arr = new Uint32Array(memory.buffer);
+for (let i = 0; i < 8; i++) {
+  arr[i] = i * 2;
+}
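+// In u32 units: arr[0..7] holds the two input s128 values (bytes 0..31);
+// after the call, arr[8..15] should hold the copies written by function 2 at
+// byte offsets 32 and 48.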
+
+instance.exports.main();
+
+for (let i = 0; i < 8; i++) {
+ assertEquals(arr[i], arr[i + 8]);
+}
+})();
diff --git a/deps/v8/test/mjsunit/wasm/many-modules.js b/deps/v8/test/mjsunit/wasm/many-modules.js
index 66db04237a..be235778b6 100644
--- a/deps/v8/test/mjsunit/wasm/many-modules.js
+++ b/deps/v8/test/mjsunit/wasm/many-modules.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// No reason to stress-opt this; save some time.
-// Flags: --wasm-far-jump-table --no-stress-opt
+// Flags: --no-stress-opt
load('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/wasm/memory-external-call.js b/deps/v8/test/mjsunit/wasm/memory-external-call.js
index 1bb4bb1ecc..fed47b61fd 100644
--- a/deps/v8/test/mjsunit/wasm/memory-external-call.js
+++ b/deps/v8/test/mjsunit/wasm/memory-external-call.js
@@ -302,7 +302,7 @@ function assertMemoryIndependence(load_a, store_a, load_b, store_b) {
assertMemoryIndependence(a.exports.load, table.get(1),
b.exports.load, table.get(0));
- // Check that calling (from WASM) through the table maintains independence.
+ // Check that calling (from Wasm) through the table maintains independence.
builder = new WasmModuleBuilder();
builder.addImportedTable("m", "table", kTableSize, kTableSize);
var sig_index = builder.addType(kSig_v_ii);
diff --git a/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js b/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
index f180cf6234..2c1a1bd08e 100644
--- a/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
+++ b/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --randomize-all-allocations
-// Flags: --wasm-far-jump-table --wasm-max-initial-code-space-reservation=1
+// Flags: --wasm-max-initial-code-space-reservation=1
load('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index d6ae8541a6..1f3b8146da 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -23,7 +23,7 @@ function verifyStack(frames, expected) {
assertContains(exp[4], frames[i].getFileName(), "["+i+"].getFileName()");
var toString;
if (exp[0]) {
- toString = "wasm-function[" + exp[6] + "]:" + exp[5];
+ toString = "<anonymous>:wasm-function[" + exp[6] + "]:" + exp[5];
if (exp[1] !== null) toString = exp[1] + " (" + toString + ")";
} else {
toString = exp[4] + ":" + exp[2] + ":";
@@ -69,10 +69,10 @@ var module = builder.instantiate({mod: {func: STACK}});
(function testSimpleStack() {
var expected_string = 'Error\n' +
// The line numbers below will change as this test gains / loses lines..
- ' at STACK (stack.js:38:11)\n' + // --
- ' at main (wasm-function[1]:0x86)\n' + // --
- ' at testSimpleStack (stack.js:77:18)\n' + // --
- ' at stack.js:79:3'; // --
+ ' at STACK (stack.js:38:11)\n' + // --
+ ' at main (<anonymous>:wasm-function[1]:0x86)\n' + // --
+ ' at testSimpleStack (stack.js:77:18)\n' + // --
+ ' at stack.js:79:3'; // --
module.exports.main();
assertEquals(expected_string, stripPath(stack));
@@ -119,7 +119,7 @@ Error.prepareStackTrace = function(error, frames) {
assertContains("out of bounds", e.message);
verifyStack(e.stack, [
// isWasm function line pos file offset funcIndex
- [ true, null, 0, 3, null, '0x91', 3],
+ [ true, "mem_out_of_bounds", 0, 3, null, '0x91', 3],
[ true, "call_mem_out_of_bounds", 0, 1, null, '0x97', 4],
[ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
[ false, null, 128, 0, "stack.js"]
diff --git a/deps/v8/test/mjsunit/wasm/table-access.js b/deps/v8/test/mjsunit/wasm/table-access.js
index 3e718cf06c..7c1bf1eb19 100644
--- a/deps/v8/test/mjsunit/wasm/table-access.js
+++ b/deps/v8/test/mjsunit/wasm/table-access.js
@@ -138,6 +138,7 @@ const dummy_func = exports.set_table_func1;
const function_index = builder.addFunction('hidden', sig_index)
.addBody([kExprI32Const, expected])
.index;
+ builder.addDeclarativeElementSegment([function_index]);
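+  // Declarative segments mark functions as referenceable; validation
+  // requires this before a function index may be used with ref.func.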
builder.addFunction('main', kSig_i_v)
.addBody([
diff --git a/deps/v8/test/mjsunit/wasm/table-fill.js b/deps/v8/test/mjsunit/wasm/table-fill.js
index 64c4d7732d..760c662c7f 100644
--- a/deps/v8/test/mjsunit/wasm/table-fill.js
+++ b/deps/v8/test/mjsunit/wasm/table-fill.js
@@ -93,8 +93,7 @@ function checkAnyRefTable(getter, start, count, value) {
(function testAnyRefTableFillOOB() {
print(arguments.callee.name);
- // Fill table out-of-bounds, check if the table got filled as much as
- // possible.
+ // Fill table out-of-bounds, check if the table wasn't altered.
let start = 7;
let value = {foo: 27};
// {maximum + 4} elements definitely don't fit into the table.
@@ -103,14 +102,14 @@ function checkAnyRefTable(getter, start, count, value) {
kTrapTableOutOfBounds,
() => instance.exports[`fill${import_ref}`](start, value, count));
checkAnyRefTable(
- instance.exports[`get${import_ref}`], start, size - start, value);
+ instance.exports[`get${import_ref}`], start, size - start, null);
value = 45;
assertTraps(
kTrapTableOutOfBounds,
() => instance.exports[`fill${internal_ref}`](start, value, count));
checkAnyRefTable(
- instance.exports[`get${internal_ref}`], start, size - start, value);
+ instance.exports[`get${internal_ref}`], start, size - start, null);
})();
(function testAnyRefTableFillOOBCountZero() {
@@ -160,8 +159,7 @@ function checkAnyFuncTable(call, start, count, value) {
(function testAnyFuncTableFillOOB() {
print(arguments.callee.name);
- // Fill table out-of-bounds, check if the table got filled as much as
- // possible.
+ // Fill table out-of-bounds, check if the table wasn't altered.
let start = 7;
let value = 38;
// {maximum + 4} elements definitely don't fit into the table.
@@ -171,7 +169,7 @@ function checkAnyFuncTable(call, start, count, value) {
() => instance.exports[`fill${import_func}`](
start, dummy_func(value), count));
checkAnyFuncTable(
- instance.exports[`call${import_func}`], start, size - start, value);
+ instance.exports[`call${import_func}`], start, size - start, null);
value = 46;
assertTraps(
@@ -179,7 +177,7 @@ function checkAnyFuncTable(call, start, count, value) {
() => instance.exports[`fill${internal_func}`](
start, dummy_func(value), count));
checkAnyFuncTable(
- instance.exports[`call${internal_func}`], start, size - start, value);
+ instance.exports[`call${internal_func}`], start, size - start, null);
})();
(function testAnyFuncTableFillOOBCountZero() {
diff --git a/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js b/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js
index bfd973c607..cbd763ddcd 100644
--- a/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --liftoff --wasm-tier-up --no-stress-opt
+// Flags: --allow-natives-syntax
load('test/mjsunit/wasm/wasm-module-builder.js');
-const num_functions = 2;
+const num_functions = 200;
function create_builder(delta = 0) {
const builder = new WasmModuleBuilder();
@@ -18,21 +18,37 @@ function create_builder(delta = 0) {
return builder;
}
-function check(instance) {
- %WasmTierDownModule(instance);
+function checkTieredDown(instance) {
for (let i = 0; i < num_functions; ++i) {
assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
}
+}
+
+function checkTieredUp(instance) {
+ // Busy waiting until all functions are tiered up.
+ let num_liftoff_functions;
+ while (true) {
+ num_liftoff_functions = 0;
+ for (let i = 0; i < num_functions; ++i) {
+ if (%IsLiftoffFunction(instance.exports['f' + i])) {
+ num_liftoff_functions++;
+ }
+ }
+ if (num_liftoff_functions == 0) return;
+ }
+}
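+// Note: tier-up compilation runs on background threads, so checkTieredUp can
+// only poll; there is no synchronous signal that the last function finished
+// tiering up.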
+
+function check(instance) {
+ %WasmTierDownModule(instance);
+ checkTieredDown(instance);
for (let i = 0; i < num_functions; ++i) {
%WasmTierUpFunction(instance, i);
- assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
}
+ checkTieredDown(instance);
%WasmTierUpModule(instance);
- for (let i = 0; i < num_functions; ++i) {
- assertFalse(%IsLiftoffFunction(instance.exports['f' + i]));
- }
+ checkTieredUp(instance);
}
(function testTierDownToLiftoff() {
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection.js b/deps/v8/test/mjsunit/wasm/type-reflection.js
index bac877d187..c0dfd8b26b 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection.js
@@ -343,6 +343,39 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
() => new WebAssembly.Function({parameters:[], results:[]}, _ => 0));
})();
+(function TestFunctionConstructorWithWasmExportedFunction() {
+ let builder = new WasmModuleBuilder();
+
+ builder.addFunction('func1', kSig_v_i).addBody([]).exportFunc();
+ builder.addFunction('func2', kSig_v_v).addBody([]).exportFunc();
+
+ const instance = builder.instantiate();
+ assertThrows(
+ () => new WebAssembly.Function(
+ {parameters: [], results: []}, instance.exports.func1),
+ TypeError,
+ 'WebAssembly.Function(): The signature of Argument 1 (a ' +
+ 'WebAssembly function) does not match the signature specified in ' +
+ 'Argument 0');
+
+ assertDoesNotThrow(
+ () => new WebAssembly.Function(
+ {parameters: [], results: []}, instance.exports.func2));
+})();
+
+(function TestFunctionConstructorWithWasmJSFunction() {
+ const func = new WebAssembly.Function({parameters: [], results: []}, _ => 0);
+
+ assertDoesNotThrow(
+ () => new WebAssembly.Function({parameters: [], results: []}, func));
+ assertThrows(
+ () => new WebAssembly.Function({parameters: ['i32'], results: []}, func),
+ TypeError,
+ 'WebAssembly.Function(): The signature of Argument 1 (a ' +
+ 'WebAssembly function) does not match the signature specified in ' +
+ 'Argument 0');
+})();
+
(function TestFunctionConstructorNonArray1() {
let log = []; // Populated with a log of accesses.
let two = { toString: () => "2" }; // Just a fancy "2".
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-exported.js b/deps/v8/test/mjsunit/wasm/user-properties-exported.js
index 0b2f249e05..ed71e37281 100644
--- a/deps/v8/test/mjsunit/wasm/user-properties-exported.js
+++ b/deps/v8/test/mjsunit/wasm/user-properties-exported.js
@@ -22,7 +22,7 @@ load("test/mjsunit/wasm/user-properties-common.js");
testProperties(g);
- // The WASM-internal fields of {g} are only inspected when {g} is
+ // The Wasm-internal fields of {g} are only inspected when {g} is
// used as an import into another instance.
print(" instance 2, importing");
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-module.js b/deps/v8/test/mjsunit/wasm/user-properties-module.js
index 84727e1b53..e7a547df17 100644
--- a/deps/v8/test/mjsunit/wasm/user-properties-module.js
+++ b/deps/v8/test/mjsunit/wasm/user-properties-module.js
@@ -45,7 +45,7 @@ load("test/mjsunit/wasm/user-properties-common.js");
assertEquals(f(j), g(j));
}
verifyHeap();
- // The WASM-internal fields of {g} are only inspected when {g} is
+ // The Wasm-internal fields of {g} are only inspected when {g} is
// used as an import into another instance. Use {g} as the import
// the next time through the loop.
f = g;
diff --git a/deps/v8/test/mjsunit/wasm/user-properties-reexport.js b/deps/v8/test/mjsunit/wasm/user-properties-reexport.js
index e4f155df5a..0f278b65c5 100644
--- a/deps/v8/test/mjsunit/wasm/user-properties-reexport.js
+++ b/deps/v8/test/mjsunit/wasm/user-properties-reexport.js
@@ -23,7 +23,7 @@ load("test/mjsunit/wasm/user-properties-common.js");
testProperties(g);
printName(" after", g);
- // The WASM-internal fields of {g} are only inspected when {g} is
+ // The Wasm-internal fields of {g} are only inspected when {g} is
// used as an import into another instance. Use {g} as the import
// the next time through the loop.
f = g;
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 432353239d..845236cf2e 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -45,7 +45,7 @@ var kWasmV3 = 0;
var kHeaderSize = 8;
var kPageSize = 65536;
-var kSpecMaxPages = 65535;
+var kSpecMaxPages = 65536;
var kMaxVarInt32Size = 5;
var kMaxVarInt64Size = 10;
@@ -65,7 +65,7 @@ let kElementSectionCode = 9; // Elements section
let kCodeSectionCode = 10; // Function code
let kDataSectionCode = 11; // Data segments
let kDataCountSectionCode = 12; // Data segment count (between Element & Code)
-let kExceptionSectionCode = 13; // Exception section (between Global & Export)
+let kExceptionSectionCode = 13; // Exception section (between Memory & Global)
// Name section types
let kModuleNameCode = 0;
@@ -82,7 +82,9 @@ let kSharedHasMaximumFlag = 3;
let kActiveNoIndex = 0;
let kPassive = 1;
let kActiveWithIndex = 2;
+let kDeclarative = 3;
let kPassiveWithElements = 5;
+let kDeclarativeWithElements = 7;
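+// Flag encoding (bulk memory / reference types): bit 0 set = not active;
+// bit 1 then picks declarative (set) over passive (clear); bit 2 switches
+// the payload from function indices to element expressions. Hence 1 =
+// passive, 3 = declarative, and 5/7 the same with element expressions.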
// Function declaration flags
let kDeclFunctionName = 0x01;
@@ -468,9 +470,13 @@ let kExprI64AtomicCompareExchange32U = 0x4e;
let kExprS128LoadMem = 0x00;
let kExprS128StoreMem = 0x01;
let kExprI32x4Splat = 0x0c;
+let kExprF32x4Splat = 0x12;
let kExprI32x4Eq = 0x2c;
+let kExprS1x8AnyTrue = 0x63;
let kExprS1x4AllTrue = 0x75;
+let kExprI32x4Add = 0x79;
let kExprF32x4Min = 0x9e;
+let kExprS8x16LoadSplat = 0xc2;
// Compilation hint constants.
let kCompilationHintStrategyDefault = 0x00;
@@ -495,6 +501,8 @@ let kTrapUnalignedAccess = 9;
let kTrapDataSegmentDropped = 10;
let kTrapElemSegmentDropped = 11;
let kTrapTableOutOfBounds = 12;
+let kTrapBrOnExnNullRef = 13;
+let kTrapRethrowNullRef = 14;
let kTrapMsgs = [
"unreachable",
@@ -509,7 +517,9 @@ let kTrapMsgs = [
"operation does not support unaligned accesses",
"data segment has been dropped",
"element segment has been dropped",
- "table access out of bounds"
+ "table access out of bounds",
+ "br_on_exn on nullref value",
+ "rethrowing nullref value"
];
function assertTraps(trap, code) {
@@ -906,13 +916,26 @@ class WasmModuleBuilder {
}
addElementSegment(table, base, is_global, array) {
- this.element_segments.push({table: table, base: base, is_global: is_global,
- array: array, is_active: true});
+ this.element_segments.push({
+ table: table,
+ base: base,
+ is_global: is_global,
+ array: array,
+ is_active: true,
+ is_declarative: false
+ });
return this;
}
addPassiveElementSegment(array, is_import = false) {
- this.element_segments.push({array: array, is_active: false});
+ this.element_segments.push(
+ {array: array, is_active: false, is_declarative: false});
+ return this;
+ }
+
+ addDeclarativeElementSegment(array, is_import = false) {
+ this.element_segments.push(
+ {array: array, is_active: false, is_declarative: true});
return this;
}
@@ -1057,6 +1080,18 @@ class WasmModuleBuilder {
});
}
+ // Add event section.
+ if (wasm.exceptions.length > 0) {
+ if (debug) print("emitting events @ " + binary.length);
+ binary.emit_section(kExceptionSectionCode, section => {
+ section.emit_u32v(wasm.exceptions.length);
+ for (let type of wasm.exceptions) {
+ section.emit_u32v(kExceptionAttribute);
+ section.emit_u32v(type);
+ }
+ });
+ }
+
// Add global section.
if (wasm.globals.length > 0) {
if (debug) print ("emitting globals @ " + binary.length);
@@ -1110,18 +1145,6 @@ class WasmModuleBuilder {
});
}
- // Add exceptions.
- if (wasm.exceptions.length > 0) {
- if (debug) print("emitting exceptions @ " + binary.length);
- binary.emit_section(kExceptionSectionCode, section => {
- section.emit_u32v(wasm.exceptions.length);
- for (let type of wasm.exceptions) {
- section.emit_u32v(kExceptionAttribute);
- section.emit_u32v(type);
- }
- });
- }
-
// Add export table.
var mem_export = (wasm.memory !== undefined && wasm.memory.exp);
var exports_count = wasm.exports.length + (mem_export ? 1 : 0);
@@ -1180,9 +1203,20 @@ class WasmModuleBuilder {
for (let index of init.array) {
section.emit_u32v(index);
}
+ } else if (
+ init.is_declarative &&
+ init.array.every(index => index !== null)) {
+ section.emit_u8(kDeclarative);
+ section.emit_u8(kExternalFunction);
+ section.emit_u32v(init.array.length);
+ for (let index of init.array) {
+ section.emit_u32v(index);
+ }
} else {
- // Passive segment.
- section.emit_u8(kPassiveWithElements); // flags
+ // Passive or declarative segment with elements.
+ section.emit_u8(
+ init.is_declarative ? kDeclarativeWithElements :
+ kPassiveWithElements); // flags
section.emit_u8(kWasmAnyFunc);
section.emit_u32v(init.array.length);
for (let index of init.array) {
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 99478a6dbf..c8603bd4fb 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -150,6 +150,12 @@
'built-ins/TypedArray/prototype/copyWithin/coerced-values-end-detached-prototype': [FAIL],
'built-ins/TypedArray/prototype/copyWithin/coerced-values-start-detached': [FAIL],
'built-ins/TypedArray/prototype/copyWithin/coerced-values-end-detached': [FAIL],
+ # fill should also throw on detached buffers
+ 'built-ins/TypedArray/prototype/fill/coerced-value-detach': [FAIL],
+ 'built-ins/TypedArray/prototype/fill/coerced-end-detach': [FAIL],
+ 'built-ins/TypedArray/prototype/fill/coerced-start-detach': [FAIL],
+ # includes should also throw on detached buffers
+ 'built-ins/TypedArray/prototype/includes/detached-buffer-tointeger': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4951
'language/expressions/assignment/destructuring/iterator-destructuring-property-reference-target-evaluation-order': [FAIL],
@@ -496,17 +502,12 @@
'language/comments/hashbang/use-strict': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=8179
- # Temporarily removed pending https://github.com/tc39/test262/issues/2339
- #
- # These tests require exception handling support which is currently
- # blocked on landing https://chromium-review.googlesource.com/c/v8/v8/+/1655655
- 'built-ins/FinalizationGroup/FinalizationGroupCleanupIteratorPrototype/next-job-not-active-throws': [FAIL],
- 'built-ins/FinalizationGroup/prototype/cleanupSome/cleanup-throws-in-callback': [FAIL],
- 'built-ins/FinalizationGroup/prototype/cleanupSome/poisoned-callback-throws': [FAIL],
- # 'built-ins/FinalizationGroup/prototype/cleanupSome/poisoned-cleanup-callback-throws': [FAIL],
+ 'built-ins/FinalizationRegistry/FinalizationRegistryCleanupIteratorPrototype/next-job-not-active-throws': [FAIL],
+ 'built-ins/FinalizationRegistry/prototype/cleanupSome/cleanup-throws-in-callback': [FAIL],
+ 'built-ins/FinalizationRegistry/prototype/cleanupSome/poisoned-callback-throws': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=9612
- 'intl402/DateTimeFormat/prototype/formatRange/fractionalSecondDigits': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=10313
+ 'built-ins/Date/parse/without-utc-offset': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=9613
'intl402/Intl/getCanonicalLocales/canonicalized-tags': [FAIL],
@@ -535,7 +536,7 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=10111
# super() should evaluate arguments before checking IsConstructable
- 'language/expressions/class/super-evaluation-order': [FAIL],
+ 'language/expressions/super/call-proto-not-ctor': [FAIL],
# Intl.NumberFormat(..., {signDisplay:'exceptZero'}).format(0.0001)
# produce +0 due to rounding
@@ -552,10 +553,9 @@
'intl402/NumberFormat/prototype/formatToParts/signDisplay-ko-KR': [FAIL],
'intl402/NumberFormat/prototype/formatToParts/signDisplay-zh-TW': [FAIL],
- # Intl.NumberFormat constructor should throw RangeError
- # https://bugs.chromium.org/p/v8/issues/detail?id=10112
- 'intl402/NumberFormat/constructor-order': [FAIL],
- 'intl402/NumberFormat/constructor-unit': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=10272
+ 'intl402/DateTimeFormat/invalid-numbering-system-calendar-options': [FAIL],
+ 'intl402/NumberFormat/invalid-numbering-system-options': [FAIL],
######################## NEEDS INVESTIGATION ###########################
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index b971a22e5a..e1f1956392 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -53,7 +53,7 @@ FEATURE_FLAGS = {
'Symbol.prototype.description': '--harmony-symbol-description',
'export-star-as-namespace-from-module': '--harmony-namespace-exports',
'Promise.allSettled': '--harmony-promise-all-settled',
- 'FinalizationGroup': '--harmony-weak-refs',
+ 'FinalizationRegistry': '--harmony-weak-refs',
'WeakRef': '--harmony-weak-refs',
'host-gc-required': '--expose-gc-as=v8GC',
'optional-chaining': '--harmony-optional-chaining',
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 8948fd0eb0..0693878cce 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -767,7 +767,7 @@ namespace test {
check(a.b.GetX() == 2);
}
- class InternalClass extends Struct {
+ class InternalClass extends HeapObject {
macro Flip() labels NotASmi {
const tmp = Cast<Smi>(this.b) otherwise NotASmi;
this.b = this.a;
@@ -833,7 +833,7 @@ namespace test {
return new FixedArray{map: kFixedArrayMap, length: 5, objects: ...i};
}
- class SmiPair extends Struct {
+ class SmiPair extends HeapObject {
macro GetA():&Smi {
return & this.a;
}
@@ -920,7 +920,7 @@ namespace test {
StaticAssert(1 + 2 == 3);
}
- class SmiBox extends Struct {
+ class SmiBox extends HeapObject {
value: Smi;
unrelated: Smi;
}
@@ -1120,4 +1120,108 @@ namespace test {
check(val3.d == 99);
check(val3.e == 1234);
}
+
+ @export
+ class ExportedSubClass extends ExportedSubClassBase {
+ c_field: int32;
+ d_field: int32;
+ e_field: Smi;
+ }
+
+ @export
+ class ExportedSubClassBase extends HeapObject {
+ a: HeapObject;
+ b: HeapObject;
+ }
+
+ class InternalClassWithSmiElements extends FixedArrayBase {
+ data: Smi;
+ object: Oddball;
+ entries[length]: Smi;
+ }
+
+ struct InternalClassStructElement {
+ a: Smi;
+ b: Smi;
+ }
+
+ class InternalClassWithStructElements extends HeapObject {
+ dummy1: int32;
+ dummy2: int32;
+ const count: Smi;
+ data: Smi;
+ object: Object;
+ entries[count]: Smi;
+ more_entries[count]: InternalClassStructElement;
+ }
+
+ struct SmiGeneratorIterator {
+ macro Next(): Smi labels _NoMore {
+ return this.value++;
+ }
+ value: Smi;
+ }
+
+ struct InternalClassStructElementGeneratorIterator {
+ macro Next(): InternalClassStructElement labels _NoMore {
+ return InternalClassStructElement{a: this.value++, b: this.value++};
+ }
+ value: Smi;
+ }
+
+ @export
+ macro TestFullyGeneratedClassWithElements() {
+ // Test creation, initialization and access of a fully generated class with
+ // simple (Smi) elements
+ const length: Smi = Convert<Smi>(3);
+ const object1 = new InternalClassWithSmiElements{
+ length,
+ data: 0,
+ object: Undefined,
+ entries: ...SmiGeneratorIterator {
+ value: 11
+ }
+ };
+ assert(object1.length == 3);
+ assert(object1.data == 0);
+ assert(object1.object == Undefined);
+ assert(object1.entries[0] == 11);
+ assert(object1.entries[1] == 12);
+ assert(object1.entries[2] == 13);
+
+ // Test creation, initialization and access of a fully generated class
+ // with elements that are a struct.
+ const object2 = new InternalClassWithStructElements{
+ dummy1: 44,
+ dummy2: 45,
+ count: length,
+ data: 55,
+ object: Undefined,
+ entries: ...SmiGeneratorIterator{value: 3},
+ more_entries: ...InternalClassStructElementGeneratorIterator {
+ value: 1
+ }
+ };
+
+ assert(object2.dummy1 == 44);
+ assert(object2.dummy2 == 45);
+ assert(object2.count == 3);
+ assert(object2.data == 55);
+ assert(object2.object == Undefined);
+ assert(object2.entries[0] == 3);
+ assert(object2.entries[1] == 4);
+ assert(object2.entries[2] == 5);
+ assert(object2.more_entries[0].a == 1);
+ assert(object2.more_entries[0].b == 2);
+ assert(object2.more_entries[1].a == 3);
+ assert(object2.more_entries[1].b == 4);
+ assert(object2.more_entries[2].a == 5);
+ assert(object2.more_entries[2].b == 6);
+ }
+
+ @export
+ macro TestFullyGeneratedClassFromCpp(): ExportedSubClass {
+ return new
+ ExportedSubClass{a: Null, b: Null, c_field: 7, d_field: 8, e_field: 9};
+ }
}
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index ab407c2fe6..f3b060e5da 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -7,7 +7,7 @@ import("../../gni/v8.gni")
if (is_fuchsia) {
import("//build/config/fuchsia/rules.gni")
- fuchsia_package("v8_unittests_pkg") {
+ cr_fuchsia_package("v8_unittests_pkg") {
testonly = true
binary = ":unittests"
package_name_override = "v8_unittests"
@@ -20,6 +20,52 @@ if (is_fuchsia) {
}
}
+# Stand-alone target for C++ GC unittests. It exists to ensure that they
+# build without V8 as well; they are also included in the regular unittests
+# target for simplicity.
+v8_executable("cppgc_unittests") {
+ testonly = true
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ sources = [ "heap/cppgc/run-all-unittests.cc" ]
+
+ deps = [
+ ":cppgc_unittests_sources",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+}
+
+v8_source_set("cppgc_unittests_sources") {
+ testonly = true
+
+ sources = [
+ "heap/cppgc/allocation_unittest.cc",
+ "heap/cppgc/finalizer-trait_unittest.cc",
+ "heap/cppgc/garbage-collected_unittest.cc",
+ "heap/cppgc/gc-info_unittest.cc",
+ "heap/cppgc/heap-object-header_unittest.cc",
+ "heap/cppgc/stack_unittest.cc",
+ "heap/cppgc/tests.cc",
+ "heap/cppgc/tests.h",
+ ]
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ deps = [
+ "../..:cppgc_for_testing",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+}
+
v8_executable("unittests") {
testonly = true
@@ -29,6 +75,7 @@ v8_executable("unittests") {
#}],
deps = [
+ ":cppgc_unittests_sources",
":unittests_sources",
"../..:v8_for_testing",
"../..:v8_libbase",
@@ -170,12 +217,13 @@ v8_source_set("unittests_sources") {
"heap/heap-controller-unittest.cc",
"heap/heap-unittest.cc",
"heap/item-parallel-job-unittest.cc",
+ "heap/local-heap-unittest.cc",
"heap/marking-unittest.cc",
"heap/marking-worklist-unittest.cc",
"heap/memory-reducer-unittest.cc",
"heap/object-stats-unittest.cc",
"heap/off-thread-factory-unittest.cc",
- "heap/scavenge-job-unittest.cc",
+ "heap/safepoint-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/spaces-unittest.cc",
"heap/unmapper-unittest.cc",
diff --git a/deps/v8/test/unittests/api/access-check-unittest.cc b/deps/v8/test/unittests/api/access-check-unittest.cc
index 3b63666f4b..cdcce68efd 100644
--- a/deps/v8/test/unittests/api/access-check-unittest.cc
+++ b/deps/v8/test/unittests/api/access-check-unittest.cc
@@ -19,8 +19,7 @@ bool AccessCheck(Local<Context> accessing_context,
MaybeLocal<Value> CompileRun(Isolate* isolate, const char* source) {
Local<String> source_string =
- String::NewFromUtf8(isolate, source, NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8(isolate, source).ToLocalChecked();
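+  // NewFromUtf8's NewStringType parameter now has a default (kNormal), which
+  // is why the explicit enum argument is dropped throughout this update.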
Local<Context> context = isolate->GetCurrentContext();
Local<Script> script =
Script::Compile(context, source_string).ToLocalChecked();
@@ -28,9 +27,7 @@ MaybeLocal<Value> CompileRun(Isolate* isolate, const char* source) {
}
v8::Local<v8::String> v8_str(const char* x) {
- return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x).ToLocalChecked();
}
} // namespace
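
The conversions in this file (and the ones that follow) track the V8 8.3 string API, where the NewStringType argument to String::NewFromUtf8 is optional and defaults to kNormal, and NewFromUtf8Literal handles compile-time literals. A minimal sketch of the two forms, assuming a live v8::Isolate* named isolate:

    // Runtime strings: the result is still a MaybeLocal that must be
    // checked, but kNormal no longer needs to be spelled out.
    v8::Local<v8::String> s =
        v8::String::NewFromUtf8(isolate, "hello").ToLocalChecked();
    // Compile-time literals: NewFromUtf8Literal returns a Local
    // directly, so there is no ToLocalChecked() step.
    v8::Local<v8::String> t =
        v8::String::NewFromUtf8Literal(isolate, "hello");
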
diff --git a/deps/v8/test/unittests/api/isolate-unittest.cc b/deps/v8/test/unittests/api/isolate-unittest.cc
index cda251f775..429d70617a 100644
--- a/deps/v8/test/unittests/api/isolate-unittest.cc
+++ b/deps/v8/test/unittests/api/isolate-unittest.cc
@@ -75,8 +75,7 @@ using IncumbentContextTest = TestWithIsolate;
// scenarios.
TEST_F(IncumbentContextTest, Basic) {
auto Str = [&](const char* s) {
- return String::NewFromUtf8(isolate(), s, NewStringType::kNormal)
- .ToLocalChecked();
+ return String::NewFromUtf8(isolate(), s).ToLocalChecked();
};
auto Run = [&](Local<Context> context, const char* script) {
Context::Scope scope(context);
diff --git a/deps/v8/test/unittests/api/remote-object-unittest.cc b/deps/v8/test/unittests/api/remote-object-unittest.cc
index 39434a8f9a..5fcc78bbe1 100644
--- a/deps/v8/test/unittests/api/remote-object-unittest.cc
+++ b/deps/v8/test/unittests/api/remote-object-unittest.cc
@@ -105,8 +105,7 @@ TEST_F(RemoteObjectTest, ClassOf) {
AccessCheck, NamedPropertyHandlerConfiguration(NamedGetter),
IndexedPropertyHandlerConfiguration());
constructor_template->SetClassName(
- String::NewFromUtf8(isolate(), "test_class", NewStringType::kNormal)
- .ToLocalChecked());
+ String::NewFromUtf8Literal(isolate(), "test_class"));
Local<Object> remote_object =
constructor_template->NewRemoteInstance().ToLocalChecked();
diff --git a/deps/v8/test/unittests/api/v8-object-unittest.cc b/deps/v8/test/unittests/api/v8-object-unittest.cc
index eb72d45263..a3c0c2574c 100644
--- a/deps/v8/test/unittests/api/v8-object-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-object-unittest.cc
@@ -20,9 +20,7 @@ TEST_F(ObjectTest, SetAccessorWhenUnconfigurablePropAlreadyDefined) {
TryCatch try_catch(isolate());
Local<Object> global = context()->Global();
- Local<String> property_name =
- String::NewFromUtf8(isolate(), "foo", NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> property_name = String::NewFromUtf8Literal(isolate(), "foo");
PropertyDescriptor prop_desc;
prop_desc.set_configurable(false);
@@ -51,8 +49,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPrototype) {
Local<FunctionTemplate> function_template = FunctionTemplate::New(isolate());
Local<Signature> signature = Signature::New(isolate(), function_template);
Local<String> property_key =
- String::NewFromUtf8(isolate(), "property", NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8Literal(isolate(), "property");
Local<FunctionTemplate> get_or_set = FunctionTemplate::New(
isolate(),
[](const FunctionCallbackInfo<Value>& info) {
@@ -72,8 +69,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPrototype) {
Local<Function> interface_for_prototype =
function_template->GetFunction(prototype_context).ToLocalChecked();
Local<String> prototype_key =
- String::NewFromUtf8(isolate(), "prototype", NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8Literal(isolate(), "prototype");
Local<Object> prototype =
interface_for_prototype->Get(caller_context, prototype_key)
.ToLocalChecked()
@@ -91,9 +87,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPrototype) {
EXPECT_EQ(2, call_count);
// Test with a compiled version.
- Local<String> object_key =
- String::NewFromUtf8(isolate(), "object", NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> object_key = String::NewFromUtf8Literal(isolate(), "object");
caller_context->Global()->Set(caller_context, object_key, object).ToChecked();
const char script[] =
"function f() { object.property; object.property = 0; } "
@@ -103,10 +97,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPrototype) {
"f();";
Context::Scope scope(caller_context);
internal::FLAG_allow_natives_syntax = true;
- Script::Compile(
- caller_context,
- String::NewFromUtf8(isolate(), script, v8::NewStringType::kNormal)
- .ToLocalChecked())
+ Script::Compile(caller_context, String::NewFromUtf8Literal(isolate(), script))
.ToLocalChecked()
->Run(caller_context)
.ToLocalChecked();
@@ -123,8 +114,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPlatformObject) {
Local<FunctionTemplate> function_template = FunctionTemplate::New(isolate());
Local<Signature> signature = Signature::New(isolate(), function_template);
Local<String> property_key =
- String::NewFromUtf8(isolate(), "property", NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8Literal(isolate(), "property");
Local<FunctionTemplate> get_or_set = FunctionTemplate::New(
isolate(),
[](const FunctionCallbackInfo<Value>& info) {
@@ -149,9 +139,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPlatformObject) {
EXPECT_EQ(2, call_count);
// Test with a compiled version.
- Local<String> object_key =
- String::NewFromUtf8(isolate(), "object", NewStringType::kNormal)
- .ToLocalChecked();
+ Local<String> object_key = String::NewFromUtf8Literal(isolate(), "object");
caller_context->Global()->Set(caller_context, object_key, object).ToChecked();
const char script[] =
"function f() { object.property; object.property = 0; } "
@@ -161,10 +149,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPlatformObject) {
"f();";
Context::Scope scope(caller_context);
internal::FLAG_allow_natives_syntax = true;
- Script::Compile(
- caller_context,
- String::NewFromUtf8(isolate(), script, v8::NewStringType::kNormal)
- .ToLocalChecked())
+ Script::Compile(caller_context, String::NewFromUtf8Literal(isolate(), script))
.ToLocalChecked()
->Run(caller_context)
.ToLocalChecked();
@@ -180,8 +165,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnInterface) {
Local<FunctionTemplate> function_template = FunctionTemplate::New(isolate());
Local<String> property_key =
- String::NewFromUtf8(isolate(), "property", NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8Literal(isolate(), "property");
Local<FunctionTemplate> get_or_set = FunctionTemplate::New(
isolate(),
[](const FunctionCallbackInfo<Value>& info) {
@@ -204,8 +188,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnInterface) {
// Test with a compiled version.
Local<String> interface_key =
- String::NewFromUtf8(isolate(), "Interface", NewStringType::kNormal)
- .ToLocalChecked();
+ String::NewFromUtf8Literal(isolate(), "Interface");
caller_context->Global()
->Set(caller_context, interface_key, interface)
.ToChecked();
@@ -217,10 +200,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnInterface) {
"f();";
Context::Scope scope(caller_context);
internal::FLAG_allow_natives_syntax = true;
- Script::Compile(
- caller_context,
- String::NewFromUtf8(isolate(), script, v8::NewStringType::kNormal)
- .ToLocalChecked())
+ Script::Compile(caller_context, String::NewFromUtf8Literal(isolate(), script))
.ToLocalChecked()
->Run(caller_context)
.ToLocalChecked();
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
index 76dd04d77c..2c0c01701c 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
@@ -32,14 +32,15 @@ class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -49,8 +50,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -60,7 +62,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
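
The same three-part change repeats across every per-architecture assembler test below: the TurboAssembler now receives a real isolate, set_root_array_available(false) keeps the generated stub from assuming the root register is initialized (preserving the old isolate-independent code), and GetCode is handed the isolate as well. In these files the __ shorthand is conventionally defined near the top, outside the hunks shown:

    // Conventional shorthand in V8 assembler tests: every "__ foo(...)"
    // below is a call on the TurboAssembler instance.
    #define __ tasm.
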
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
index 02a6a8e763..0a9ac748c0 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
@@ -32,14 +32,17 @@ class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
+ __ CodeEntry();
+
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -49,10 +52,13 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
+ __ CodeEntry();
+
// Fail if the first parameter is 17.
__ Mov(w1, Immediate(17));
__ Cmp(w0, w1); // 1st parameter is in {w0}.
@@ -60,7 +66,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
@@ -113,6 +119,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ CodeEntry();
__ Push(x0, padreg);
__ Mov(test_case.object, x1);
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
index 548cb34fc7..f0cb96d47d 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
@@ -5,6 +5,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
namespace v8 {
@@ -16,26 +17,30 @@ namespace internal {
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-TEST(TurboAssemblerTest, TestHardAbort) {
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
- auto f = GeneratedCode<void>::FromBuffer(nullptr, buffer->start());
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST(TurboAssemblerTest, TestCheck) {
+TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -45,9 +50,9 @@ TEST(TurboAssemblerTest, TestCheck) {
__ ret(0);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
- auto f = GeneratedCode<void, int>::FromBuffer(nullptr, buffer->start());
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
index 5ea6b2f3f8..b8a645e6a7 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
@@ -22,14 +22,15 @@ class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -39,8 +40,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
// Fail if the first parameter (in {a0}) is 17.
@@ -48,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
index fe9e815981..c954ffcc65 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
@@ -22,14 +22,15 @@ class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -39,8 +40,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
// Fail if the first parameter (in {a0}) is 17.
@@ -48,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
index 51744bd92d..08c205c2ea 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
@@ -22,15 +22,15 @@ class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
-
+ __ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -40,9 +40,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
-
+ __ set_root_array_available(false);
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
index 959ec03157..b0c398f571 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
@@ -22,14 +22,15 @@ class TurboAssemblerTest : public TestWithIsolate {};
TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -39,8 +40,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -50,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
__ Ret();
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
// We need an isolate here to execute in the simulator.
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
index 621f598f75..43dd6b79d6 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
@@ -5,6 +5,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
namespace v8 {
@@ -16,26 +17,30 @@ namespace internal {
// a buffer and executing them. These tests do not initialize the
// V8 library, create a context, or use any V8 objects.
-TEST(TurboAssemblerTest, TestHardAbort) {
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
__ Abort(AbortReason::kNoReason);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
- auto f = GeneratedCode<void>::FromBuffer(nullptr, buffer->start());
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
}
-TEST(TurboAssemblerTest, TestCheck) {
+TEST_F(TurboAssemblerTest, TestCheck) {
auto buffer = AllocateAssemblerBuffer();
- TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
+ __ set_root_array_available(false);
__ set_abort_hard(true);
// Fail if the first parameter is 17.
@@ -45,9 +50,9 @@ TEST(TurboAssemblerTest, TestCheck) {
__ ret(0);
CodeDesc desc;
- tasm.GetCode(nullptr, &desc);
+ tasm.GetCode(isolate(), &desc);
buffer->MakeExecutable();
- auto f = GeneratedCode<void, int>::FromBuffer(nullptr, buffer->start());
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
f.Call(0);
f.Call(18);
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index 27154b3c24..b447778b49 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -83,5 +83,18 @@ TEST_F(ThreadLocalStorageTest, DoTest) {
Join();
}
+TEST(StackTest, GetStackStart) { EXPECT_NE(nullptr, Stack::GetStackStart()); }
+
+TEST(StackTest, GetCurrentStackPosition) {
+ EXPECT_NE(nullptr, Stack::GetCurrentStackPosition());
+}
+
+TEST(StackTest, StackVariableInBounds) {
+ void* dummy;
+ ASSERT_GT(Stack::GetStackStart(), Stack::GetCurrentStackPosition());
+ EXPECT_GT(Stack::GetStackStart(), Stack::GetStackSlot(&dummy));
+ EXPECT_LT(Stack::GetCurrentStackPosition(), Stack::GetStackSlot(&dummy));
+}
+
} // namespace base
} // namespace v8
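
The new StackTest cases encode that the stack grows toward lower addresses, so GetStackStart() (the highest address) bounds every live slot from above while the current stack position bounds it from below. The same invariant can be observed with a standalone program; frame layout is compiler-dependent, so this is an illustration rather than a guarantee (and V8's Stack helpers are not used here):

    #include <cassert>
    #include <cstdint>

    // On a downward-growing stack, a deeper frame's local typically sits
    // at a lower address than the caller's local.
    void inner(const int* outer_local) {
      int inner_local = 0;
      assert(reinterpret_cast<uintptr_t>(&inner_local) <
             reinterpret_cast<uintptr_t>(outer_local));
    }

    int main() {
      int outer_local = 0;
      inner(&outer_local);
      return 0;
    }
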
diff --git a/deps/v8/test/unittests/base/region-allocator-unittest.cc b/deps/v8/test/unittests/base/region-allocator-unittest.cc
index caec4894c9..df154ff4f3 100644
--- a/deps/v8/test/unittests/base/region-allocator-unittest.cc
+++ b/deps/v8/test/unittests/base/region-allocator-unittest.cc
@@ -14,9 +14,7 @@ using Address = RegionAllocator::Address;
using v8::internal::KB;
using v8::internal::MB;
-class RegionAllocatorTest : public ::testing::TestWithParam<int> {};
-
-TEST_P(RegionAllocatorTest, SimpleAllocateRegionAt) {
+TEST(RegionAllocatorTest, SimpleAllocateRegionAt) {
const size_t kPageSize = 4 * KB;
const size_t kPageCount = 16;
const size_t kSize = kPageSize * kPageCount;
@@ -50,7 +48,7 @@ TEST_P(RegionAllocatorTest, SimpleAllocateRegionAt) {
CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
}
-TEST_P(RegionAllocatorTest, SimpleAllocateRegion) {
+TEST(RegionAllocatorTest, SimpleAllocateRegion) {
const size_t kPageSize = 4 * KB;
const size_t kPageCount = 16;
const size_t kSize = kPageSize * kPageCount;
@@ -79,7 +77,7 @@ TEST_P(RegionAllocatorTest, SimpleAllocateRegion) {
CHECK_EQ(ra.free_size(), 0);
}
-TEST_P(RegionAllocatorTest, AllocateRegionRandom) {
+TEST(RegionAllocatorTest, AllocateRegionRandom) {
const size_t kPageSize = 8 * KB;
const size_t kPageCountLog = 16;
const size_t kPageCount = (size_t{1} << kPageCountLog);
@@ -87,7 +85,7 @@ TEST_P(RegionAllocatorTest, AllocateRegionRandom) {
const Address kBegin = static_cast<Address>(153 * MB);
const Address kEnd = kBegin + kSize;
- base::RandomNumberGenerator rng(GetParam());
+ base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
RegionAllocator ra(kBegin, kSize, kPageSize);
std::set<Address> allocated_pages;
@@ -123,7 +121,7 @@ TEST_P(RegionAllocatorTest, AllocateRegionRandom) {
CHECK_EQ(ra.AllocateRegion(kPageSize), RegionAllocator::kAllocationFailure);
}
-TEST_P(RegionAllocatorTest, AllocateBigRegions) {
+TEST(RegionAllocatorTest, AllocateBigRegions) {
const size_t kPageSize = 4 * KB;
const size_t kPageCountLog = 10;
const size_t kPageCount = (size_t{1} << kPageCountLog) - 1;
@@ -153,7 +151,7 @@ TEST_P(RegionAllocatorTest, AllocateBigRegions) {
CHECK_EQ(ra.free_size(), 0);
}
-TEST_P(RegionAllocatorTest, MergeLeftToRightCoalecsingRegions) {
+TEST(RegionAllocatorTest, MergeLeftToRightCoalecsingRegions) {
const size_t kPageSize = 4 * KB;
const size_t kPageCountLog = 10;
const size_t kPageCount = (size_t{1} << kPageCountLog);
@@ -187,8 +185,8 @@ TEST_P(RegionAllocatorTest, MergeLeftToRightCoalecsingRegions) {
CHECK_EQ(ra.free_size(), 0);
}
-TEST_P(RegionAllocatorTest, MergeRightToLeftCoalecsingRegions) {
- base::RandomNumberGenerator rng(GetParam());
+TEST(RegionAllocatorTest, MergeRightToLeftCoalecsingRegions) {
+ base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
const size_t kPageSize = 4 * KB;
const size_t kPageCountLog = 10;
const size_t kPageCount = (size_t{1} << kPageCountLog);
@@ -236,7 +234,7 @@ TEST_P(RegionAllocatorTest, MergeRightToLeftCoalecsingRegions) {
CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
}
-TEST_P(RegionAllocatorTest, Fragmentation) {
+TEST(RegionAllocatorTest, Fragmentation) {
const size_t kPageSize = 64 * KB;
const size_t kPageCount = 9;
const size_t kSize = kPageSize * kPageCount;
@@ -283,7 +281,7 @@ TEST_P(RegionAllocatorTest, Fragmentation) {
CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
}
-TEST_P(RegionAllocatorTest, FindRegion) {
+TEST(RegionAllocatorTest, FindRegion) {
const size_t kPageSize = 4 * KB;
const size_t kPageCount = 16;
const size_t kSize = kPageSize * kPageCount;
@@ -322,7 +320,7 @@ TEST_P(RegionAllocatorTest, FindRegion) {
}
}
-TEST_P(RegionAllocatorTest, TrimRegion) {
+TEST(RegionAllocatorTest, TrimRegion) {
const size_t kPageSize = 4 * KB;
const size_t kPageCount = 64;
const size_t kSize = kPageSize * kPageCount;
@@ -352,7 +350,5 @@ TEST_P(RegionAllocatorTest, TrimRegion) {
CHECK_EQ(ra.AllocateRegion(kSize), kBegin);
}
-INSTANTIATE_TEST_SUITE_P(RegionAllocatorTest, RegionAllocatorTest,
- testing::Values(123));
} // namespace base
} // namespace v8
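
Dropping the TEST_P parameterization works because gtest already exposes the --gtest_random_seed value as ::testing::FLAGS_gtest_random_seed, so a value-parameterized fixture is no longer needed just to inject a seed. A minimal sketch of the resulting pattern (RandomizedTest is a made-up name):

    #include "testing/gtest/include/gtest/gtest.h"

    TEST(RandomizedTest, UsesGtestSeed) {
      // For reproducible runs, pass --gtest_random_seed=123 on the
      // command line; the flag value feeds the RNG directly.
      v8::base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
      // ... drive the randomized checks from rng ...
    }
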
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index 95191819a8..9607aa94a9 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -338,7 +338,8 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
Node* context = m.Parameter(2);
ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
- ZoneVector<MachineType> empty_types(zone());
+ ZoneVector<MachineType> tagged_type(1, MachineType::AnyTagged(), zone());
+ ZoneVector<MachineType> empty_type(zone());
auto call_descriptor = Linkage::GetJSCallDescriptor(
zone(), false, 1,
@@ -349,9 +350,10 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
m.Int32Constant(1));
Node* locals = m.AddNode(
- m.common()->TypedStateValues(&empty_types, SparseInputMask::Dense()));
+ m.common()->TypedStateValues(&empty_type, SparseInputMask::Dense()));
Node* stack = m.AddNode(
- m.common()->TypedStateValues(&empty_types, SparseInputMask::Dense()));
+ m.common()->TypedStateValues(&tagged_type, SparseInputMask::Dense()),
+ m.UndefinedConstant());
Node* context_sentinel = m.Int32Constant(0);
Node* state_node = m.AddNode(
m.common()->FrameState(bailout_id, OutputFrameStateCombine::PokeAt(0),
@@ -487,7 +489,6 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
Node* context2 = m.Int32Constant(46);
ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
- ZoneVector<MachineType> int32x2_type(2, MachineType::Int32(), zone());
ZoneVector<MachineType> float64_type(1, MachineType::Float64(), zone());
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
@@ -518,8 +519,8 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
m.common()->TypedStateValues(&float64_type, SparseInputMask::Dense()),
m.Float64Constant(0.25));
Node* stack2 = m.AddNode(
- m.common()->TypedStateValues(&int32x2_type, SparseInputMask::Dense()),
- m.Int32Constant(44), m.Int32Constant(45));
+ m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
+ m.Int32Constant(44));
Node* state_node =
m.AddNode(m.common()->FrameState(bailout_id_before,
OutputFrameStateCombine::PokeAt(0),
@@ -550,7 +551,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
1 + // Code object.
1 + // Poison index.
1 + // Frame state deopt id
- 6 + // One input for each value in frame state + context.
+ 5 + // One input for each value in frame state + context.
5 + // One input for each value in the parent frame state + context.
1 + // Function.
1; // Context.
@@ -576,17 +577,16 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
// Values from the nested frame.
EXPECT_EQ(1u, desc_before->parameters_count());
EXPECT_EQ(1u, desc_before->locals_count());
- EXPECT_EQ(2u, desc_before->stack_count());
+ EXPECT_EQ(1u, desc_before->stack_count());
EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(9)));
EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(10)));
EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(11)));
EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(12)));
- EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(13)));
// Function.
- EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(14)));
+ EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(13)));
// Context.
- EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(15)));
+ EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(14)));
// Continuation.
EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index d3c81344f2..c6fe8948bc 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -256,7 +256,7 @@ TEST_F(BytecodeAnalysisTest, SimpleLoop) {
expected_liveness.emplace_back("L..L", "L.L.");
loop_builder.BindContinueTarget();
- loop_builder.JumpToHeader(0);
+ loop_builder.JumpToHeader(0, nullptr);
expected_liveness.emplace_back("L.L.", "L.L.");
}
@@ -361,7 +361,7 @@ TEST_F(BytecodeAnalysisTest, DiamondInLoop) {
builder.Bind(&end_label);
loop_builder.BindContinueTarget();
- loop_builder.JumpToHeader(0);
+ loop_builder.JumpToHeader(0, nullptr);
expected_liveness.emplace_back("L...", "L...");
}
@@ -433,12 +433,12 @@ TEST_F(BytecodeAnalysisTest, KillingLoopInsideLoop) {
expected_liveness.emplace_back("LL.L", "LL..");
inner_loop_builder.BindContinueTarget();
- inner_loop_builder.JumpToHeader(1);
+ inner_loop_builder.JumpToHeader(1, &loop_builder);
expected_liveness.emplace_back(".L..", ".L..");
}
loop_builder.BindContinueTarget();
- loop_builder.JumpToHeader(0);
+ loop_builder.JumpToHeader(0, nullptr);
expected_liveness.emplace_back("LL..", "LL..");
}
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index dee5c56e82..4f3d05173d 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -80,6 +80,12 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
return reducer.Reduce(node);
}
+ Node* UseValue(Node* node) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ Node* zero = graph()->NewNode(common()->NumberConstant(0));
+ return graph()->NewNode(common()->Return(), zero, node, start, start);
+ }
+
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
JSHeapBroker* broker() { return &broker_; }
@@ -91,20 +97,26 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
TEST_F(ConstantFoldingReducerTest, ParameterWithMinusZero) {
{
- Reduction r = Reduce(Parameter(
- Type::NewConstant(broker(), factory()->minus_zero_value(), zone())));
+ Node* node = Parameter(
+ Type::Constant(broker(), factory()->minus_zero_value(), zone()));
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
+ EXPECT_THAT(use_value->InputAt(1), IsNumberConstant(-0.0));
}
{
- Reduction r = Reduce(Parameter(Type::MinusZero()));
+ Node* node = Parameter(Type::MinusZero());
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
+ EXPECT_THAT(use_value->InputAt(1), IsNumberConstant(-0.0));
}
{
- Reduction r = Reduce(Parameter(Type::Union(
+ Node* node = Parameter(Type::Union(
Type::MinusZero(),
- Type::NewConstant(broker(), factory()->NewNumber(0), zone()), zone())));
+ Type::Constant(broker(), factory()->NewNumber(0), zone()), zone()));
+ UseValue(node);
+ Reduction r = Reduce(node);
EXPECT_FALSE(r.Changed());
}
}
@@ -112,14 +124,18 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithMinusZero) {
TEST_F(ConstantFoldingReducerTest, ParameterWithNull) {
Handle<HeapObject> null = factory()->null_value();
{
- Reduction r = Reduce(Parameter(Type::NewConstant(broker(), null, zone())));
+ Node* node = Parameter(Type::Constant(broker(), null, zone()));
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(null));
+ EXPECT_THAT(use_value->InputAt(1), IsHeapConstant(null));
}
{
- Reduction r = Reduce(Parameter(Type::Null()));
+ Node* node = Parameter(Type::Null());
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(null));
+ EXPECT_THAT(use_value->InputAt(1), IsHeapConstant(null));
}
}
@@ -129,51 +145,62 @@ TEST_F(ConstantFoldingReducerTest, ParameterWithNaN) {
std::numeric_limits<double>::signaling_NaN()};
TRACED_FOREACH(double, nan, kNaNs) {
Handle<Object> constant = factory()->NewNumber(nan);
- Reduction r =
- Reduce(Parameter(Type::NewConstant(broker(), constant, zone())));
+ Node* node = Parameter(Type::Constant(broker(), constant, zone()));
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
+ EXPECT_THAT(use_value->InputAt(1), IsNumberConstant(IsNaN()));
}
{
- Reduction r = Reduce(
- Parameter(Type::NewConstant(broker(), factory()->nan_value(), zone())));
+ Node* node =
+ Parameter(Type::Constant(broker(), factory()->nan_value(), zone()));
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
+ EXPECT_THAT(use_value->InputAt(1), IsNumberConstant(IsNaN()));
}
{
- Reduction r = Reduce(Parameter(Type::NaN()));
+ Node* node = Parameter(Type::NaN());
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
+ EXPECT_THAT(use_value->InputAt(1), IsNumberConstant(IsNaN()));
}
}
TEST_F(ConstantFoldingReducerTest, ParameterWithPlainNumber) {
TRACED_FOREACH(double, value, kFloat64Values) {
Handle<Object> constant = factory()->NewNumber(value);
- Reduction r =
- Reduce(Parameter(Type::NewConstant(broker(), constant, zone())));
+ Node* node = Parameter(Type::Constant(broker(), constant, zone()));
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(value));
+ EXPECT_THAT(use_value->InputAt(1), IsNumberConstant(value));
}
TRACED_FOREACH(double, value, kIntegerValues) {
- Reduction r = Reduce(Parameter(Type::Range(value, value, zone())));
+ Node* node = Parameter(Type::Range(value, value, zone()));
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(value));
+ EXPECT_THAT(use_value->InputAt(1), IsNumberConstant(value));
}
}
TEST_F(ConstantFoldingReducerTest, ParameterWithUndefined) {
Handle<HeapObject> undefined = factory()->undefined_value();
{
- Reduction r = Reduce(Parameter(Type::Undefined()));
+ Node* node = Parameter(Type::Undefined());
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
+ EXPECT_THAT(use_value->InputAt(1), IsUndefinedConstant());
}
{
- Reduction r =
- Reduce(Parameter(Type::NewConstant(broker(), undefined, zone())));
+ Node* node = Parameter(Type::Constant(broker(), undefined, zone()));
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
+ EXPECT_THAT(use_value->InputAt(1), IsUndefinedConstant());
}
}
@@ -193,8 +220,8 @@ TEST_F(ConstantFoldingReducerTest, ToBooleanWithFalsish) {
Type::Union(
Type::Undetectable(),
Type::Union(
- Type::NewConstant(
- broker(), factory()->false_value(), zone()),
+ Type::Constant(broker(), factory()->false_value(),
+ zone()),
Type::Range(0.0, 0.0, zone()), zone()),
zone()),
zone()),
@@ -202,28 +229,34 @@ TEST_F(ConstantFoldingReducerTest, ToBooleanWithFalsish) {
zone()),
zone()),
0);
- Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ Node* node = graph()->NewNode(simplified()->ToBoolean(), input);
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFalseConstant());
+ EXPECT_THAT(use_value->InputAt(1), IsFalseConstant());
}
TEST_F(ConstantFoldingReducerTest, ToBooleanWithTruish) {
Node* input = Parameter(
Type::Union(
- Type::NewConstant(broker(), factory()->true_value(), zone()),
+ Type::Constant(broker(), factory()->true_value(), zone()),
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
zone()),
0);
- Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ Node* node = graph()->NewNode(simplified()->ToBoolean(), input);
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTrueConstant());
+ EXPECT_THAT(use_value->InputAt(1), IsTrueConstant());
}
TEST_F(ConstantFoldingReducerTest, ToBooleanWithNonZeroPlainNumber) {
Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
- Reduction r = Reduce(graph()->NewNode(simplified()->ToBoolean(), input));
+ Node* node = graph()->NewNode(simplified()->ToBoolean(), input);
+ Node* use_value = UseValue(node);
+ Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTrueConstant());
+ EXPECT_THAT(use_value->InputAt(1), IsTrueConstant());
}
} // namespace constant_folding_reducer_unittest
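
The restructuring throughout this file reflects that the reducer now rewires the uses of a constant-typed node rather than returning the folded constant as the replacement, so each case materializes a use first and asserts on it. The shared shape, sketched with the helpers defined above:

    // Build a Return that consumes the node, reduce, then inspect the
    // Return's value input (index 1, after the pop-count operand), which
    // is where the folded constant must now appear.
    Node* node = Parameter(Type::Constant(broker(), constant, zone()));
    Node* use_value = UseValue(node);
    Reduction r = Reduce(node);
    ASSERT_TRUE(r.Changed());
    EXPECT_THAT(use_value->InputAt(1), IsHeapConstant(constant));
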
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 0008eadaf9..0d825dc60b 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -66,7 +66,7 @@ Node* GraphTest::NumberConstant(volatile double value) {
Node* GraphTest::HeapConstant(const Handle<HeapObject>& value) {
Node* node = graph()->NewNode(common()->HeapConstant(value));
- Type type = Type::NewConstant(broker(), value, zone());
+ Type type = Type::Constant(broker(), value, zone());
NodeProperties::SetType(node, type);
return node;
}
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 7e927ea078..30e24b0aa4 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -1016,10 +1016,8 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
StubCallMode::kCallCodeObject); // stub call mode
auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
- lowering_special_case->bigint_to_i64_call_descriptor =
- bigint_to_i64_call_descriptor;
- lowering_special_case->bigint_to_i32_pair_call_descriptor =
- bigint_to_i32_pair_call_descriptor;
+ lowering_special_case->replacements.insert(
+ {bigint_to_i64_call_descriptor, bigint_to_i32_pair_call_descriptor});
Node* call_node =
graph()->NewNode(common()->Call(bigint_to_i64_call_descriptor), target,
@@ -1064,10 +1062,8 @@ TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseI64ToBigInt) {
StubCallMode::kCallCodeObject); // stub call mode
auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
- lowering_special_case->i64_to_bigint_call_descriptor =
- i64_to_bigint_call_descriptor;
- lowering_special_case->i32_pair_to_bigint_call_descriptor =
- i32_pair_to_bigint_call_descriptor;
+ lowering_special_case->replacements.insert(
+ {i64_to_bigint_call_descriptor, i32_pair_to_bigint_call_descriptor});
Node* call = graph()->NewNode(common()->Call(i64_to_bigint_call_descriptor),
target, i64, start(), start());
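
Both special cases collapse into a single replacements map from the original call descriptor to its lowered counterpart, so further descriptor pairs can be added without growing the struct. The resulting usage, as a sketch with the descriptors built in the tests above:

    auto lowering_special_case = std::make_unique<Int64LoweringSpecialCase>();
    // Key: the 64-bit descriptor the graph was built with; value: the
    // 32-bit-pair descriptor the lowering should substitute.
    lowering_special_case->replacements.insert(
        {bigint_to_i64_call_descriptor, bigint_to_i32_pair_call_descriptor});
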
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index eed74f6181..fe5a02e3f2 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -174,8 +174,7 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithTheHole) {
Reduction r = Reduce(
graph()->NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
lhs, the_hole, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFalseConstant());
+ ASSERT_FALSE(r.Changed());
}
}
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index bcf0a7101c..c3659032cf 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -1058,6 +1058,76 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
}
}
+// -----------------------------------------------------------------------------
+// Word32Equal
+
+TEST_F(MachineOperatorReducerTest,
+ Word32EqualWithShiftedMaskedValueAndConstant) {
+ // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(uint32_t, mask, kUint32Values) {
+ TRACED_FOREACH(uint32_t, rhs, kUint32Values) {
+ TRACED_FORRANGE(uint32_t, shift_bits, 1, 31) {
+ Node* node = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(),
+ graph()->NewNode(machine()->Word32Shr(), p0,
+ Uint32Constant(shift_bits)),
+ Uint32Constant(mask)),
+ Uint32Constant(rhs));
+ Reduction r = Reduce(node);
+ uint32_t new_mask = mask << shift_bits;
+ uint32_t new_rhs = rhs << shift_bits;
+ if (new_mask >> shift_bits == mask && new_rhs >> shift_bits == rhs) {
+ ASSERT_TRUE(r.Changed());
+ // The left-hand side of the equality is now a Word32And operation,
+ // unless the mask is zero, in which case the newly-created Word32And
+ // is immediately reduced away.
+ Matcher<Node*> lhs = mask == 0
+ ? IsInt32Constant(0)
+ : IsWord32And(p0, IsInt32Constant(new_mask));
+ EXPECT_THAT(r.replacement(),
+ IsWord32Equal(lhs, IsInt32Constant(new_rhs)));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Branch
+
+TEST_F(MachineOperatorReducerTest, BranchWithShiftedMaskedValue) {
+ // Branch condition (x >> K1) & K2 => x & (K2 << K1)
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(uint32_t, mask, kUint32Values) {
+ TRACED_FORRANGE(uint32_t, shift_bits, 1, 31) {
+ Node* node = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32And(),
+ graph()->NewNode(machine()->Word32Shr(), p0,
+ Uint32Constant(shift_bits)),
+ Uint32Constant(mask)),
+ graph()->start());
+ Reduction r = Reduce(node);
+ uint32_t new_mask = mask << shift_bits;
+ if (new_mask >> shift_bits == mask) {
+ ASSERT_TRUE(r.Changed());
+ // The branch condition is now a Word32And operation, unless the mask is
+ // zero, in which case the newly-created Word32And is immediately reduced
+ // away.
+ Matcher<Node*> lhs = mask == 0
+ ? IsInt32Constant(0)
+ : IsWord32And(p0, IsInt32Constant(new_mask));
+ EXPECT_THAT(r.replacement(), IsBranch(lhs, graph()->start()));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+ }
+}
// -----------------------------------------------------------------------------
// Int32Sub
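
The rewrite exercised by Word32EqualWithShiftedMaskedValueAndConstant is only sound when shifting the mask and the constant left by the shift amount loses no bits, which is exactly what the new_mask >> shift_bits == mask guard checks. The identity can be verified exhaustively over a small domain with a standalone program (this mirrors the test's arithmetic and is not V8 code):

    #include <cassert>
    #include <cstdint>

    // ((x >> K1) & K2) == K3  <=>  (x & (K2 << K1)) == (K3 << K1),
    // provided K2 << K1 and K3 << K1 drop no bits.
    int main() {
      const uint32_t k1 = 4, k2 = 0xB, k3 = 0xB;
      assert((k2 << k1) >> k1 == k2 && (k3 << k1) >> k1 == k3);
      for (uint32_t x = 0; x < (1u << 16); ++x) {
        const bool lhs = ((x >> k1) & k2) == k3;
        const bool rhs = (x & (k2 << k1)) == (k3 << k1);
        assert(lhs == rhs);
      }
      return 0;
    }
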
diff --git a/deps/v8/test/unittests/compiler/node-cache-unittest.cc b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
index 10118c3a41..777652db1b 100644
--- a/deps/v8/test/unittests/compiler/node-cache-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
@@ -17,13 +17,13 @@ namespace node_cache_unittest {
using NodeCacheTest = GraphTest;
TEST_F(NodeCacheTest, Int32Constant_back_to_back) {
- Int32NodeCache cache;
+ Int32NodeCache cache(zone());
for (int i = -2000000000; i < 2000000000; i += 3315177) {
- Node** pos = cache.Find(zone(), i);
+ Node** pos = cache.Find(i);
ASSERT_TRUE(pos != nullptr);
for (int j = 0; j < 3; j++) {
- Node** npos = cache.Find(zone(), i);
+ Node** npos = cache.Find(i);
EXPECT_EQ(pos, npos);
}
}
@@ -31,38 +31,38 @@ TEST_F(NodeCacheTest, Int32Constant_back_to_back) {
TEST_F(NodeCacheTest, Int32Constant_five) {
- Int32NodeCache cache;
+ Int32NodeCache cache(zone());
int32_t constants[] = {static_cast<int32_t>(0x80000000), -77, 0, 1, -1};
Node* nodes[arraysize(constants)];
for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
Node* node = graph()->NewNode(common()->Int32Constant(k));
- *cache.Find(zone(), k) = nodes[i] = node;
+ *cache.Find(k) = nodes[i] = node;
}
for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
- EXPECT_EQ(nodes[i], *cache.Find(zone(), k));
+ EXPECT_EQ(nodes[i], *cache.Find(k));
}
}
TEST_F(NodeCacheTest, Int32Constant_hits) {
- Int32NodeCache cache;
+ Int32NodeCache cache(zone());
const int32_t kSize = 1500;
Node** nodes = zone()->NewArray<Node*>(kSize);
for (int i = 0; i < kSize; i++) {
int32_t v = i * -55;
nodes[i] = graph()->NewNode(common()->Int32Constant(v));
- *cache.Find(zone(), v) = nodes[i];
+ *cache.Find(v) = nodes[i];
}
int hits = 0;
for (int i = 0; i < kSize; i++) {
int32_t v = i * -55;
- Node** pos = cache.Find(zone(), v);
+ Node** pos = cache.Find(v);
if (*pos != nullptr) {
EXPECT_EQ(nodes[i], *pos);
hits++;
@@ -73,13 +73,13 @@ TEST_F(NodeCacheTest, Int32Constant_hits) {
TEST_F(NodeCacheTest, Int64Constant_back_to_back) {
- Int64NodeCache cache;
+ Int64NodeCache cache(zone());
for (int64_t i = -2000000000; i < 2000000000; i += 3315177) {
- Node** pos = cache.Find(zone(), i);
+ Node** pos = cache.Find(i);
ASSERT_TRUE(pos != nullptr);
for (int j = 0; j < 3; j++) {
- Node** npos = cache.Find(zone(), i);
+ Node** npos = cache.Find(i);
EXPECT_EQ(pos, npos);
}
}
@@ -87,20 +87,20 @@ TEST_F(NodeCacheTest, Int64Constant_back_to_back) {
TEST_F(NodeCacheTest, Int64Constant_hits) {
- Int64NodeCache cache;
+ Int64NodeCache cache(zone());
const int32_t kSize = 1500;
Node** nodes = zone()->NewArray<Node*>(kSize);
for (int i = 0; i < kSize; i++) {
int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
nodes[i] = graph()->NewNode(common()->Int32Constant(i));
- *cache.Find(zone(), v) = nodes[i];
+ *cache.Find(v) = nodes[i];
}
int hits = 0;
for (int i = 0; i < kSize; i++) {
int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
- Node** pos = cache.Find(zone(), v);
+ Node** pos = cache.Find(v);
if (*pos != nullptr) {
EXPECT_EQ(nodes[i], *pos);
hits++;
@@ -111,13 +111,13 @@ TEST_F(NodeCacheTest, Int64Constant_hits) {
TEST_F(NodeCacheTest, GetCachedNodes_int32) {
- Int32NodeCache cache;
+ Int32NodeCache cache(zone());
int32_t constants[] = {0, 311, 12, 13, 14, 555, -555, -44, -33, -22, -11,
0, 311, 311, 412, 412, 11, 11, -33, -33, -22, -11};
for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
- Node** pos = cache.Find(zone(), k);
+ Node** pos = cache.Find(k);
if (*pos != nullptr) {
ZoneVector<Node*> nodes(zone());
cache.GetCachedNodes(&nodes);
@@ -134,13 +134,13 @@ TEST_F(NodeCacheTest, GetCachedNodes_int32) {
TEST_F(NodeCacheTest, GetCachedNodes_int64) {
- Int64NodeCache cache;
+ Int64NodeCache cache(zone());
int64_t constants[] = {0, 311, 12, 13, 14, 555, -555, -44, -33, -22, -11,
0, 311, 311, 412, 412, 11, 11, -33, -33, -22, -11};
for (size_t i = 0; i < arraysize(constants); i++) {
int64_t k = constants[i];
- Node** pos = cache.Find(zone(), k);
+ Node** pos = cache.Find(k);
if (*pos != nullptr) {
ZoneVector<Node*> nodes(zone());
cache.GetCachedNodes(&nodes);
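
Every call site in this file follows the same mechanical change: the Zone is bound once at construction instead of being threaded through each lookup. The before/after shape, with zone() as provided by the GraphTest fixture:

    // Before: Int32NodeCache cache; ... cache.Find(zone(), key);
    // After: the zone is supplied up front, so lookups take only the key.
    Int32NodeCache cache(zone());
    Node** slot = cache.Find(key);
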
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index 62135dddce..9dda52ed8e 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -754,16 +754,18 @@ TEST_F(RedundancyEliminationTest, CheckedUint64Bounds) {
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* check1 = effect =
- graph()->NewNode(simplified()->CheckedUint64Bounds(feedback1), index,
- length, effect, control);
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedUint64Bounds(
+ feedback1, CheckBoundsParameters::kDeoptOnOutOfBounds),
+ index, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
- Node* check2 = effect =
- graph()->NewNode(simplified()->CheckedUint64Bounds(feedback2), index,
- length, effect, control);
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedUint64Bounds(
+ feedback2, CheckBoundsParameters::kDeoptOnOutOfBounds),
+ index, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
diff --git a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
index e6ba7696c5..c24b2f2d97 100644
--- a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
+++ b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
@@ -153,11 +153,13 @@ TEST_F(StateValuesIteratorTest, TreeFromVectorWithLiveness) {
// Check the tree contents with vector.
int i = 0;
- for (StateValuesAccess::TypedNode node : StateValuesAccess(values_node)) {
+ for (StateValuesAccess::iterator it =
+ StateValuesAccess(values_node).begin();
+ !it.done(); ++it) {
if (liveness.Contains(i)) {
- EXPECT_THAT(node.node, IsInt32Constant(i));
+ EXPECT_THAT(it.node(), IsInt32Constant(i));
} else {
- EXPECT_EQ(node.node, nullptr);
+ EXPECT_EQ(it.node(), nullptr);
}
i++;
}
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index bfb65e7c5f..8ecee3f8a1 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -175,7 +175,7 @@ class TyperTest : public TypedGraphTest {
for (int x1 = lmin; x1 < lmin + width; x1++) {
for (int x2 = rmin; x2 < rmin + width; x2++) {
double result_value = opfun(x1, x2);
- Type result_type = Type::NewConstant(
+ Type result_type = Type::Constant(
&broker_, isolate()->factory()->NewNumber(result_value),
zone());
EXPECT_TRUE(result_type.Is(expected_type));
@@ -197,7 +197,7 @@ class TyperTest : public TypedGraphTest {
double x1 = RandomInt(r1.AsRange());
double x2 = RandomInt(r2.AsRange());
double result_value = opfun(x1, x2);
- Type result_type = Type::NewConstant(
+ Type result_type = Type::Constant(
&broker_, isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
@@ -205,13 +205,13 @@ class TyperTest : public TypedGraphTest {
// Test extreme cases.
double x1 = +1e-308;
double x2 = -1e-308;
- Type r1 = Type::NewConstant(&broker_, isolate()->factory()->NewNumber(x1),
- zone());
- Type r2 = Type::NewConstant(&broker_, isolate()->factory()->NewNumber(x2),
- zone());
+ Type r1 =
+ Type::Constant(&broker_, isolate()->factory()->NewNumber(x1), zone());
+ Type r2 =
+ Type::Constant(&broker_, isolate()->factory()->NewNumber(x2), zone());
Type expected_type = TypeBinaryOp(op, r1, r2);
double result_value = opfun(x1, x2);
- Type result_type = Type::NewConstant(
+ Type result_type = Type::Constant(
&broker_, isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
@@ -226,11 +226,11 @@ class TyperTest : public TypedGraphTest {
double x1 = RandomInt(r1.AsRange());
double x2 = RandomInt(r2.AsRange());
bool result_value = opfun(x1, x2);
- Type result_type = Type::NewConstant(
- &broker_,
- result_value ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value(),
- zone());
+ Type result_type =
+ Type::Constant(&broker_,
+ result_value ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value(),
+ zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
}
@@ -246,7 +246,7 @@ class TyperTest : public TypedGraphTest {
int32_t x1 = static_cast<int32_t>(RandomInt(r1.AsRange()));
int32_t x2 = static_cast<int32_t>(RandomInt(r2.AsRange()));
double result_value = opfun(x1, x2);
- Type result_type = Type::NewConstant(
+ Type result_type = Type::Constant(
&broker_, isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type.Is(expected_type));
}
@@ -585,6 +585,96 @@ SIMPLIFIED_NUMBER_BINOP_LIST(TEST_MONOTONICITY)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(TEST_MONOTONICITY)
#undef TEST_MONOTONICITY
+TEST_F(TyperTest, Manual_Operation_NumberMax) {
+ BinaryTyper t = [&](Type type1, Type type2) {
+ return operation_typer_.NumberMax(type1, type2);
+ };
+
+ Type zero = Type::Constant(0, zone());
+ Type zero_or_minuszero = Type::Union(zero, Type::MinusZero(), zone());
+ Type dot_five = Type::Constant(0.5, zone());
+
+ Type a = t(Type::MinusZero(), Type::MinusZero());
+ CHECK(Type::MinusZero().Is(a));
+
+ Type b = t(Type::MinusZero(), zero_or_minuszero);
+ CHECK(Type::MinusZero().Is(b));
+ CHECK(zero.Is(b));
+ CHECK(a.Is(b));
+
+ Type c = t(zero_or_minuszero, Type::MinusZero());
+ CHECK(Type::MinusZero().Is(c));
+ CHECK(zero.Is(c));
+ CHECK(a.Is(c));
+
+ Type d = t(zero_or_minuszero, zero_or_minuszero);
+ CHECK(Type::MinusZero().Is(d));
+ CHECK(zero.Is(d));
+ CHECK(b.Is(d));
+ CHECK(c.Is(d));
+
+ Type e =
+ t(Type::MinusZero(), Type::Union(Type::MinusZero(), dot_five, zone()));
+ CHECK(Type::MinusZero().Is(e));
+ CHECK(dot_five.Is(e));
+ CHECK(a.Is(e));
+
+ Type f = t(Type::MinusZero(), zero);
+ CHECK(zero.Is(f));
+ CHECK(f.Is(b));
+
+ Type g = t(zero, Type::MinusZero());
+ CHECK(zero.Is(g));
+ CHECK(g.Is(c));
+}
+
+TEST_F(TyperTest, Manual_Operation_NumberMin) {
+ BinaryTyper t = [&](Type type1, Type type2) {
+ return operation_typer_.NumberMin(type1, type2);
+ };
+
+ Type zero = Type::Constant(0, zone());
+ Type zero_or_minuszero = Type::Union(zero, Type::MinusZero(), zone());
+ Type one = Type::Constant(1, zone());
+ Type minus_dot_five = Type::Constant(-0.5, zone());
+
+ Type a = t(Type::MinusZero(), Type::MinusZero());
+ CHECK(Type::MinusZero().Is(a));
+
+ Type b = t(Type::MinusZero(), zero_or_minuszero);
+ CHECK(Type::MinusZero().Is(b));
+ CHECK(zero.Is(b));
+ CHECK(a.Is(b));
+
+ Type c = t(zero_or_minuszero, Type::MinusZero());
+ CHECK(Type::MinusZero().Is(c));
+ CHECK(zero.Is(c));
+ CHECK(a.Is(c));
+
+ Type d = t(zero_or_minuszero, zero_or_minuszero);
+ CHECK(Type::MinusZero().Is(d));
+ CHECK(zero.Is(d));
+ CHECK(b.Is(d));
+ CHECK(c.Is(d));
+
+ Type e = t(Type::MinusZero(),
+ Type::Union(Type::MinusZero(), minus_dot_five, zone()));
+ CHECK(Type::MinusZero().Is(e));
+ CHECK(minus_dot_five.Is(e));
+ CHECK(a.Is(e));
+
+ Type f = t(Type::MinusZero(), zero);
+ CHECK(Type::MinusZero().Is(f));
+ CHECK(f.Is(b));
+
+ Type g = t(zero, Type::MinusZero());
+ CHECK(Type::MinusZero().Is(g));
+ CHECK(g.Is(c));
+
+ Type h = t(one, Type::MinusZero());
+ CHECK(Type::MinusZero().Is(h));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 6cb9df0895..3b59b7cf35 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -30,10 +30,10 @@ void RunStdFunction(void* data) {
}
template <typename TMixin>
-class WithFinalizationGroupMixin : public TMixin {
+class WithFinalizationRegistryMixin : public TMixin {
public:
- WithFinalizationGroupMixin() = default;
- ~WithFinalizationGroupMixin() override = default;
+ WithFinalizationRegistryMixin() = default;
+ ~WithFinalizationRegistryMixin() override = default;
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
@@ -54,21 +54,29 @@ class WithFinalizationGroupMixin : public TMixin {
private:
static SaveFlags* save_flags_;
- DISALLOW_COPY_AND_ASSIGN(WithFinalizationGroupMixin);
+ DISALLOW_COPY_AND_ASSIGN(WithFinalizationRegistryMixin);
};
template <typename TMixin>
-SaveFlags* WithFinalizationGroupMixin<TMixin>::save_flags_ = nullptr;
-
-using TestWithNativeContextAndFinalizationGroup = //
- WithInternalIsolateMixin< //
- WithContextMixin< //
- WithFinalizationGroupMixin< //
- WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
+SaveFlags* WithFinalizationRegistryMixin<TMixin>::save_flags_ = nullptr;
+
+using TestWithNativeContextAndFinalizationRegistry = //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithFinalizationRegistryMixin< //
+ WithIsolateScopeMixin< //
+ WithSharedIsolateMixin< //
::testing::Test>>>>>;
-class MicrotaskQueueTest : public TestWithNativeContextAndFinalizationGroup {
+namespace {
+
+void DummyPromiseHook(PromiseHookType type, Local<Promise> promise,
+ Local<Value> parent) {}
+
+} // namespace
+
+class MicrotaskQueueTest : public TestWithNativeContextAndFinalizationRegistry,
+ public ::testing::WithParamInterface<bool> {
public:
template <typename F>
Handle<Microtask> NewMicrotask(F&& f) {
@@ -82,6 +90,12 @@ class MicrotaskQueueTest : public TestWithNativeContextAndFinalizationGroup {
void SetUp() override {
microtask_queue_ = MicrotaskQueue::New(isolate());
native_context()->set_microtask_queue(microtask_queue());
+
+ if (GetParam()) {
+      // Use a PromiseHook to switch the implementation to the ResolvePromise
+      // runtime function instead of the ResolvePromise builtin.
+ v8_isolate()->SetPromiseHook(&DummyPromiseHook);
+ }
}
void TearDown() override {
@@ -126,7 +140,7 @@ class RecordingVisitor : public RootVisitor {
};
// Sanity check. Ensure a microtask is stored in a queue and run.
-TEST_F(MicrotaskQueueTest, EnqueueAndRun) {
+TEST_P(MicrotaskQueueTest, EnqueueAndRun) {
bool ran = false;
EXPECT_EQ(0, microtask_queue()->capacity());
EXPECT_EQ(0, microtask_queue()->size());
@@ -142,7 +156,7 @@ TEST_F(MicrotaskQueueTest, EnqueueAndRun) {
}
// Check for a buffer growth.
-TEST_F(MicrotaskQueueTest, BufferGrowth) {
+TEST_P(MicrotaskQueueTest, BufferGrowth) {
int count = 0;
// Enqueue and flush the queue first to have non-zero |start_|.
@@ -176,7 +190,7 @@ TEST_F(MicrotaskQueueTest, BufferGrowth) {
}
// MicrotaskQueue instances form a doubly linked list.
-TEST_F(MicrotaskQueueTest, InstanceChain) {
+TEST_P(MicrotaskQueueTest, InstanceChain) {
ClearTestMicrotaskQueue();
MicrotaskQueue* default_mtq = isolate()->default_microtask_queue();
@@ -207,7 +221,7 @@ TEST_F(MicrotaskQueueTest, InstanceChain) {
// Pending Microtasks in MicrotaskQueues are strong roots. Ensure they are
// visited exactly once.
-TEST_F(MicrotaskQueueTest, VisitRoot) {
+TEST_P(MicrotaskQueueTest, VisitRoot) {
 // Ensure that the ring buffer has a separate in-use region.
for (int i = 0; i < MicrotaskQueue::kMinimumCapacity / 2 + 1; ++i) {
microtask_queue()->EnqueueMicrotask(*NewMicrotask([] {}));
@@ -233,7 +247,7 @@ TEST_F(MicrotaskQueueTest, VisitRoot) {
EXPECT_EQ(expected, actual);
}
-TEST_F(MicrotaskQueueTest, PromiseHandlerContext) {
+TEST_P(MicrotaskQueueTest, PromiseHandlerContext) {
Local<v8::Context> v8_context2 = v8::Context::New(v8_isolate());
Local<v8::Context> v8_context3 = v8::Context::New(v8_isolate());
Local<v8::Context> v8_context4 = v8::Context::New(v8_isolate());
@@ -327,7 +341,7 @@ TEST_F(MicrotaskQueueTest, PromiseHandlerContext) {
v8_context2->DetachGlobal();
}
-TEST_F(MicrotaskQueueTest, DetachGlobal_Enqueue) {
+TEST_P(MicrotaskQueueTest, DetachGlobal_Enqueue) {
EXPECT_EQ(0, microtask_queue()->size());
// Detach MicrotaskQueue from the current context.
@@ -339,7 +353,7 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_Enqueue) {
EXPECT_EQ(0, microtask_queue()->size());
}
-TEST_F(MicrotaskQueueTest, DetachGlobal_Run) {
+TEST_P(MicrotaskQueueTest, DetachGlobal_Run) {
EXPECT_EQ(0, microtask_queue()->size());
// Enqueue microtasks to the current context.
@@ -377,18 +391,7 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_Run) {
}
}
-namespace {
-
-void DummyPromiseHook(PromiseHookType type, Local<Promise> promise,
- Local<Value> parent) {}
-
-} // namespace
-
-TEST_F(MicrotaskQueueTest, DetachGlobal_PromiseResolveThenableJobTask) {
- // Use a PromiseHook to switch the implementation to ResolvePromise runtime,
- // instead of ResolvePromise builtin.
- v8_isolate()->SetPromiseHook(&DummyPromiseHook);
-
+TEST_P(MicrotaskQueueTest, DetachGlobal_PromiseResolveThenableJobTask) {
RunJS(
"var resolve;"
"var promise = new Promise(r => { resolve = r; });"
@@ -410,7 +413,71 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_PromiseResolveThenableJobTask) {
EXPECT_EQ(0, microtask_queue()->size());
}
-TEST_F(MicrotaskQueueTest, DetachGlobal_HandlerContext) {
+TEST_P(MicrotaskQueueTest, DetachGlobal_ResolveThenableForeignThen) {
+ Handle<JSArray> result = RunJS<JSArray>(
+ "let result = [false];"
+ "result");
+ Handle<JSFunction> then = RunJS<JSFunction>("() => { result[0] = true; }");
+
+ Handle<JSPromise> stale_promise;
+
+ {
+ // Create a context with its own microtask queue.
+ std::unique_ptr<MicrotaskQueue> sub_microtask_queue =
+ MicrotaskQueue::New(isolate());
+ Local<v8::Context> sub_context = v8::Context::New(
+ v8_isolate(),
+ /* extensions= */ nullptr,
+ /* global_template= */ MaybeLocal<ObjectTemplate>(),
+ /* global_object= */ MaybeLocal<Value>(),
+ /* internal_fields_deserializer= */ DeserializeInternalFieldsCallback(),
+ sub_microtask_queue.get());
+
+ {
+ v8::Context::Scope scope(sub_context);
+ CHECK(sub_context->Global()
+ ->Set(sub_context, NewString("then"),
+ Utils::ToLocal(Handle<JSReceiver>::cast(then)))
+ .FromJust());
+
+ ASSERT_EQ(0, microtask_queue()->size());
+ ASSERT_EQ(0, sub_microtask_queue->size());
+ ASSERT_TRUE(Object::GetElement(isolate(), result, 0)
+ .ToHandleChecked()
+ ->IsFalse());
+
+ // With a regular thenable, a microtask is queued on the sub-context.
+ RunJS<JSPromise>("Promise.resolve({ then: cb => cb(1) })");
+ EXPECT_EQ(0, microtask_queue()->size());
+ EXPECT_EQ(1, sub_microtask_queue->size());
+ EXPECT_TRUE(Object::GetElement(isolate(), result, 0)
+ .ToHandleChecked()
+ ->IsFalse());
+
+ // But when the `then` method comes from another context, a microtask is
+ // instead queued on the main context.
+ stale_promise = RunJS<JSPromise>("Promise.resolve({ then })");
+ EXPECT_EQ(1, microtask_queue()->size());
+ EXPECT_EQ(1, sub_microtask_queue->size());
+ EXPECT_TRUE(Object::GetElement(isolate(), result, 0)
+ .ToHandleChecked()
+ ->IsFalse());
+ }
+
+ sub_context->DetachGlobal();
+ }
+
+ EXPECT_EQ(1, microtask_queue()->size());
+ EXPECT_TRUE(
+ Object::GetElement(isolate(), result, 0).ToHandleChecked()->IsFalse());
+
+ EXPECT_EQ(1, microtask_queue()->RunMicrotasks(isolate()));
+ EXPECT_EQ(0, microtask_queue()->size());
+ EXPECT_TRUE(
+ Object::GetElement(isolate(), result, 0).ToHandleChecked()->IsTrue());
+}
+
+TEST_P(MicrotaskQueueTest, DetachGlobal_HandlerContext) {
// EnqueueMicrotask should use the context associated to the handler instead
// of the current context. E.g.
// // At Context A.
@@ -489,7 +556,7 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_HandlerContext) {
.FromJust());
}
-TEST_F(MicrotaskQueueTest, DetachGlobal_Chain) {
+TEST_P(MicrotaskQueueTest, DetachGlobal_Chain) {
Handle<JSPromise> stale_rejected_promise;
Local<v8::Context> sub_context = v8::Context::New(v8_isolate());
@@ -516,7 +583,7 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_Chain) {
Object::GetElement(isolate(), result, 0).ToHandleChecked()->IsTrue());
}
-TEST_F(MicrotaskQueueTest, DetachGlobal_InactiveHandler) {
+TEST_P(MicrotaskQueueTest, DetachGlobal_InactiveHandler) {
Local<v8::Context> sub_context = v8::Context::New(v8_isolate());
Utils::OpenHandle(*sub_context)
->native_context()
@@ -558,7 +625,7 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_InactiveHandler) {
Object::GetElement(isolate(), result, 1).ToHandleChecked()->IsFalse());
}
-TEST_F(MicrotaskQueueTest, MicrotasksScope) {
+TEST_P(MicrotaskQueueTest, MicrotasksScope) {
ASSERT_NE(isolate()->default_microtask_queue(), microtask_queue());
microtask_queue()->set_microtasks_policy(MicrotasksPolicy::kScoped);
@@ -574,5 +641,11 @@ TEST_F(MicrotaskQueueTest, MicrotasksScope) {
EXPECT_TRUE(ran);
}
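+// Run every MicrotaskQueueTest twice: "builtin" (false) uses the
+// ResolvePromise builtin, while "runtime" (true) installs a PromiseHook in
+// SetUp so the ResolvePromise runtime function is exercised instead.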
+INSTANTIATE_TEST_SUITE_P(
+ , MicrotaskQueueTest, ::testing::Values(false, true),
+ [](const ::testing::TestParamInfo<MicrotaskQueueTest::ParamType>& info) {
+ return info.param ? "runtime" : "builtin";
+ });
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/cppgc/allocation_unittest.cc b/deps/v8/test/unittests/heap/cppgc/allocation_unittest.cc
new file mode 100644
index 0000000000..3a02ae1721
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/allocation_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/allocation.h"
+
+#include <memory>
+
+#include "src/heap/cppgc/heap.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+
+TEST(GCBasicHeapTest, CreateAndDestroyHeap) {
+ std::unique_ptr<Heap> heap{Heap::Create()};
+}
+
+namespace {
+
+class Foo : public GarbageCollected<Foo> {
+ public:
+ static size_t destructor_callcount;
+
+ Foo() { destructor_callcount = 0; }
+ ~Foo() { destructor_callcount++; }
+};
+
+size_t Foo::destructor_callcount;
+
+class GCAllocationTest : public testing::TestWithHeap {};
+
+} // namespace
+
+TEST_F(GCAllocationTest, MakeGarbageCollectedAndReclaim) {
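+  // The object is unreferenced, so the explicit GC below must reclaim it and
+  // run its destructor exactly once.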
+ MakeGarbageCollected<Foo>(GetHeap());
+ EXPECT_EQ(0u, Foo::destructor_callcount);
+ internal::Heap::From(GetHeap())->CollectGarbage();
+ EXPECT_EQ(1u, Foo::destructor_callcount);
+}
+
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/finalizer-trait_unittest.cc b/deps/v8/test/unittests/heap/cppgc/finalizer-trait_unittest.cc
new file mode 100644
index 0000000000..91a255e727
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/finalizer-trait_unittest.cc
@@ -0,0 +1,118 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/finalizer-trait.h"
+#include <type_traits>
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+// Trivially destructible types.
+class TypeWithoutDestructor final {};
+class TypeWithPrimitive final {
+ public:
+ int foo = 0;
+};
+
+class InvokeCounter {
+ public:
+ static size_t kCallcount;
+ static void Reset() { kCallcount = 0; }
+ static void Invoke() { kCallcount++; }
+};
+
+size_t InvokeCounter::kCallcount = 0;
+
+// Regular C++ use cases.
+
+class TypeWithDestructor final : public InvokeCounter {
+ public:
+ ~TypeWithDestructor() { Invoke(); }
+};
+
+class TypeWithVirtualDestructorBase {
+ public:
+ virtual ~TypeWithVirtualDestructorBase() = default;
+};
+
+class TypeWithVirtualDestructorChild final
+ : public TypeWithVirtualDestructorBase,
+ public InvokeCounter {
+ public:
+ ~TypeWithVirtualDestructorChild() final { Invoke(); }
+};
+
+// Manual dispatch to avoid vtables.
+
+class TypeWithCustomFinalizationMethod final : public InvokeCounter {
+ public:
+ void FinalizeGarbageCollectedObject() { Invoke(); }
+};
+
+class TypeWithCustomFinalizationMethodAtBase {
+ public:
+ void FinalizeGarbageCollectedObject();
+};
+
+class TypeWithCustomFinalizationMethodAtBaseChild
+ : public TypeWithCustomFinalizationMethodAtBase,
+ public InvokeCounter {
+ public:
+ ~TypeWithCustomFinalizationMethodAtBaseChild() { Invoke(); }
+};
+
+void TypeWithCustomFinalizationMethodAtBase::FinalizeGarbageCollectedObject() {
+  // The test knows that base is only inherited by a single child. In practice,
+  // users can maintain a map of valid types in already existing storage.
+ static_cast<TypeWithCustomFinalizationMethodAtBaseChild*>(this)
+ ->~TypeWithCustomFinalizationMethodAtBaseChild();
+}
+
+template <typename Type>
+void ExpectFinalizerIsInvoked(Type* object) {
+ InvokeCounter::Reset();
+ EXPECT_NE(nullptr, FinalizerTrait<Type>::kCallback);
+ FinalizerTrait<Type>::kCallback(object);
+ EXPECT_EQ(1u, InvokeCounter::kCallcount);
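+  // The finalizer has already run the destructor; only the raw memory still
+  // needs to be released.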
+ operator delete(object);
+}
+
+} // namespace
+
+TEST(FinalizerTrait, TypeWithoutDestructorHasNoFinalizer) {
+ static_assert(std::is_trivially_destructible<TypeWithoutDestructor>::value,
+ "trivially destructible");
+ EXPECT_EQ(nullptr, FinalizerTrait<TypeWithoutDestructor>::kCallback);
+}
+
+TEST(FinalizerTrait, TypeWithPrimitiveHasNoFinalizer) {
+ static_assert(std::is_trivially_destructible<TypeWithPrimitive>::value,
+ "trivially destructible");
+ EXPECT_EQ(nullptr, FinalizerTrait<TypeWithPrimitive>::kCallback);
+}
+
+TEST(FinalizerTrait, FinalizerForTypeWithDestructor) {
+ ExpectFinalizerIsInvoked(new TypeWithDestructor());
+}
+
+TEST(FinalizerTrait, FinalizerForTypeWithVirtualBaseDtor) {
+ TypeWithVirtualDestructorBase* base = new TypeWithVirtualDestructorChild();
+ ExpectFinalizerIsInvoked(base);
+}
+
+TEST(FinalizerTrait, FinalizerForCustomFinalizationMethod) {
+ ExpectFinalizerIsInvoked(new TypeWithCustomFinalizationMethod());
+}
+
+TEST(FinalizerTrait, FinalizerForCustomFinalizationMethodInBase) {
+ TypeWithCustomFinalizationMethodAtBase* base =
+ new TypeWithCustomFinalizationMethodAtBaseChild();
+ ExpectFinalizerIsInvoked(base);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/garbage-collected_unittest.cc b/deps/v8/test/unittests/heap/cppgc/garbage-collected_unittest.cc
new file mode 100644
index 0000000000..5098bdf48e
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/garbage-collected_unittest.cc
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/garbage-collected.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class GCed : public GarbageCollected<GCed> {};
+class NotGCed {};
+
+} // namespace
+
+TEST(GarbageCollectedTest, GarbageCollectedTrait) {
+ EXPECT_FALSE(IsGarbageCollectedType<int>::value);
+ EXPECT_FALSE(IsGarbageCollectedType<NotGCed>::value);
+ EXPECT_TRUE(IsGarbageCollectedType<GCed>::value);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/gc-info_unittest.cc b/deps/v8/test/unittests/heap/cppgc/gc-info_unittest.cc
new file mode 100644
index 0000000000..e7bfb5a7fe
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/gc-info_unittest.cc
@@ -0,0 +1,153 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/gc-info.h"
+
+#include "include/cppgc/platform.h"
+#include "src/base/page-allocator.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/gc-info-table.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+TEST(GCInfoTableTest, InitialEmpty) {
+ v8::base::PageAllocator page_allocator;
+ GCInfoTable table(&page_allocator);
+ EXPECT_EQ(GCInfoTable::kMinIndex, table.NumberOfGCInfosForTesting());
+}
+
+TEST(GCInfoTableTest, ResizeToMaxIndex) {
+ v8::base::PageAllocator page_allocator;
+ GCInfoTable table(&page_allocator);
+ GCInfo info = {nullptr, false};
+ for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
+ i++) {
+ GCInfoIndex index = table.RegisterNewGCInfo(info);
+ EXPECT_EQ(i, index);
+ }
+}
+
+TEST(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
+ v8::base::PageAllocator page_allocator;
+ GCInfoTable table(&page_allocator);
+ GCInfo info = {nullptr, false};
+ // Create GCInfoTable::kMaxIndex entries.
+ for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
+ i++) {
+ table.RegisterNewGCInfo(info);
+ }
+ EXPECT_DEATH_IF_SUPPORTED(table.RegisterNewGCInfo(info), "");
+}
+
+TEST(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
+ v8::base::PageAllocator page_allocator;
+ GCInfoTable table(&page_allocator);
+ GCInfo info = {nullptr, false};
+ // Use up all slots until limit.
+ GCInfoIndex limit = table.LimitForTesting();
+ // Bail out if initial limit is already the maximum because of large committed
+  // pages. In this case, nothing can be committed as read-only.
+ if (limit == GCInfoTable::kMaxIndex) {
+ return;
+ }
+ for (GCInfoIndex i = GCInfoTable::kMinIndex; i < limit; i++) {
+ table.RegisterNewGCInfo(info);
+ }
+ EXPECT_EQ(limit, table.LimitForTesting());
+ table.RegisterNewGCInfo(info);
+ EXPECT_NE(limit, table.LimitForTesting());
+ // Old area is now read-only.
+ auto& first_slot = table.TableSlotForTesting(GCInfoTable::kMinIndex);
+ EXPECT_DEATH_IF_SUPPORTED(first_slot.finalize = nullptr, "");
+}
+
+namespace {
+
+class ThreadRegisteringGCInfoObjects final : public v8::base::Thread {
+ public:
+ ThreadRegisteringGCInfoObjects(GCInfoTable* table,
+ GCInfoIndex num_registrations)
+ : v8::base::Thread(Options("Thread registering GCInfo objects.")),
+ table_(table),
+ num_registrations_(num_registrations) {}
+
+ void Run() final {
+ GCInfo info = {nullptr, false};
+ for (GCInfoIndex i = 0; i < num_registrations_; i++) {
+ table_->RegisterNewGCInfo(info);
+ }
+ }
+
+ private:
+ GCInfoTable* table_;
+ GCInfoIndex num_registrations_;
+};
+
+} // namespace
+
+TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
+ constexpr size_t num_threads = 4;
+ constexpr size_t main_thread_initialized = 2;
+ constexpr size_t gc_infos_to_register =
+ (GCInfoTable::kMaxIndex - 1) -
+ (GCInfoTable::kMinIndex + main_thread_initialized);
+ static_assert(gc_infos_to_register % num_threads == 0,
+ "must sum up to kMaxIndex");
+ constexpr size_t gc_infos_per_thread = gc_infos_to_register / num_threads;
+
+ v8::base::PageAllocator page_allocator;
+ GCInfoTable table(&page_allocator);
+ GCInfo info = {nullptr, false};
+ for (size_t i = 0; i < main_thread_initialized; i++) {
+ table.RegisterNewGCInfo(info);
+ }
+
+ v8::base::Thread* threads[num_threads];
+ for (size_t i = 0; i < num_threads; i++) {
+ threads[i] =
+ new ThreadRegisteringGCInfoObjects(&table, gc_infos_per_thread);
+ }
+ for (size_t i = 0; i < num_threads; i++) {
+ CHECK(threads[i]->Start());
+ }
+ for (size_t i = 0; i < num_threads; i++) {
+ threads[i]->Join();
+ delete threads[i];
+ }
+}
+
+// Tests using the global table and GCInfoTrait.
+
+namespace {
+
+class GCInfoTraitTest : public testing::TestWithPlatform {};
+
+class BasicType final {};
+class OtherBasicType final {};
+
+} // namespace
+
+TEST_F(GCInfoTraitTest, IndexInBounds) {
+ const GCInfoIndex index = GCInfoTrait<BasicType>::Index();
+ EXPECT_GT(GCInfoTable::kMaxIndex, index);
+ EXPECT_LE(GCInfoTable::kMinIndex, index);
+}
+
+TEST_F(GCInfoTraitTest, TraitReturnsSameIndexForSameType) {
+ const GCInfoIndex index1 = GCInfoTrait<BasicType>::Index();
+ const GCInfoIndex index2 = GCInfoTrait<BasicType>::Index();
+ EXPECT_EQ(index1, index2);
+}
+
+TEST_F(GCInfoTraitTest, TraitReturnsDifferentIndexForDifferentTypes) {
+ const GCInfoIndex index1 = GCInfoTrait<BasicType>::Index();
+ const GCInfoIndex index2 = GCInfoTrait<OtherBasicType>::Index();
+ EXPECT_NE(index1, index2);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-object-header_unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-object-header_unittest.cc
new file mode 100644
index 0000000000..b062489cb3
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/heap-object-header_unittest.cc
@@ -0,0 +1,181 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-object-header.h"
+
+#include <atomic>
+#include <memory>
+
+#include "include/cppgc/allocation.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+TEST(HeapObjectHeaderTest, Constructor) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_EQ(kSize, header.GetSize());
+ EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
+ EXPECT_TRUE(header.IsInConstruction());
+ EXPECT_FALSE(header.IsMarked());
+}
+
+TEST(HeapObjectHeaderTest, Payload) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_EQ(reinterpret_cast<ConstAddress>(&header) + sizeof(HeapObjectHeader),
+ header.Payload());
+}
+
+TEST(HeapObjectHeaderTest, GetGCInfoIndex) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
+ EXPECT_EQ(kGCInfoIndex,
+ header.GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
+}
+
+TEST(HeapObjectHeaderTest, GetSize) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity * 23;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_EQ(kSize, header.GetSize());
+ EXPECT_EQ(kSize, header.GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+}
+
+TEST(HeapObjectHeaderTest, IsLargeObject) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity * 23;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_EQ(false, header.IsLargeObject());
+ EXPECT_EQ(false,
+ header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>());
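+  // A size of 0 in the header denotes a large object.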
+ HeapObjectHeader large_header(0, kGCInfoIndex + 1);
+ EXPECT_EQ(true, large_header.IsLargeObject());
+ EXPECT_EQ(
+ true,
+ large_header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>());
+}
+
+TEST(HeapObjectHeaderTest, MarkObjectAsFullyConstructed) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_TRUE(header.IsInConstruction());
+ header.MarkAsFullyConstructed();
+ EXPECT_FALSE(header.IsInConstruction());
+ // Size shares the same bitfield and should be unaffected by
+ // MarkObjectAsFullyConstructed.
+ EXPECT_EQ(kSize, header.GetSize());
+}
+
+TEST(HeapObjectHeaderTest, TryMark) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity * 7;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_FALSE(header.IsMarked());
+ EXPECT_TRUE(header.TryMarkAtomic());
+ // GCInfoIndex shares the same bitfield and should be unaffected by
+ // TryMarkAtomic.
+ EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
+ EXPECT_FALSE(header.TryMarkAtomic());
+ // GCInfoIndex shares the same bitfield and should be unaffected by
+ // TryMarkAtomic.
+ EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
+ EXPECT_TRUE(header.IsMarked());
+}
+
+TEST(HeapObjectHeaderTest, Unmark) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity * 7;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_FALSE(header.IsMarked());
+ EXPECT_TRUE(header.TryMarkAtomic());
+ EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
+ EXPECT_TRUE(header.IsMarked());
+ header.Unmark();
+ // GCInfoIndex shares the same bitfield and should be unaffected by Unmark.
+ EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
+ EXPECT_FALSE(header.IsMarked());
+ HeapObjectHeader header2(kSize, kGCInfoIndex);
+ EXPECT_FALSE(header2.IsMarked());
+ EXPECT_TRUE(header2.TryMarkAtomic());
+ EXPECT_TRUE(header2.IsMarked());
+ header2.Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+ // GCInfoIndex shares the same bitfield and should be unaffected by Unmark.
+ EXPECT_EQ(kGCInfoIndex, header2.GetGCInfoIndex());
+ EXPECT_FALSE(header2.IsMarked());
+}
+
+namespace {
+
+struct Payload {
+ volatile size_t value{5};
+};
+
+class ConcurrentGCThread final : public v8::base::Thread {
+ public:
+ explicit ConcurrentGCThread(HeapObjectHeader* header, Payload* payload)
+ : v8::base::Thread(Options("Thread accessing object.")),
+ header_(header),
+ payload_(payload) {}
+
+ void Run() final {
+ while (header_->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
+ }
+ USE(v8::base::AsAtomicPtr(const_cast<size_t*>(&payload_->value))
+ ->load(std::memory_order_relaxed));
+ }
+
+ private:
+ HeapObjectHeader* header_;
+ Payload* payload_;
+};
+
+} // namespace
+
+TEST(HeapObjectHeaderTest, ConstructionBitProtectsNonAtomicWrites) {
+ // Object publishing: Test checks that non-atomic stores in the payload can be
+ // guarded using MarkObjectAsFullyConstructed/IsInConstruction. The test
+ // relies on TSAN to find data races.
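+  // Round the payload size up to the next allocation-granularity boundary.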
+ constexpr size_t kSize =
+ (sizeof(HeapObjectHeader) + sizeof(Payload) + kAllocationMask) &
+ ~kAllocationMask;
+ typename std::aligned_storage<kSize, kAllocationGranularity>::type data;
+ HeapObjectHeader* header = new (&data) HeapObjectHeader(kSize, 1);
+ ConcurrentGCThread gc_thread(header,
+ reinterpret_cast<Payload*>(header->Payload()));
+ CHECK(gc_thread.Start());
+ new (header->Payload()) Payload();
+ header->MarkAsFullyConstructed();
+ gc_thread.Join();
+}
+
+#ifdef DEBUG
+
+TEST(HeapObjectHeaderDeathTest, ConstructorTooLargeSize) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = HeapObjectHeader::kMaxSize + 1;
+ EXPECT_DEATH_IF_SUPPORTED(HeapObjectHeader header(kSize, kGCInfoIndex), "");
+}
+
+TEST(HeapObjectHeaderDeathTest, ConstructorTooLargeGCInfoIndex) {
+ constexpr GCInfoIndex kGCInfoIndex = GCInfoTable::kMaxIndex + 1;
+ constexpr size_t kSize = kAllocationGranularity;
+ EXPECT_DEATH_IF_SUPPORTED(HeapObjectHeader header(kSize, kGCInfoIndex), "");
+}
+
+#endif // DEBUG
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc b/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc
new file mode 100644
index 0000000000..cdc862e309
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "testing/gmock/include/gmock/gmock.h"
+
+int main(int argc, char** argv) {
+  // Don't catch SEH exceptions and continue, as the following tests might
+  // hang in a broken environment on Windows.
+ testing::GTEST_FLAG(catch_exceptions) = false;
+
+  // Most unit tests are multi-threaded, so enable thread-safe death tests.
+ testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+ testing::InitGoogleMock(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/deps/v8/test/unittests/heap/cppgc/stack_unittest.cc b/deps/v8/test/unittests/heap/cppgc/stack_unittest.cc
new file mode 100644
index 0000000000..435c06f83f
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/stack_unittest.cc
@@ -0,0 +1,256 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/stack.h"
+
+#include <memory>
+#include <ostream>
+
+#include "include/v8config.h"
+#include "src/base/platform/platform.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include <xmmintrin.h>
+#endif
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class GCStackTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ stack_.reset(new Stack(v8::base::Stack::GetStackStart()));
+ }
+
+ void TearDown() override { stack_.reset(); }
+
+ Stack* GetStack() const { return stack_.get(); }
+
+ private:
+ std::unique_ptr<Stack> stack_;
+};
+
+} // namespace
+
+TEST_F(GCStackTest, IsOnStackForStackValue) {
+ void* dummy;
+ EXPECT_TRUE(GetStack()->IsOnStack(&dummy));
+}
+
+TEST_F(GCStackTest, IsOnStackForHeapValue) {
+ auto dummy = std::make_unique<int>();
+ EXPECT_FALSE(GetStack()->IsOnStack(dummy.get()));
+}
+
+#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
+
+namespace {
+
+class StackScanner final : public StackVisitor {
+ public:
+ struct Container {
+ std::unique_ptr<int> value;
+ };
+
+ StackScanner() : container_(new Container{}) {
+ container_->value = std::make_unique<int>();
+ }
+
+ void VisitPointer(const void* address) final {
+ if (address == container_->value.get()) found_ = true;
+ }
+
+ void Reset() { found_ = false; }
+ bool found() const { return found_; }
+ int* needle() const { return container_->value.get(); }
+
+ private:
+ std::unique_ptr<Container> container_;
+ bool found_ = false;
+};
+
+} // namespace
+
+TEST_F(GCStackTest, IteratePointersFindsOnStackValue) {
+ auto scanner = std::make_unique<StackScanner>();
+
+ // No check that the needle is initially not found as on some platforms it
+  // may be part of the redzone or temporaries after setting it up through
+ // StackScanner.
+ {
+ int* volatile tmp = scanner->needle();
+ USE(tmp);
+ GetStack()->IteratePointers(scanner.get());
+ EXPECT_TRUE(scanner->found());
+ }
+}
+
+TEST_F(GCStackTest, IteratePointersFindsOnStackValuePotentiallyUnaligned) {
+ auto scanner = std::make_unique<StackScanner>();
+
+ // No check that the needle is initially not found as on some platforms it
+  // may be part of the redzone or temporaries after setting it up through
+ // StackScanner.
+ {
+ char a = 'c';
+ USE(a);
+ int* volatile tmp = scanner->needle();
+ USE(tmp);
+ GetStack()->IteratePointers(scanner.get());
+ EXPECT_TRUE(scanner->found());
+ }
+}
+
+namespace {
+
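+// Shifts the needle one parameter slot further down on each recursive call so
+// that it is passed through the registers and stack slots of several nested
+// frames before the stack is scanned.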
+void RecursivelyPassOnParameter(int* volatile p1, int* volatile p2,
+ int* volatile p3, int* volatile p4,
+ int* volatile p5, int* volatile p6,
+ int* volatile p7, int* volatile p8,
+ Stack* stack, StackVisitor* visitor) {
+ if (p1) {
+ RecursivelyPassOnParameter(nullptr, p1, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, stack, visitor);
+ } else if (p2) {
+ RecursivelyPassOnParameter(nullptr, nullptr, p2, nullptr, nullptr, nullptr,
+ nullptr, nullptr, stack, visitor);
+ } else if (p3) {
+ RecursivelyPassOnParameter(nullptr, nullptr, nullptr, p3, nullptr, nullptr,
+ nullptr, nullptr, stack, visitor);
+ } else if (p4) {
+ RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, p4, nullptr,
+ nullptr, nullptr, stack, visitor);
+ } else if (p5) {
+ RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, nullptr, p5,
+ nullptr, nullptr, stack, visitor);
+ } else if (p6) {
+ RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, p6, nullptr, stack, visitor);
+ } else if (p7) {
+ RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, p7, stack, visitor);
+ } else if (p8) {
+ stack->IteratePointers(visitor);
+ }
+}
+
+} // namespace
+
+TEST_F(GCStackTest, IteratePointersFindsParameter) {
+ auto scanner = std::make_unique<StackScanner>();
+ // No check that the needle is initially not found as on some platforms it
+  // may be part of the redzone or temporaries after setting it up through
+ // StackScanner.
+ RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterInNestedFunction) {
+ auto scanner = std::make_unique<StackScanner>();
+ // No check that the needle is initially not found as on some platforms it
+  // may be part of the redzone or temporaries after setting it up through
+ // StackScanner.
+ RecursivelyPassOnParameter(scanner->needle(), nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr, nullptr, GetStack(),
+ scanner.get());
+ EXPECT_TRUE(scanner->found());
+}
+
+// The following test uses inline assembly (verified to work with Clang) to
+// check that the stack-scanning trampoline pushes callee-saved registers.
+//
+// The test uses a macro loop as asm() can only be passed string literals.
+#ifdef __clang__
+#ifdef V8_TARGET_ARCH_X64
+#ifdef V8_OS_WIN
+
+// Excluded from test: rbp
+#define FOR_ALL_CALLEE_SAVED_REGS(V) \
+ V("rdi") \
+ V("rsi") \
+ V("rbx") \
+ V("r12") \
+ V("r13") \
+ V("r14") \
+ V("r15")
+
+#else // !V8_OS_WIN
+
+// Excluded from test: rbp
+#define FOR_ALL_CALLEE_SAVED_REGS(V) \
+ V("rbx") \
+ V("r12") \
+ V("r13") \
+ V("r14") \
+ V("r15")
+
+#endif // !V8_OS_WIN
+#endif // V8_TARGET_ARCH_X64
+#endif // __clang__
+
+#ifdef FOR_ALL_CALLEE_SAVED_REGS
+
+TEST_F(GCStackTest, IteratePointersFindsCalleeSavedRegisters) {
+ auto scanner = std::make_unique<StackScanner>();
+
+ // No check that the needle is initially not found as on some platforms it
+  // may be part of the redzone or temporaries after setting it up through
+ // StackScanner.
+
+// First, clear all callee-saved registers.
+#define CLEAR_REGISTER(reg) asm("mov $0, %%" reg : : : reg);
+
+ FOR_ALL_CALLEE_SAVED_REGS(CLEAR_REGISTER)
+#undef CLEAR_REGISTER
+
+ // Keep local raw pointers to keep instruction sequences small below.
+ auto* local_stack = GetStack();
+ auto* local_scanner = scanner.get();
+
+// Moves |local_scanner->needle()| into a callee-saved register, leaving the
+// callee-saved register as the only register referencing the needle.
+// (Ignoring implementation-dependent dirty registers/stack.)
+#define KEEP_ALIVE_FROM_CALLEE_SAVED(reg) \
+ local_scanner->Reset(); \
+  /* This moves the temporary into the callee-saved register. */             \
+ asm("mov %0, %%" reg : : "r"(local_scanner->needle()) : reg); \
+ /* Register is unprotected from here till the actual invocation. */ \
+ local_stack->IteratePointers(local_scanner); \
+ EXPECT_TRUE(local_scanner->found()) \
+ << "pointer in callee-saved register not found. register: " << reg \
+ << std::endl; \
+ /* Clear out the register again */ \
+ asm("mov $0, %%" reg : : : reg);
+
+ FOR_ALL_CALLEE_SAVED_REGS(KEEP_ALIVE_FROM_CALLEE_SAVED)
+#undef KEEP_ALIVE_FROM_CALLEE_SAVED
+#undef FOR_ALL_CALLEE_SAVED_REGS
+}
+#endif // FOR_ALL_CALLEE_SAVED_REGS
+
+#if V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
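+// The aligned SSE load in the visitor below traps if IteratePointers calls
+// back on a misaligned stack.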
+class CheckStackAlignmentVisitor final : public StackVisitor {
+ public:
+ void VisitPointer(const void*) final {
+ float f[4] = {0.};
+ volatile auto xmm = ::_mm_load_ps(f);
+ USE(xmm);
+ }
+};
+
+TEST_F(GCStackTest, StackAlignment) {
+ auto checker = std::make_unique<CheckStackAlignmentVisitor>();
+ GetStack()->IteratePointers(checker.get());
+}
+#endif // V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+
+#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.cc b/deps/v8/test/unittests/heap/cppgc/tests.cc
new file mode 100644
index 0000000000..e67ac730d6
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/tests.cc
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/heap/cppgc/tests.h"
+
+namespace cppgc {
+namespace testing {
+
+// static
+std::unique_ptr<cppgc::PageAllocator> TestWithPlatform::page_allocator_;
+
+// static
+void TestWithPlatform::SetUpTestSuite() {
+ page_allocator_.reset(new v8::base::PageAllocator());
+ cppgc::InitializePlatform(page_allocator_.get());
+}
+
+// static
+void TestWithPlatform::TearDownTestSuite() {
+ cppgc::ShutdownPlatform();
+ page_allocator_.reset();
+}
+
+void TestWithHeap::SetUp() {
+ heap_ = Heap::Create();
+ TestWithPlatform::SetUp();
+}
+
+void TestWithHeap::TearDown() {
+ heap_.reset();
+ TestWithPlatform::TearDown();
+}
+
+} // namespace testing
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
new file mode 100644
index 0000000000..d21f256444
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -0,0 +1,39 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_HEAP_CPPGC_TESTS_H_
+#define V8_UNITTESTS_HEAP_CPPGC_TESTS_H_
+
+#include "include/cppgc/heap.h"
+#include "include/cppgc/platform.h"
+#include "src/base/page-allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace testing {
+
+class TestWithPlatform : public ::testing::Test {
+ protected:
+ static void SetUpTestSuite();
+ static void TearDownTestSuite();
+
+ private:
+ static std::unique_ptr<cppgc::PageAllocator> page_allocator_;
+};
+
+class TestWithHeap : public TestWithPlatform {
+ protected:
+ void SetUp() override;
+ void TearDown() override;
+
+ Heap* GetHeap() const { return heap_.get(); }
+
+ private:
+ std::unique_ptr<cppgc::Heap> heap_;
+};
+
+} // namespace testing
+} // namespace cppgc
+
+#endif // V8_UNITTESTS_HEAP_CPPGC_TESTS_H_
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index b3901d74b0..c46ee35095 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -30,7 +30,6 @@ class GCIdleTimeHandlerTest : public ::testing::Test {
static const size_t kSizeOfObjects = 100 * MB;
static const size_t kMarkCompactSpeed = 200 * KB;
- static const size_t kMarkingSpeed = 200 * KB;
private:
GCIdleTimeHandler handler_;
@@ -74,20 +73,6 @@ TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
}
-TEST_F(GCIdleTimeHandlerTest, ShouldDoFinalIncrementalMarkCompact) {
- size_t idle_time_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
- idle_time_ms, 0, 0));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DontDoFinalIncrementalMarkCompact) {
- size_t idle_time_ms = 1;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
- idle_time_ms, kSizeOfObjects, kMarkingSpeed));
-}
-
-
TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
if (!handler()->Enabled()) return;
GCIdleTimeHeapState heap_state = DefaultHeapState();
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
new file mode 100644
index 0000000000..bf6aad6efc
--- /dev/null
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/local-heap.h"
+#include "src/heap/heap.h"
+#include "src/heap/safepoint.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+using LocalHeapTest = TestWithIsolate;
+
+TEST_F(LocalHeapTest, Initialize) {
+ Heap* heap = i_isolate()->heap();
+
+ {
+ LocalHeap lh1(heap);
+ CHECK(heap->safepoint()->ContainsLocalHeap(&lh1));
+ LocalHeap lh2(heap);
+ CHECK(heap->safepoint()->ContainsLocalHeap(&lh2));
+
+ {
+ LocalHeap lh3(heap);
+ CHECK(heap->safepoint()->ContainsLocalHeap(&lh3));
+ }
+
+ CHECK(heap->safepoint()->ContainsLocalHeap(&lh1));
+ CHECK(heap->safepoint()->ContainsLocalHeap(&lh2));
+ }
+
+ CHECK(!heap->safepoint()->ContainsAnyLocalHeap());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/off-thread-factory-unittest.cc b/deps/v8/test/unittests/heap/off-thread-factory-unittest.cc
index 2c7bd639a8..0592d7b2db 100644
--- a/deps/v8/test/unittests/heap/off-thread-factory-unittest.cc
+++ b/deps/v8/test/unittests/heap/off-thread-factory-unittest.cc
@@ -8,65 +8,139 @@
#include <memory>
#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles-inl.h"
#include "src/handles/handles.h"
-#include "src/heap/off-thread-factory.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/script.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/scanner.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
+class OffThreadIsolate;
+
+namespace {
+
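+// Decodes UTF-8 source into the UTF-16 buffer format expected by
+// ScannerStream::ForTesting.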
+std::vector<uint16_t> DecodeUtf8(const std::string& string) {
+ if (string.empty()) return {};
+
+ auto utf8_data =
+ Vector<const uint8_t>::cast(VectorOf(string.data(), string.length()));
+ Utf8Decoder decoder(utf8_data);
+
+ std::vector<uint16_t> utf16(decoder.utf16_length());
+ decoder.Decode(&utf16[0], utf8_data);
+
+ return utf16;
+}
+
+} // namespace
+
class OffThreadFactoryTest : public TestWithIsolateAndZone {
public:
OffThreadFactoryTest()
- : TestWithIsolateAndZone(), off_thread_factory_(isolate()) {}
+ : TestWithIsolateAndZone(),
+ parse_info_(isolate()),
+ off_thread_isolate_(isolate(), parse_info_.zone()) {}
+
+ FunctionLiteral* ParseProgram(const char* source) {
+ auto utf16_source = DecodeUtf8(source);
- OffThreadFactory* off_thread_factory() { return &off_thread_factory_; }
+    // Normally this would be an external string or similar; we don't have to
+    // worry about that for now.
+ source_string_ =
+ factory()->NewStringFromUtf8(CStrVector(source)).ToHandleChecked();
+
+ parse_info_.set_character_stream(
+ ScannerStream::ForTesting(utf16_source.data(), utf16_source.size()));
+ parse_info_.set_toplevel();
+ parse_info_.set_allow_lazy_parsing();
+
+ {
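+      // These scopes assert that the background parse neither allocates
+      // handles nor touches the main-thread heap.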
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHeapAccess no_heap_access;
+
+ Parser parser(parse_info());
+ parser.InitializeEmptyScopeChain(parse_info());
+ parser.ParseOnBackground(parse_info());
+
+ CHECK(DeclarationScope::Analyze(parse_info()));
+ }
+
+ parse_info()->ast_value_factory()->Internalize(off_thread_isolate());
+ DeclarationScope::AllocateScopeInfos(parse_info(), off_thread_isolate());
+
+ script_ = parse_info_.CreateScript(off_thread_isolate(),
+ off_thread_factory()->empty_string(),
+ ScriptOriginOptions());
+
+ // Create the SFI list on the script so that SFI SetScript works.
+ Handle<WeakFixedArray> infos = off_thread_factory()->NewWeakFixedArray(
+ parse_info()->max_function_literal_id() + 1, AllocationType::kOld);
+ script_->set_shared_function_infos(*infos);
+
+ return parse_info()->literal();
+ }
+
+ ParseInfo* parse_info() { return &parse_info_; }
+
+ Handle<Script> script() { return script_; }
+
+ OffThreadIsolate* off_thread_isolate() { return &off_thread_isolate_; }
+ OffThreadFactory* off_thread_factory() {
+ return off_thread_isolate()->factory();
+ }
// We only internalize strings which are referred to in other slots, so create
// a wrapper pointing at the off_thread_string.
- OffThreadHandle<FixedArray> WrapString(OffThreadHandle<String> string) {
+ Handle<FixedArray> WrapString(Handle<String> string) {
// TODO(leszeks): Replace with a different factory method (e.g. FixedArray)
// once OffThreadFactory supports it.
return off_thread_factory()->StringWrapperForTest(string);
}
private:
- OffThreadFactory off_thread_factory_;
+ ParseInfo parse_info_;
+ OffThreadIsolate off_thread_isolate_;
+ Handle<String> source_string_;
+ Handle<Script> script_;
};
-TEST_F(OffThreadFactoryTest, HandleOrOffThreadHandle_IsNullWhenConstructed) {
- // Default constructed HandleOrOffThreadHandles should be considered both null
- // and uninitialized.
- EXPECT_TRUE(HandleOrOffThreadHandle<HeapObject>().is_null());
-#ifdef DEBUG
- EXPECT_TRUE(!HandleOrOffThreadHandle<HeapObject>().is_initialized());
-#endif
-
- // Default constructed HandleOrOffThreadHandles should work as both null
- // handles and null off-thread handles.
- EXPECT_TRUE(HandleOrOffThreadHandle<HeapObject>().get<Factory>().is_null());
- EXPECT_TRUE(
- HandleOrOffThreadHandle<HeapObject>().get<OffThreadFactory>().is_null());
-}
-
TEST_F(OffThreadFactoryTest, OneByteInternalizedString_IsAddedToStringTable) {
Vector<const uint8_t> string_vector = StaticCharVector("foo");
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
string_vector.begin(), string_vector.length(), HashSeed(isolate()));
- OffThreadHandle<String> off_thread_string =
- off_thread_factory()->NewOneByteInternalizedString(string_vector,
- hash_field);
+ FixedArray off_thread_wrapper;
+ {
+ OffThreadHandleScope handle_scope(off_thread_isolate());
- OffThreadHandle<FixedArray> off_thread_wrapper =
- off_thread_factory()->StringWrapperForTest(off_thread_string);
+ Handle<String> off_thread_string =
+ off_thread_factory()->NewOneByteInternalizedString(string_vector,
+ hash_field);
- off_thread_factory()->FinishOffThread();
+ off_thread_wrapper =
+ *off_thread_factory()->StringWrapperForTest(off_thread_string);
+ off_thread_factory()->FinishOffThread();
+ }
- Handle<FixedArray> wrapper = handle(*off_thread_wrapper, isolate());
+ Handle<FixedArray> wrapper = handle(off_thread_wrapper, isolate());
off_thread_factory()->Publish(isolate());
Handle<String> string = handle(String::cast(wrapper->get(0)), isolate());
@@ -92,22 +166,25 @@ TEST_F(OffThreadFactoryTest,
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
string_vector.begin(), string_vector.length(), HashSeed(isolate()));
- OffThreadHandle<String> off_thread_string_1 =
- off_thread_factory()->NewOneByteInternalizedString(string_vector,
- hash_field);
- OffThreadHandle<String> off_thread_string_2 =
- off_thread_factory()->NewOneByteInternalizedString(string_vector,
- hash_field);
-
- OffThreadHandle<FixedArray> off_thread_wrapper_1 =
- WrapString(off_thread_string_1);
- OffThreadHandle<FixedArray> off_thread_wrapper_2 =
- WrapString(off_thread_string_2);
-
- off_thread_factory()->FinishOffThread();
+ FixedArray off_thread_wrapper_1;
+ FixedArray off_thread_wrapper_2;
+ {
+ OffThreadHandleScope handle_scope(off_thread_isolate());
+
+ Handle<String> off_thread_string_1 =
+ off_thread_factory()->NewOneByteInternalizedString(string_vector,
+ hash_field);
+ Handle<String> off_thread_string_2 =
+ off_thread_factory()->NewOneByteInternalizedString(string_vector,
+ hash_field);
+
+ off_thread_wrapper_1 = *WrapString(off_thread_string_1);
+ off_thread_wrapper_2 = *WrapString(off_thread_string_2);
+ off_thread_factory()->FinishOffThread();
+ }
- Handle<FixedArray> wrapper_1 = handle(*off_thread_wrapper_1, isolate());
- Handle<FixedArray> wrapper_2 = handle(*off_thread_wrapper_2, isolate());
+ Handle<FixedArray> wrapper_1 = handle(off_thread_wrapper_1, isolate());
+ Handle<FixedArray> wrapper_2 = handle(off_thread_wrapper_2, isolate());
off_thread_factory()->Publish(isolate());
Handle<String> string_1 = handle(String::cast(wrapper_1->get(0)), isolate());
@@ -124,14 +201,17 @@ TEST_F(OffThreadFactoryTest, AstRawString_IsInternalized) {
const AstRawString* raw_string = ast_value_factory.GetOneByteString("foo");
- ast_value_factory.Internalize(off_thread_factory());
+ FixedArray off_thread_wrapper;
+ {
+ OffThreadHandleScope handle_scope(off_thread_isolate());
- OffThreadHandle<FixedArray> off_thread_wrapper =
- WrapString(raw_string->string().get<OffThreadFactory>());
+ ast_value_factory.Internalize(off_thread_isolate());
- off_thread_factory()->FinishOffThread();
+ off_thread_wrapper = *WrapString(raw_string->string());
+ off_thread_factory()->FinishOffThread();
+ }
- Handle<FixedArray> wrapper = handle(*off_thread_wrapper, isolate());
+ Handle<FixedArray> wrapper = handle(off_thread_wrapper, isolate());
off_thread_factory()->Publish(isolate());
Handle<String> string = handle(String::cast(wrapper->get(0)), isolate());
@@ -144,20 +224,24 @@ TEST_F(OffThreadFactoryTest, AstConsString_CreatesConsString) {
AstValueFactory ast_value_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
- const AstRawString* foo_string = ast_value_factory.GetOneByteString("foo");
- const AstRawString* bar_string =
- ast_value_factory.GetOneByteString("bar-plus-padding-for-length");
- const AstConsString* foobar_string =
- ast_value_factory.NewConsString(foo_string, bar_string);
+ FixedArray off_thread_wrapper;
+ {
+ OffThreadHandleScope handle_scope(off_thread_isolate());
- ast_value_factory.Internalize(off_thread_factory());
+ const AstRawString* foo_string = ast_value_factory.GetOneByteString("foo");
+ const AstRawString* bar_string =
+ ast_value_factory.GetOneByteString("bar-plus-padding-for-length");
+ AstConsString* foobar_string =
+ ast_value_factory.NewConsString(foo_string, bar_string);
- OffThreadHandle<FixedArray> off_thread_wrapper =
- WrapString(foobar_string->string().get<OffThreadFactory>());
+ ast_value_factory.Internalize(off_thread_isolate());
- off_thread_factory()->FinishOffThread();
+ off_thread_wrapper =
+ *WrapString(foobar_string->GetString(off_thread_isolate()));
+ off_thread_factory()->FinishOffThread();
+ }
- Handle<FixedArray> wrapper = handle(*off_thread_wrapper, isolate());
+ Handle<FixedArray> wrapper = handle(off_thread_wrapper, isolate());
off_thread_factory()->Publish(isolate());
Handle<String> string = handle(String::cast(wrapper->get(0)), isolate());
@@ -167,5 +251,111 @@ TEST_F(OffThreadFactoryTest, AstConsString_CreatesConsString) {
"foobar-plus-padding-for-length")));
}
+TEST_F(OffThreadFactoryTest, EmptyScript) {
+ FunctionLiteral* program = ParseProgram("");
+
+ SharedFunctionInfo shared;
+ {
+ OffThreadHandleScope handle_scope(off_thread_isolate());
+
+ shared = *off_thread_factory()->NewSharedFunctionInfoForLiteral(
+ program, script(), true);
+
+ off_thread_factory()->FinishOffThread();
+ }
+
+ Handle<SharedFunctionInfo> root_sfi = handle(shared, isolate());
+ off_thread_factory()->Publish(isolate());
+
+ EXPECT_EQ(root_sfi->function_literal_id(), 0);
+}
+
+TEST_F(OffThreadFactoryTest, LazyFunction) {
+ FunctionLiteral* program = ParseProgram("function lazy() {}");
+ FunctionLiteral* lazy = program->scope()
+ ->declarations()
+ ->AtForTest(0)
+ ->AsFunctionDeclaration()
+ ->fun();
+
+ SharedFunctionInfo shared;
+ {
+ OffThreadHandleScope handle_scope(off_thread_isolate());
+
+ shared = *off_thread_factory()->NewSharedFunctionInfoForLiteral(
+ lazy, script(), true);
+
+ off_thread_factory()->FinishOffThread();
+ }
+
+ Handle<SharedFunctionInfo> lazy_sfi = handle(shared, isolate());
+ off_thread_factory()->Publish(isolate());
+
+ EXPECT_EQ(lazy_sfi->function_literal_id(), 1);
+ EXPECT_TRUE(lazy_sfi->Name().IsOneByteEqualTo(CStrVector("lazy")));
+ EXPECT_FALSE(lazy_sfi->is_compiled());
+ EXPECT_TRUE(lazy_sfi->HasUncompiledDataWithoutPreparseData());
+}
+
+TEST_F(OffThreadFactoryTest, EagerFunction) {
+ FunctionLiteral* program = ParseProgram("(function eager() {})");
+ FunctionLiteral* eager = program->body()
+ ->at(0)
+ ->AsExpressionStatement()
+ ->expression()
+ ->AsFunctionLiteral();
+
+ SharedFunctionInfo shared;
+ {
+ OffThreadHandleScope handle_scope(off_thread_isolate());
+
+ shared = *off_thread_factory()->NewSharedFunctionInfoForLiteral(
+ eager, script(), true);
+
+ off_thread_factory()->FinishOffThread();
+ }
+
+ Handle<SharedFunctionInfo> eager_sfi = handle(shared, isolate());
+ off_thread_factory()->Publish(isolate());
+
+ EXPECT_EQ(eager_sfi->function_literal_id(), 1);
+ EXPECT_TRUE(eager_sfi->Name().IsOneByteEqualTo(CStrVector("eager")));
+ EXPECT_FALSE(eager_sfi->HasUncompiledData());
+ // TODO(leszeks): Allocate bytecode and enable these checks.
+ // EXPECT_TRUE(eager_sfi->is_compiled());
+ // EXPECT_TRUE(eager_sfi->HasBytecodeArray());
+}
+
+TEST_F(OffThreadFactoryTest, ImplicitNameFunction) {
+ FunctionLiteral* program = ParseProgram("let implicit_name = function() {}");
+ FunctionLiteral* implicit_name = program->body()
+ ->at(0)
+ ->AsBlock()
+ ->statements()
+ ->at(0)
+ ->AsExpressionStatement()
+ ->expression()
+ ->AsAssignment()
+ ->value()
+ ->AsFunctionLiteral();
+
+ SharedFunctionInfo shared;
+ {
+ OffThreadHandleScope handle_scope(off_thread_isolate());
+
+ shared = *off_thread_factory()->NewSharedFunctionInfoForLiteral(
+ implicit_name, script(), true);
+
+ off_thread_factory()->FinishOffThread();
+ }
+
+ Handle<SharedFunctionInfo> implicit_name_sfi = handle(shared, isolate());
+ off_thread_factory()->Publish(isolate());
+
+ EXPECT_EQ(implicit_name_sfi->function_literal_id(), 1);
+ EXPECT_TRUE(
+ implicit_name_sfi->Name().IsOneByteEqualTo(CStrVector("implicit_name")));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/safepoint-unittest.cc b/deps/v8/test/unittests/heap/safepoint-unittest.cc
new file mode 100644
index 0000000000..462992f5fd
--- /dev/null
+++ b/deps/v8/test/unittests/heap/safepoint-unittest.cc
@@ -0,0 +1,139 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/safepoint.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+using SafepointTest = TestWithIsolate;
+
+TEST_F(SafepointTest, ReachSafepointWithoutLocalHeaps) {
+ Heap* heap = i_isolate()->heap();
+ bool run = false;
+ {
+ SafepointScope scope(heap);
+ run = true;
+ }
+ CHECK(run);
+}
+
+class ParkedThread final : public v8::base::Thread {
+ public:
+ ParkedThread(Heap* heap, base::Mutex* mutex)
+ : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
+ heap_(heap),
+ mutex_(mutex) {}
+
+ void Run() override {
+ LocalHeap local_heap(heap_);
+
+ if (mutex_) {
+ ParkedScope scope(&local_heap);
+ base::MutexGuard guard(mutex_);
+ }
+ }
+
+ Heap* heap_;
+ base::Mutex* mutex_;
+};
+
+TEST_F(SafepointTest, StopParkedThreads) {
+ Heap* heap = i_isolate()->heap();
+
+ int safepoints = 0;
+
+ const int kThreads = 10;
+ const int kRuns = 5;
+
+ for (int run = 0; run < kRuns; run++) {
+ base::Mutex mutex;
+ std::vector<ParkedThread*> threads;
+
+ mutex.Lock();
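+    // Keep the mutex held so that the threads which received it block inside
+    // a ParkedScope; parked threads must not prevent the safepoint below.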
+
+ for (int i = 0; i < kThreads; i++) {
+ ParkedThread* thread =
+ new ParkedThread(heap, i % 2 == 0 ? &mutex : nullptr);
+ CHECK(thread->Start());
+ threads.push_back(thread);
+ }
+
+ {
+ SafepointScope scope(heap);
+ safepoints++;
+ }
+ mutex.Unlock();
+
+ for (ParkedThread* thread : threads) {
+ thread->Join();
+ delete thread;
+ }
+ }
+
+ CHECK_EQ(safepoints, kRuns);
+}
+
+static const int kRuns = 10000;
+
+class RunningThread final : public v8::base::Thread {
+ public:
+ RunningThread(Heap* heap, std::atomic<int>* counter)
+ : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
+ heap_(heap),
+ counter_(counter) {}
+
+ void Run() override {
+ LocalHeap local_heap(heap_);
+
+ for (int i = 0; i < kRuns; i++) {
+ counter_->fetch_add(1);
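+      // Periodically poll for a pending safepoint so a concurrent
+      // SafepointScope can stop this thread.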
+ if (i % 100 == 0) local_heap.Safepoint();
+ }
+ }
+
+ Heap* heap_;
+ std::atomic<int>* counter_;
+};
+
+TEST_F(SafepointTest, StopRunningThreads) {
+ Heap* heap = i_isolate()->heap();
+
+ const int kThreads = 10;
+ const int kRuns = 5;
+ const int kSafepoints = 3;
+ int safepoint_count = 0;
+
+ for (int run = 0; run < kRuns; run++) {
+ std::atomic<int> counter(0);
+ std::vector<RunningThread*> threads;
+
+ for (int i = 0; i < kThreads; i++) {
+ RunningThread* thread = new RunningThread(heap, &counter);
+ CHECK(thread->Start());
+ threads.push_back(thread);
+ }
+
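+ // Each scope stops every thread at its next Safepoint() call and resumes
+ // them all when the scope is left.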
+ for (int i = 0; i < kSafepoints; i++) {
+ SafepointScope scope(heap);
+ safepoint_count++;
+ }
+
+ for (RunningThread* thread : threads) {
+ thread->Join();
+ delete thread;
+ }
+ }
+
+ CHECK_EQ(safepoint_count, kRuns * kSafepoints);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
deleted file mode 100644
index 36d089f03b..0000000000
--- a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "src/common/globals.h"
-#include "src/heap/scavenge-job.h"
-#include "src/utils/utils.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-const size_t kScavengeSpeedInBytesPerMs = 500 * KB;
-const size_t kNewSpaceCapacity = 8 * MB;
-
-
-TEST(ScavengeJob, AllocationLimitEmptyNewSpace) {
- EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
- kScavengeSpeedInBytesPerMs, 0, kNewSpaceCapacity));
-}
-
-
-TEST(ScavengeJob, AllocationLimitFullNewSpace) {
- EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
- kScavengeSpeedInBytesPerMs, kNewSpaceCapacity, kNewSpaceCapacity));
-}
-
-
-TEST(ScavengeJob, AllocationLimitUnknownScavengeSpeed) {
- size_t expected_size = ScavengeJob::kInitialScavengeSpeedInBytesPerMs *
- ScavengeJob::kAverageIdleTimeMs -
- ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
- expected_size = Max(expected_size, ScavengeJob::kMinAllocationLimit);
-
- EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size - 1,
- kNewSpaceCapacity));
- EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size,
- kNewSpaceCapacity));
-}
-
-
-TEST(ScavengeJob, AllocationLimitLowScavengeSpeed) {
- size_t scavenge_speed = 1 * KB;
- EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
- scavenge_speed, ScavengeJob::kMinAllocationLimit - 1, kNewSpaceCapacity));
- EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
- scavenge_speed, ScavengeJob::kMinAllocationLimit, kNewSpaceCapacity));
-}
-
-
-TEST(ScavengeJob, AllocationLimitAverageScavengeSpeed) {
- size_t expected_size =
- kScavengeSpeedInBytesPerMs * ScavengeJob::kAverageIdleTimeMs -
- ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
- EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
- kScavengeSpeedInBytesPerMs, ScavengeJob::kMinAllocationLimit,
- kNewSpaceCapacity));
- EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
- kScavengeSpeedInBytesPerMs, expected_size - 1, kNewSpaceCapacity));
- EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
- kScavengeSpeedInBytesPerMs, expected_size, kNewSpaceCapacity));
-}
-
-
-TEST(ScavengeJob, AllocationLimitHighScavengeSpeed) {
- size_t scavenge_speed = kNewSpaceCapacity;
- size_t expected_size =
- static_cast<size_t>(
- kNewSpaceCapacity *
- ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace) -
- ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
- EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
- scavenge_speed, expected_size - 1, kNewSpaceCapacity));
- EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
- scavenge_speed, expected_size + 1, kNewSpaceCapacity));
-}
-
-
-TEST(ScavengeJob, EnoughIdleTimeForScavengeUnknownScavengeSpeed) {
- size_t scavenge_speed = ScavengeJob::kInitialScavengeSpeedInBytesPerMs;
- size_t new_space_size = 1 * MB;
- size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
- EXPECT_TRUE(
- ScavengeJob::EnoughIdleTimeForScavenge(expected_time, 0, new_space_size));
- EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(expected_time - 1, 0,
- new_space_size));
-}
-
-
-TEST(ScavengeJob, EnoughIdleTimeForScavengeLowScavengeSpeed) {
- size_t scavenge_speed = 1 * KB;
- size_t new_space_size = 1 * MB;
- size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
- EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
- expected_time, scavenge_speed, new_space_size));
- EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
- expected_time - 1, scavenge_speed, new_space_size));
-}
-
-
-TEST(ScavengeJob, EnoughIdleTimeForScavengeHighScavengeSpeed) {
- size_t scavenge_speed = kNewSpaceCapacity;
- size_t new_space_size = 1 * MB;
- size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
- EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
- expected_time, scavenge_speed, new_space_size));
- EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
- expected_time - 1, scavenge_speed, new_space_size));
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/heap/worklist-unittest.cc b/deps/v8/test/unittests/heap/worklist-unittest.cc
index b7e1231424..a12dcb0cee 100644
--- a/deps/v8/test/unittests/heap/worklist-unittest.cc
+++ b/deps/v8/test/unittests/heap/worklist-unittest.cc
@@ -148,13 +148,16 @@ TEST(WorkListTest, LocalPushStaysPrivate) {
SomeObject dummy;
SomeObject* retrieved = nullptr;
EXPECT_TRUE(worklist.IsEmpty());
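+ // GlobalPoolSize() counts the segments published to the global pool, so
+ // pushes that stay in a thread-local segment must not show up in it.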
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_TRUE(worklist_view1.Push(&dummy));
EXPECT_FALSE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_FALSE(worklist_view2.Pop(&retrieved));
EXPECT_EQ(nullptr, retrieved);
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
EXPECT_EQ(&dummy, retrieved);
EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
}
TEST(WorkListTest, GlobalUpdateNull) {
@@ -168,6 +171,7 @@ TEST(WorkListTest, GlobalUpdateNull) {
EXPECT_TRUE(worklist_view.Push(object));
worklist.Update([](SomeObject* object, SomeObject** out) { return false; });
EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
}
TEST(WorkListTest, GlobalUpdate) {
@@ -209,6 +213,7 @@ TEST(WorkListTest, FlushToGlobalPushSegment) {
objectA = reinterpret_cast<SomeObject*>(&objectA);
EXPECT_TRUE(worklist_view0.Push(objectA));
worklist.FlushToGlobal(0);
+ EXPECT_EQ(1U, worklist.GlobalPoolSize());
EXPECT_TRUE(worklist_view1.Pop(&object));
}
@@ -223,6 +228,7 @@ TEST(WorkListTest, FlushToGlobalPopSegment) {
EXPECT_TRUE(worklist_view0.Push(objectA));
EXPECT_TRUE(worklist_view0.Pop(&object));
worklist.FlushToGlobal(0);
+ EXPECT_EQ(1U, worklist.GlobalPoolSize());
EXPECT_TRUE(worklist_view1.Pop(&object));
}
@@ -235,8 +241,10 @@ TEST(WorkListTest, Clear) {
EXPECT_TRUE(worklist_view.Push(object));
}
EXPECT_TRUE(worklist_view.Push(object));
+ EXPECT_EQ(1U, worklist.GlobalPoolSize());
worklist.Clear();
EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
}
TEST(WorkListTest, SingleSegmentSteal) {
@@ -252,6 +260,7 @@ TEST(WorkListTest, SingleSegmentSteal) {
EXPECT_TRUE(worklist_view1.Push(nullptr));
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
EXPECT_EQ(nullptr, retrieved);
+ EXPECT_EQ(1U, worklist.GlobalPoolSize());
// Stealing.
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
@@ -259,6 +268,7 @@ TEST(WorkListTest, SingleSegmentSteal) {
EXPECT_FALSE(worklist_view1.Pop(&retrieved));
}
EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
}
TEST(WorkListTest, MultipleSegmentsStolen) {
@@ -280,11 +290,13 @@ TEST(WorkListTest, MultipleSegmentsStolen) {
EXPECT_TRUE(worklist_view1.Push(&dummy3));
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
EXPECT_EQ(&dummy3, retrieved);
+ EXPECT_EQ(2U, worklist.GlobalPoolSize());
// Stealing.
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
SomeObject* const expect_bag2 = retrieved;
EXPECT_TRUE(worklist_view3.Pop(&retrieved));
SomeObject* const expect_bag3 = retrieved;
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
EXPECT_NE(expect_bag2, expect_bag3);
EXPECT_TRUE(expect_bag2 == &dummy1 || expect_bag2 == &dummy2);
EXPECT_TRUE(expect_bag3 == &dummy1 || expect_bag3 == &dummy2);
@@ -313,10 +325,13 @@ TEST(WorkListTest, MergeGlobalPool) {
EXPECT_TRUE(worklist_view1.Push(nullptr));
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
EXPECT_EQ(nullptr, retrieved);
+ EXPECT_EQ(1U, worklist1.GlobalPoolSize());
// Merging global pool into a new Worklist.
TestWorklist worklist2;
TestWorklist::View worklist_view2(&worklist2, 0);
+ EXPECT_EQ(0U, worklist2.GlobalPoolSize());
worklist2.MergeGlobalPool(&worklist1);
+ EXPECT_EQ(1U, worklist2.GlobalPoolSize());
EXPECT_FALSE(worklist2.IsEmpty());
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 7591a30f6b..cc7ca63061 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -79,8 +79,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreAccumulatorInRegister(wide);
// Emit Ldar and Star taking care to foil the register optimizer.
- builder.StackCheck(0)
- .LoadAccumulatorWithRegister(other)
+ builder.LoadAccumulatorWithRegister(other)
.BinaryOperation(Token::ADD, reg, 1)
.StoreAccumulatorInRegister(reg)
.LoadNull();
@@ -312,7 +311,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.Bind(&after_jump10)
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_jump11)
.Bind(&after_jump11)
- .JumpLoop(&loop_header, 0)
+ .JumpLoop(&loop_header, 0, 0)
.Bind(&after_loop);
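+ // Note: the discrete StackCheck bytecode is gone in this V8 version;
+ // JumpLoop takes an extra argument (apparently the source position the
+ // StackCheck used to carry) and performs the loop's interrupt check
+ // itself.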
}
@@ -343,9 +342,6 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit set pending message bytecode.
builder.SetPendingMessage();
- // Emit stack check bytecode.
- builder.StackCheck(0);
-
// Emit throw and re-throw in its own basic block so that the rest of the
// code isn't omitted due to being dead.
BytecodeLabel after_throw, after_rethrow;
@@ -447,7 +443,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Generate BytecodeArray.
scope.SetScriptScopeInfo(factory->NewScopeInfo(1));
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
CHECK_EQ(the_array->frame_size(),
builder.total_register_count() * kSystemPointerSize);
@@ -535,7 +531,11 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
Register receiver(builder.Receiver());
Register param8(builder.Parameter(8));
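+ // With V8_REVERSE_JSARGS, arguments are laid out in reverse order on the
+ // stack, so the receiver ends up at the higher register index.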
+#ifdef V8_REVERSE_JSARGS
+ CHECK_EQ(receiver.index() - param8.index(), 9);
+#else
CHECK_EQ(param8.index() - receiver.index(), 9);
+#endif
}
TEST_F(BytecodeArrayBuilderTest, Constants) {
@@ -560,7 +560,7 @@ TEST_F(BytecodeArrayBuilderTest, Constants) {
.LoadLiteral(nan)
.Return();
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
// Should only have one entry for each identical constant.
EXPECT_EQ(4, array->constant_pool().length());
@@ -706,12 +706,14 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
BytecodeLoopHeader loop_header;
builder.JumpIfNull(&after_loop)
.Bind(&loop_header)
- .JumpLoop(&loop_header, 0)
+ .JumpLoop(&loop_header, 0, 0)
.Bind(&after_loop);
for (int i = 0; i < 42; i++) {
BytecodeLabel after_loop;
// Conditional jump to force the code after the JumpLoop to be live.
- builder.JumpIfNull(&after_loop).JumpLoop(&loop_header, 0).Bind(&after_loop);
+ builder.JumpIfNull(&after_loop)
+ .JumpLoop(&loop_header, 0, 0)
+ .Bind(&after_loop);
}
// Add padding to force wide backwards jumps.
@@ -719,7 +721,7 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
builder.Debugger();
}
- builder.JumpLoop(&loop_header, 0);
+ builder.JumpLoop(&loop_header, 0, 0);
builder.Bind(&end);
builder.Return();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 23f0d08c1c..5772b802c0 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -53,10 +53,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
- .StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
@@ -73,7 +71,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.Return();
// Test iterator sees the expected output from the builder.
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
BytecodeArrayIterator iterator(builder.ToBytecodeArray(isolate()));
const int kPrefixByteSize = 1;
int offset = 0;
@@ -138,14 +136,6 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
iterator.Advance();
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
- EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
- CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
- iterator.Advance();
-
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -164,14 +154,6 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
kPrefixByteSize;
iterator.Advance();
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
- EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
- CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
- iterator.Advance();
-
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 9553058d8d..ecdf6757fb 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -49,10 +49,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
- .StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
@@ -66,7 +64,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
.Debugger()
.Return();
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
@@ -103,10 +101,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
- .StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
@@ -120,7 +116,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
.Debugger()
.Return();
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
@@ -157,10 +153,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
- .StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
@@ -174,7 +168,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
.Debugger()
.Return();
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
@@ -216,10 +210,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
- .StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
@@ -233,7 +225,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
.Debugger()
.Return();
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
@@ -242,7 +234,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
int offset = bytecodeArray->length() -
Bytecodes::Size(Bytecode::kReturn, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- EXPECT_EQ(iterator.current_index(), 22);
+ EXPECT_EQ(iterator.current_index(), 20);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
@@ -276,10 +268,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
- .StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
@@ -294,13 +284,13 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
.Return();
// Test iterator sees the expected output from the builder.
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
BytecodeArrayRandomIterator iterator(builder.ToBytecodeArray(isolate()),
zone());
const int kPrefixByteSize = 1;
int offset = 0;
- iterator.GoToIndex(13);
+ iterator.GoToIndex(11);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
@@ -308,16 +298,14 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
- EXPECT_EQ(iterator.current_index(), 13);
+ EXPECT_EQ(iterator.current_index(), 11);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -336,7 +324,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
heap_num_1);
ASSERT_TRUE(iterator.IsValid());
- iterator.GoToIndex(18);
+ iterator.GoToIndex(16);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
@@ -344,11 +332,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
@@ -358,7 +344,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
- EXPECT_EQ(iterator.current_index(), 18);
+ EXPECT_EQ(iterator.current_index(), 16);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
@@ -375,7 +361,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset -= Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
- EXPECT_EQ(iterator.current_index(), 15);
+ EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
@@ -388,14 +374,14 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 17);
+ EXPECT_EQ(iterator.current_index(), 15);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
- iterator.GoToIndex(22);
+ iterator.GoToIndex(20);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
@@ -403,11 +389,9 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
@@ -422,12 +406,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- EXPECT_EQ(iterator.current_index(), 22);
+ EXPECT_EQ(iterator.current_index(), 20);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
- iterator.GoToIndex(24);
+ iterator.GoToIndex(22);
EXPECT_FALSE(iterator.IsValid());
iterator.GoToIndex(-5);
@@ -462,10 +446,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
- .StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
@@ -480,7 +462,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
.Return();
// Test iterator sees the expected output from the builder.
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
BytecodeArrayRandomIterator iterator(builder.ToBytecodeArray(isolate()),
zone());
const int kPrefixByteSize = 1;
@@ -553,17 +535,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
++iterator;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
- EXPECT_EQ(iterator.current_index(), 7);
- EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
- ASSERT_TRUE(iterator.IsValid());
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
- ++iterator;
-
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 8);
+ EXPECT_EQ(iterator.current_index(), 7);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -573,7 +546,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
- EXPECT_EQ(iterator.current_index(), 9);
+ EXPECT_EQ(iterator.current_index(), 8);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
@@ -582,17 +555,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
kPrefixByteSize;
++iterator;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
- EXPECT_EQ(iterator.current_index(), 10);
- EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
- ASSERT_TRUE(iterator.IsValid());
- offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
- ++iterator;
-
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 11);
+ EXPECT_EQ(iterator.current_index(), 9);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
@@ -602,7 +566,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdar);
- EXPECT_EQ(iterator.current_index(), 12);
+ EXPECT_EQ(iterator.current_index(), 10);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -611,7 +575,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
- EXPECT_EQ(iterator.current_index(), 13);
+ EXPECT_EQ(iterator.current_index(), 11);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -621,7 +585,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 14);
+ EXPECT_EQ(iterator.current_index(), 12);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
@@ -631,7 +595,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
- EXPECT_EQ(iterator.current_index(), 15);
+ EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
@@ -642,7 +606,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
- EXPECT_EQ(iterator.current_index(), 16);
+ EXPECT_EQ(iterator.current_index(), 14);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -652,7 +616,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 17);
+ EXPECT_EQ(iterator.current_index(), 15);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
@@ -662,7 +626,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
- EXPECT_EQ(iterator.current_index(), 18);
+ EXPECT_EQ(iterator.current_index(), 16);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
@@ -677,7 +641,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
- EXPECT_EQ(iterator.current_index(), 19);
+ EXPECT_EQ(iterator.current_index(), 17);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -688,7 +652,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
- EXPECT_EQ(iterator.current_index(), 20);
+ EXPECT_EQ(iterator.current_index(), 18);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
@@ -699,7 +663,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
- EXPECT_EQ(iterator.current_index(), 21);
+ EXPECT_EQ(iterator.current_index(), 19);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
@@ -707,7 +671,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- EXPECT_EQ(iterator.current_index(), 22);
+ EXPECT_EQ(iterator.current_index(), 20);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
@@ -743,10 +707,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
- .StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
@@ -761,7 +723,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
.Return();
// Test iterator sees the expected output from the builder.
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
const int kPrefixByteSize = 1;
@@ -771,7 +733,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kReturn, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- EXPECT_EQ(iterator.current_index(), 22);
+ EXPECT_EQ(iterator.current_index(), 20);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
@@ -779,7 +741,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
- EXPECT_EQ(iterator.current_index(), 21);
+ EXPECT_EQ(iterator.current_index(), 19);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
@@ -787,7 +749,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
- EXPECT_EQ(iterator.current_index(), 20);
+ EXPECT_EQ(iterator.current_index(), 18);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
@@ -798,7 +760,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
- EXPECT_EQ(iterator.current_index(), 19);
+ EXPECT_EQ(iterator.current_index(), 17);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -810,7 +772,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -=
Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
- EXPECT_EQ(iterator.current_index(), 18);
+ EXPECT_EQ(iterator.current_index(), 16);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
@@ -824,7 +786,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 17);
+ EXPECT_EQ(iterator.current_index(), 15);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
@@ -834,7 +796,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
- EXPECT_EQ(iterator.current_index(), 16);
+ EXPECT_EQ(iterator.current_index(), 14);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -844,7 +806,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
- EXPECT_EQ(iterator.current_index(), 15);
+ EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
@@ -855,7 +817,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 14);
+ EXPECT_EQ(iterator.current_index(), 12);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
@@ -865,7 +827,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
- EXPECT_EQ(iterator.current_index(), 13);
+ EXPECT_EQ(iterator.current_index(), 11);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -875,7 +837,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdar);
- EXPECT_EQ(iterator.current_index(), 12);
+ EXPECT_EQ(iterator.current_index(), 10);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -884,7 +846,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 11);
+ EXPECT_EQ(iterator.current_index(), 9);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
@@ -892,19 +854,10 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
ASSERT_TRUE(iterator.IsValid());
--iterator;
- offset -= Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
- EXPECT_EQ(iterator.current_index(), 10);
- EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
- ASSERT_TRUE(iterator.IsValid());
- --iterator;
-
offset -= Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
- EXPECT_EQ(iterator.current_index(), 9);
+ EXPECT_EQ(iterator.current_index(), 8);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
@@ -913,7 +866,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
- EXPECT_EQ(iterator.current_index(), 8);
+ EXPECT_EQ(iterator.current_index(), 7);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
@@ -921,15 +874,6 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
ASSERT_TRUE(iterator.IsValid());
--iterator;
- offset -= Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
- EXPECT_EQ(iterator.current_index(), 7);
- EXPECT_EQ(iterator.current_offset(), offset);
- EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
- ASSERT_TRUE(iterator.IsValid());
- --iterator;
-
offset -= Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
EXPECT_EQ(iterator.current_index(), 6);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 339fc33178..0bf431eaae 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -115,28 +115,24 @@ void BytecodeArrayWriterUnittest::WriteJumpLoop(Bytecode bytecode,
TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
CHECK_EQ(bytecodes()->size(), 0u);
- Write(Bytecode::kStackCheck, {10, false});
- CHECK_EQ(bytecodes()->size(), 1u);
-
Write(Bytecode::kLdaSmi, 127, {55, true});
- CHECK_EQ(bytecodes()->size(), 3u);
+ CHECK_EQ(bytecodes()->size(), 2u);
Write(Bytecode::kStar, Register(20).ToOperand());
- CHECK_EQ(bytecodes()->size(), 5u);
+ CHECK_EQ(bytecodes()->size(), 4u);
Write(Bytecode::kLdar, Register(200).ToOperand());
- CHECK_EQ(bytecodes()->size(), 9u);
+ CHECK_EQ(bytecodes()->size(), 8u);
Write(Bytecode::kReturn, {70, true});
- CHECK_EQ(bytecodes()->size(), 10u);
+ CHECK_EQ(bytecodes()->size(), 9u);
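+ // Every offset is one byte smaller now that no leading StackCheck is
+ // emitted.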
static const uint8_t expected_bytes[] = {
// clang-format off
- /* 0 10 E> */ B(StackCheck),
- /* 1 55 S> */ B(LdaSmi), U8(127),
- /* 3 */ B(Star), R8(20),
- /* 5 */ B(Wide), B(Ldar), R16(200),
- /* 9 70 S> */ B(Return),
+ /* 0 55 S> */ B(LdaSmi), U8(127),
+ /* 2 */ B(Star), R8(20),
+ /* 4 */ B(Wide), B(Ldar), R16(200),
+ /* 8 70 S> */ B(Return),
// clang-format on
};
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
@@ -150,8 +146,7 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
*writer()->ToSourcePositionTable(isolate()));
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
- PositionTableEntry expected_positions[] = {
- {0, 10, false}, {1, 55, true}, {9, 70, true}};
+ PositionTableEntry expected_positions[] = {{0, 55, true}, {8, 70, true}};
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -168,40 +163,37 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
static const uint8_t expected_bytes[] = {
// clang-format off
- /* 0 30 E> */ B(StackCheck),
- /* 1 42 S> */ B(LdaConstant), U8(0),
- /* 3 42 E> */ B(Add), R8(1), U8(1),
- /* 5 68 S> */ B(JumpIfUndefined), U8(39),
- /* 7 */ B(JumpIfNull), U8(37),
- /* 9 */ B(ToObject), R8(3),
- /* 11 */ B(ForInPrepare), R8(3), U8(4),
- /* 14 */ B(LdaZero),
- /* 15 */ B(Star), R8(7),
- /* 17 63 S> */ B(ForInContinue), R8(7), R8(6),
- /* 20 */ B(JumpIfFalse), U8(24),
- /* 22 */ B(ForInNext), R8(3), R8(7), R8(4), U8(1),
- /* 27 */ B(JumpIfUndefined), U8(10),
- /* 29 */ B(Star), R8(0),
- /* 31 54 E> */ B(StackCheck),
- /* 32 */ B(Ldar), R8(0),
- /* 34 */ B(Star), R8(2),
- /* 36 85 S> */ B(Return),
- /* 37 */ B(ForInStep), R8(7),
- /* 39 */ B(Star), R8(7),
- /* 41 */ B(JumpLoop), U8(24), U8(0),
- /* 44 */ B(LdaUndefined),
- /* 45 85 S> */ B(Return),
+ /* 0 42 S> */ B(LdaConstant), U8(0),
+ /* 2 42 E> */ B(Add), R8(1), U8(1),
+ /* 4 68 S> */ B(JumpIfUndefined), U8(38),
+ /* 6 */ B(JumpIfNull), U8(36),
+ /* 8 */ B(ToObject), R8(3),
+ /* 10 */ B(ForInPrepare), R8(3), U8(4),
+ /* 13 */ B(LdaZero),
+ /* 14 */ B(Star), R8(7),
+ /* 16 63 S> */ B(ForInContinue), R8(7), R8(6),
+ /* 19 */ B(JumpIfFalse), U8(23),
+ /* 21 */ B(ForInNext), R8(3), R8(7), R8(4), U8(1),
+ /* 26 */ B(JumpIfUndefined), U8(9),
+ /* 28 */ B(Star), R8(0),
+ /* 30 */ B(Ldar), R8(0),
+ /* 32 */ B(Star), R8(2),
+ /* 34 85 S> */ B(Return),
+ /* 35 */ B(ForInStep), R8(7),
+ /* 37 */ B(Star), R8(7),
+ /* 39 */ B(JumpLoop), U8(23), U8(0),
+ /* 42 */ B(LdaUndefined),
+ /* 43 85 S> */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {
- {0, 30, false}, {1, 42, true}, {3, 42, false}, {6, 68, true},
- {18, 63, true}, {32, 54, false}, {37, 85, true}, {46, 85, true}};
+ {0, 42, true}, {2, 42, false}, {5, 68, true},
+ {17, 63, true}, {35, 85, true}, {44, 85, true}};
BytecodeLoopHeader loop_header;
BytecodeLabel jump_for_in, jump_end_1, jump_end_2, jump_end_3;
- Write(Bytecode::kStackCheck, {30, false});
Write(Bytecode::kLdaConstant, U8(0), {42, true});
Write(Bytecode::kAdd, R(1), U8(1), {42, false});
WriteJump(Bytecode::kJumpIfUndefined, &jump_end_1, {68, true});
@@ -216,7 +208,6 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1));
WriteJump(Bytecode::kJumpIfUndefined, &jump_for_in);
Write(Bytecode::kStar, R(0));
- Write(Bytecode::kStackCheck, {54, false});
Write(Bytecode::kLdar, R(0));
Write(Bytecode::kStar, R(2));
Write(Bytecode::kReturn, {85, true});
@@ -258,23 +249,18 @@ TEST_F(BytecodeArrayWriterUnittest, ElideNoneffectfulBytecodes) {
static const uint8_t expected_bytes[] = {
// clang-format off
- /* 0 10 E> */ B(StackCheck),
- /* 1 55 S> */ B(Ldar), R8(20),
- /* 3 */ B(Star), R8(20),
- /* 5 */ B(CreateMappedArguments),
- /* 6 60 S> */ B(LdaSmi), U8(127),
- /* 8 70 S> */ B(Ldar), R8(20),
- /* 10 75 S> */ B(Return),
+ /* 0 55 S> */ B(Ldar), R8(20),
+ /* 2 */ B(Star), R8(20),
+ /* 4 */ B(CreateMappedArguments),
+ /* 5 60 S> */ B(LdaSmi), U8(127),
+ /* 7 70 S> */ B(Ldar), R8(20),
+ /* 9 75 S> */ B(Return),
// clang-format on
};
- static const PositionTableEntry expected_positions[] = {{0, 10, false},
- {1, 55, true},
- {6, 60, false},
- {8, 70, true},
- {10, 75, true}};
+ static const PositionTableEntry expected_positions[] = {
+ {0, 55, true}, {5, 60, false}, {7, 70, true}, {9, 75, true}};
- Write(Bytecode::kStackCheck, {10, false});
Write(Bytecode::kLdaSmi, 127, {55, true}); // Should be elided.
Write(Bytecode::kLdar, Register(20).ToOperand());
Write(Bytecode::kStar, Register(20).ToOperand());
@@ -310,27 +296,25 @@ TEST_F(BytecodeArrayWriterUnittest, ElideNoneffectfulBytecodes) {
TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
static const uint8_t expected_bytes[] = {
// clang-format off
- /* 0 10 E> */ B(StackCheck),
- /* 1 55 S> */ B(LdaSmi), U8(127),
- /* 3 */ B(Jump), U8(2),
- /* 5 65 S> */ B(LdaSmi), U8(127),
- /* 7 */ B(JumpIfFalse), U8(3),
- /* 9 75 S> */ B(Return),
- /* 10 */ B(JumpIfFalse), U8(3),
- /* 12 */ B(Throw),
- /* 13 */ B(JumpIfFalse), U8(3),
- /* 15 */ B(ReThrow),
- /* 16 */ B(Return),
+ /* 0 55 S> */ B(LdaSmi), U8(127),
+ /* 2 */ B(Jump), U8(2),
+ /* 4 65 S> */ B(LdaSmi), U8(127),
+ /* 6 */ B(JumpIfFalse), U8(3),
+ /* 8 75 S> */ B(Return),
+ /* 9 */ B(JumpIfFalse), U8(3),
+ /* 11 */ B(Throw),
+ /* 12 */ B(JumpIfFalse), U8(3),
+ /* 14 */ B(ReThrow),
+ /* 15 */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {
- {0, 10, false}, {1, 55, true}, {5, 65, true}, {9, 75, true}};
+ {0, 55, true}, {4, 65, true}, {8, 75, true}};
BytecodeLabel after_jump, after_conditional_jump, after_return, after_throw,
after_rethrow;
- Write(Bytecode::kStackCheck, {10, false});
Write(Bytecode::kLdaSmi, 127, {55, true});
WriteJump(Bytecode::kJump, &after_jump);
Write(Bytecode::kLdaSmi, 127); // Dead code.
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 680479754a..bfe83b03ca 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -40,7 +40,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
builder.Insert(i + 0.5);
}
CHECK_EQ(builder.size(), k16BitCapacity);
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
for (size_t i = 0; i < k16BitCapacity; i++) {
CHECK_EQ(
Handle<HeapNumber>::cast(builder.At(i, isolate()).ToHandleChecked())
@@ -90,7 +90,7 @@ TEST_F(ConstantArrayBuilderTest, ToLargeFixedArrayWithReservations) {
for (int i = 0; i < kNumberOfElements; i++) {
builder.CommitReservedEntry(builder.CreateReservedEntry(), Smi::FromInt(i));
}
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
ASSERT_EQ(kNumberOfElements, constant_array->length());
for (int i = 0; i < kNumberOfElements; i++) {
@@ -149,7 +149,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
builder.DiscardReservedEntry(OperandSize::kByte);
}
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(),
static_cast<int>(2 * k8BitCapacity + reserved));
@@ -203,7 +203,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
CHECK_EQ(builder.size(), i + 1);
}
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(),
static_cast<int>(k8BitCapacity + reserved));
@@ -234,7 +234,7 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), 2 * k8BitCapacity);
}
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), static_cast<int>(2 * k8BitCapacity));
for (size_t i = 0; i < k8BitCapacity; i++) {
@@ -300,7 +300,7 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
builder.DiscardReservedEntry(OperandSize::kByte);
}
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), k8BitCapacity + 1);
for (int i = kNumberOfHoles; i < k8BitCapacity; i++) {
@@ -343,7 +343,7 @@ TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
builder.DiscardReservedEntry(OperandSize::kQuad);
}
- ast_factory.Internalize(isolate()->factory());
+ ast_factory.Internalize(isolate());
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), 65537);
int count = 1;
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index fac266d3f9..59d9b4df78 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -235,8 +235,7 @@ class ValueSerializerTest : public TestWithIsolate {
}
Local<String> StringFromUtf8(const char* source) {
- return String::NewFromUtf8(isolate(), source, NewStringType::kNormal)
- .ToLocalChecked();
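+ // NewStringType::kNormal is now the default argument of NewFromUtf8, so
+ // callers no longer have to spell it out.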
+ return String::NewFromUtf8(isolate(), source).ToLocalChecked();
}
std::string Utf8Value(Local<Value> value) {
@@ -2500,10 +2499,8 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
Maybe<uint32_t> GetWasmModuleTransferId(
Isolate* isolate, Local<WasmModuleObject> module) override {
isolate->ThrowException(Exception::Error(
- String::NewFromOneByte(
- isolate,
- reinterpret_cast<const uint8_t*>(kUnsupportedSerialization),
- NewStringType::kNormal)
+ String::NewFromOneByte(isolate, reinterpret_cast<const uint8_t*>(
+ kUnsupportedSerialization))
.ToLocalChecked()));
return Nothing<uint32_t>();
}
diff --git a/deps/v8/test/unittests/profiler/strings-storage-unittest.cc b/deps/v8/test/unittests/profiler/strings-storage-unittest.cc
index 31225f46c2..087a7bf735 100644
--- a/deps/v8/test/unittests/profiler/strings-storage-unittest.cc
+++ b/deps/v8/test/unittests/profiler/strings-storage-unittest.cc
@@ -108,5 +108,54 @@ TEST_F(StringsStorageWithIsolate, FormatAndGetShareStorage) {
CHECK_EQ(stored_str, formatted_str);
}
+TEST_F(StringsStorageWithIsolate, Refcounting) {
+ StringsStorage storage;
+
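+ // GetCopy interns its argument: a second copy with equal contents should
+ // return the same pointer and bump a refcount rather than store a
+ // duplicate.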
+ const char* a = storage.GetCopy("12");
+ CHECK_EQ(storage.GetStringCountForTesting(), 1);
+
+ const char* b = storage.GetCopy("12");
+ CHECK_EQ(storage.GetStringCountForTesting(), 1);
+
+ // Ensure that we deduplicate the string.
+ CHECK_EQ(a, b);
+
+ CHECK(storage.Release(a));
+ CHECK_EQ(storage.GetStringCountForTesting(), 1);
+ CHECK(storage.Release(b));
+ CHECK_EQ(storage.GetStringCountForTesting(), 0);
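+ // Releasing a string that is no longer tracked only reports failure in
+ // release builds; debug builds CHECK-fail instead (see InvalidRelease).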
+#if !DEBUG
+ CHECK(!storage.Release("12"));
+#endif // !DEBUG
+
+ // Verify that other constructors refcount as intended.
+ const char* c = storage.GetFormatted("%d", 12);
+ CHECK_EQ(storage.GetStringCountForTesting(), 1);
+
+ const char* d = storage.GetName(12);
+ CHECK_EQ(storage.GetStringCountForTesting(), 1);
+
+ CHECK_EQ(c, d);
+
+ CHECK(storage.Release(c));
+ CHECK_EQ(storage.GetStringCountForTesting(), 1);
+ CHECK(storage.Release(d));
+ CHECK_EQ(storage.GetStringCountForTesting(), 0);
+#if !DEBUG
+ CHECK(!storage.Release("12"));
+#endif // !DEBUG
+}
+
+TEST_F(StringsStorageWithIsolate, InvalidRelease) {
+ StringsStorage storage;
+
+ // Releasing a string whose refcount is already gone CHECK-fails in debug
+ // builds.
+#ifdef DEBUG
+ ASSERT_DEATH_IF_SUPPORTED(storage.Release("12"), "check failed");
+#else
+ CHECK(!storage.Release("12"));
+#endif // DEBUG
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 3746ee267e..c2ffbf1561 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -181,9 +181,8 @@ class WithContextMixin : public TMixin {
const Local<Context>& v8_context() const { return context_; }
Local<Value> RunJS(const char* source) {
- return RunJS(v8::String::NewFromUtf8(v8_isolate(), source,
- v8::NewStringType::kNormal)
- .ToLocalChecked());
+ return RunJS(
+ v8::String::NewFromUtf8(v8_isolate(), source).ToLocalChecked());
}
Local<Value> RunJS(v8::String::ExternalOneByteStringResource* source) {
@@ -192,9 +191,7 @@ class WithContextMixin : public TMixin {
}
v8::Local<v8::String> NewString(const char* string) {
- return v8::String::NewFromUtf8(v8_isolate(), string,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
+ return v8::String::NewFromUtf8(v8_isolate(), string).ToLocalChecked();
}
void SetGlobalProperty(const char* name, v8::Local<v8::Value> value) {
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index 5180e301cc..778efc7641 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -25,6 +25,9 @@ namespace torque_internal {
const object: HeapObject;
const offset: intptr;
}
+ type ConstReference<T : type> extends Reference<T>;
+ type MutableReference<T : type> extends ConstReference<T>;
+
type UninitializedHeapObject extends HeapObject;
}
@@ -42,6 +45,7 @@ extern class HeapObject extends StrongTagged {
}
type Map extends HeapObject generates 'TNode<Map>';
type Object = Smi | HeapObject;
+type Number = Smi | HeapNumber;
type JSReceiver extends HeapObject generates 'TNode<JSReceiver>';
type JSObject extends JSReceiver generates 'TNode<JSObject>';
type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
@@ -70,12 +74,18 @@ type Code extends HeapObject generates 'TNode<Code>';
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Context extends HeapObject generates 'TNode<Context>';
type NativeContext extends Context;
+type SmiTagged<T : type extends uint31> extends Smi;
+type String extends HeapObject;
+type HeapNumber extends HeapObject;
+type FixedArrayBase extends HeapObject;
struct float64_or_hole {
is_hole: bool;
value: float64;
}
+extern operator '+' macro IntPtrAdd(intptr, intptr): intptr;
+
intrinsic %FromConstexpr<To: type, From: type>(b: From): To;
intrinsic %RawDownCast<To: type, From: type>(x: From): To;
intrinsic %RawConstexprCast<To: type, From: type>(f: From): To;
@@ -84,6 +94,7 @@ extern macro TaggedToSmi(Object): Smi
labels CastError;
extern macro TaggedToHeapObject(Object): HeapObject
labels CastError;
+extern macro Float64SilenceNaN(float64): float64;
extern macro IntPtrConstant(constexpr int31): intptr;
@@ -97,6 +108,9 @@ FromConstexpr<Smi, constexpr int31>(s: constexpr int31): Smi {
FromConstexpr<intptr, constexpr int31>(i: constexpr int31): intptr {
return IntPtrConstant(i);
}
+FromConstexpr<intptr, constexpr intptr>(i: constexpr intptr): intptr {
+ return %FromConstexpr<intptr>(i);
+}
macro Cast<A : type extends Object>(implicit context: Context)(o: Object): A
labels CastError {
@@ -613,6 +627,134 @@ TEST(Torque, EnumInTypeswitch) {
)");
}
+TEST(Torque, ConstClassFields) {
+ ExpectSuccessfulCompilation(R"(
+ class Foo extends HeapObject {
+ const x: int32;
+ y: int32;
+ }
+
+ @export
+ macro Test(implicit context: Context)(o: Foo, n: int32) {
+ const _x: int32 = o.x;
+ o.y = n;
+ }
+ )");
+
+ ExpectFailingCompilation(R"(
+ class Foo extends HeapObject {
+ const x: int32;
+ }
+
+ @export
+ macro Test(implicit context: Context)(o: Foo, n: int32) {
+ o.x = n;
+ }
+ )",
+ HasSubstr("cannot assign to const value"));
+
+ ExpectSuccessfulCompilation(R"(
+ class Foo extends HeapObject {
+ s: Bar;
+ }
+ struct Bar {
+ const x: int32;
+ y: int32;
+ }
+
+ @export
+ macro Test(implicit context: Context)(o: Foo, n: int32) {
+ const _x: int32 = o.s.x;
+ // Assigning a struct as a value is OK, even when the struct contains
+ // const fields.
+ o.s = Bar{x: n, y: n};
+ o.s.y = n;
+ }
+ )");
+
+ ExpectFailingCompilation(R"(
+ class Foo extends HeapObject {
+ const s: Bar;
+ }
+ struct Bar {
+ const x: int32;
+ y: int32;
+ }
+
+ @export
+ macro Test(implicit context: Context)(o: Foo, n: int32) {
+ o.s.y = n;
+ }
+ )",
+ HasSubstr("cannot assign to const value"));
+
+ ExpectFailingCompilation(R"(
+ class Foo extends HeapObject {
+ s: Bar;
+ }
+ struct Bar {
+ const x: int32;
+ y: int32;
+ }
+
+ @export
+ macro Test(implicit context: Context)(o: Foo, n: int32) {
+ o.s.x = n;
+ }
+ )",
+ HasSubstr("cannot assign to const value"));
+}
+
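+// The cases below exercise Torque references: a mutable reference (&T)
+// supports both reads and writes, a const reference (const &T) only reads,
+// and a &T implicitly converts to const &T but not the other way around.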
+TEST(Torque, References) {
+ ExpectSuccessfulCompilation(R"(
+ class Foo extends HeapObject {
+ const x: int32;
+ y: int32;
+ }
+
+ @export
+ macro Test(implicit context: Context)(o: Foo, n: int32) {
+ const constRefX: const &int32 = &o.x;
+ const refY: &int32 = &o.y;
+ const constRefY: const &int32 = refY;
+ const _x: int32 = *constRefX;
+ const _y1: int32 = *refY;
+ const _y2: int32 = *constRefY;
+ *refY = n;
+ let r: const &int32 = constRefX;
+ r = constRefY;
+ }
+ )");
+
+ ExpectFailingCompilation(R"(
+ class Foo extends HeapObject {
+ const x: int32;
+ y: int32;
+ }
+
+ @export
+ macro Test(implicit context: Context)(o: Foo) {
+ const _refX: &int32 = &o.x;
+ }
+ )",
+ HasSubstr("cannot use expression of type const "
+ "&int32 as a value of type &int32"));
+
+ ExpectFailingCompilation(R"(
+ class Foo extends HeapObject {
+ const x: int32;
+ y: int32;
+ }
+
+ @export
+ macro Test(implicit context: Context)(o: Foo, n: int32) {
+ const constRefX: const &int32 = &o.x;
+ *constRefX = n;
+ }
+ )",
+ HasSubstr("cannot assign to const value"));
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index de3f825335..e242132a14 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -109,7 +109,7 @@ class FunctionBodyDecoderTest : public TestWithZone {
// Prepends local variable declarations and renders nice error messages for
// verification failures.
template <typename Code = std::initializer_list<const byte>>
- void Validate(bool expected_success, FunctionSig* sig, Code&& raw_code,
+ void Validate(bool expected_success, const FunctionSig* sig, Code&& raw_code,
AppendEnd append_end = kAppendEnd,
const char* message = nullptr) {
Vector<const byte> code =
@@ -136,20 +136,20 @@ class FunctionBodyDecoderTest : public TestWithZone {
}
template <typename Code = std::initializer_list<const byte>>
- void ExpectValidates(FunctionSig* sig, Code&& raw_code,
+ void ExpectValidates(const FunctionSig* sig, Code&& raw_code,
AppendEnd append_end = kAppendEnd,
const char* message = nullptr) {
Validate(true, sig, std::forward<Code>(raw_code), append_end, message);
}
template <typename Code = std::initializer_list<const byte>>
- void ExpectFailure(FunctionSig* sig, Code&& raw_code,
+ void ExpectFailure(const FunctionSig* sig, Code&& raw_code,
AppendEnd append_end = kAppendEnd,
const char* message = nullptr) {
Validate(false, sig, std::forward<Code>(raw_code), append_end, message);
}
- void TestBinop(WasmOpcode opcode, FunctionSig* success) {
+ void TestBinop(WasmOpcode opcode, const FunctionSig* success) {
// op(local[0], local[1])
byte code[] = {WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
ExpectValidates(success, code);
@@ -171,7 +171,7 @@ class FunctionBodyDecoderTest : public TestWithZone {
}
}
- void TestUnop(WasmOpcode opcode, FunctionSig* success) {
+ void TestUnop(WasmOpcode opcode, const FunctionSig* success) {
TestUnop(opcode, success->GetReturn(), success->GetParam(0));
}
@@ -215,22 +215,23 @@ class TestModuleBuilder {
CHECK_LE(mod.globals.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.globals.size() - 1);
}
- byte AddSignature(FunctionSig* sig) {
+ byte AddSignature(const FunctionSig* sig) {
mod.signatures.push_back(sig);
CHECK_LE(mod.signatures.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.signatures.size() - 1);
}
- byte AddFunction(FunctionSig* sig) {
- mod.functions.push_back({sig, // sig
- 0, // func_index
- 0, // sig_index
- {0, 0}, // code
- false, // import
- false}); // export
+ byte AddFunction(const FunctionSig* sig, bool declared = true) {
+ mod.functions.push_back({sig, // sig
+ 0, // func_index
+ 0, // sig_index
+ {0, 0}, // code
+ false, // import
+ false, // export
+ declared}); // declared
CHECK_LE(mod.functions.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.functions.size() - 1);
}
- byte AddImport(FunctionSig* sig) {
+ byte AddImport(const FunctionSig* sig) {
byte result = AddFunction(sig);
mod.functions[result].imported = true;
return result;
@@ -259,11 +260,16 @@ class TestModuleBuilder {
mod.maximum_pages = 100;
}
- void InitializeTable() { mod.tables.emplace_back(); }
+ byte InitializeTable(wasm::ValueType type) {
+ mod.tables.emplace_back();
+ mod.tables.back().type = type;
+ return static_cast<byte>(mod.tables.size() - 1);
+ }
- byte AddPassiveElementSegment() {
- mod.elem_segments.emplace_back();
+ byte AddPassiveElementSegment(wasm::ValueType type) {
+ mod.elem_segments.emplace_back(false);
auto& init = mod.elem_segments.back();
+ init.type = type;
// Add 5 empty elements.
for (uint32_t j = 0; j < 5; j++) {
init.entries.push_back(WasmElemSegment::kNullIndex);
@@ -271,6 +277,12 @@ class TestModuleBuilder {
return static_cast<byte>(mod.elem_segments.size() - 1);
}
+ byte AddDeclarativeElementSegment() {
+ mod.elem_segments.emplace_back(true);
+ mod.elem_segments.back().entries.push_back(WasmElemSegment::kNullIndex);
+ return static_cast<byte>(mod.elem_segments.size() - 1);
+ }
+
// Set the number of data segments as declared by the DataCount section.
void SetDataSegmentCount(uint32_t data_segment_count) {
// The Data section occurs after the Code section, so we don't need to
@@ -387,10 +399,8 @@ TEST_F(FunctionBodyDecoderTest, TooManyLocals) {
}
TEST_F(FunctionBodyDecoderTest, GetLocal0_param_n) {
- FunctionSig* array[] = {sigs.i_i(), sigs.i_ii(), sigs.i_iii()};
-
- for (size_t i = 0; i < arraysize(array); i++) {
- ExpectValidates(array[i], kCodeGetLocal0);
+ for (const FunctionSig* sig : {sigs.i_i(), sigs.i_ii(), sigs.i_iii()}) {
+ ExpectValidates(sig, kCodeGetLocal0);
}
}
@@ -1350,14 +1360,14 @@ TEST_F(FunctionBodyDecoderTest, MacrosInt64) {
TEST_F(FunctionBodyDecoderTest, AllSimpleExpressions) {
WASM_FEATURE_SCOPE(anyref);
// Test all simple expressions which are described by a signature.
-#define DECODE_TEST(name, opcode, sig) \
- { \
- FunctionSig* sig = WasmOpcodes::Signature(kExpr##name); \
- if (sig->parameter_count() == 1) { \
- TestUnop(kExpr##name, sig); \
- } else { \
- TestBinop(kExpr##name, sig); \
- } \
+#define DECODE_TEST(name, opcode, sig) \
+ { \
+ const FunctionSig* sig = WasmOpcodes::Signature(kExpr##name); \
+ if (sig->parameter_count() == 1) { \
+ TestUnop(kExpr##name, sig); \
+ } else { \
+ TestBinop(kExpr##name, sig); \
+ } \
}
FOREACH_SIMPLE_OPCODE(DECODE_TEST);
@@ -1475,7 +1485,7 @@ TEST_F(FunctionBodyDecoderTest, AllLoadMemCombinations) {
MachineType mem_type = machineTypes[j];
byte code[] = {WASM_LOAD_MEM(mem_type, WASM_ZERO)};
FunctionSig sig(1, 0, &local_type);
- Validate(local_type == ValueTypes::ValueTypeFor(mem_type), &sig, code);
+ Validate(local_type == ValueType::For(mem_type), &sig, code);
}
}
}
@@ -1490,13 +1500,13 @@ TEST_F(FunctionBodyDecoderTest, AllStoreMemCombinations) {
MachineType mem_type = machineTypes[j];
byte code[] = {WASM_STORE_MEM(mem_type, WASM_ZERO, WASM_GET_LOCAL(0))};
FunctionSig sig(0, 1, &local_type);
- Validate(local_type == ValueTypes::ValueTypeFor(mem_type), &sig, code);
+ Validate(local_type == ValueType::For(mem_type), &sig, code);
}
}
}
TEST_F(FunctionBodyDecoderTest, SimpleCalls) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1511,7 +1521,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleCalls) {
}
TEST_F(FunctionBodyDecoderTest, CallsWithTooFewArguments) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1525,7 +1535,7 @@ TEST_F(FunctionBodyDecoderTest, CallsWithTooFewArguments) {
}
TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs2) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1537,7 +1547,7 @@ TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs2) {
}
TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs3) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1557,7 +1567,7 @@ TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs3) {
TEST_F(FunctionBodyDecoderTest, SimpleReturnCalls) {
WASM_FEATURE_SCOPE(return_call);
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1574,7 +1584,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleReturnCalls) {
TEST_F(FunctionBodyDecoderTest, ReturnCallsWithTooFewArguments) {
WASM_FEATURE_SCOPE(return_call);
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1590,7 +1600,7 @@ TEST_F(FunctionBodyDecoderTest, ReturnCallsWithTooFewArguments) {
TEST_F(FunctionBodyDecoderTest, ReturnCallsWithMismatchedSigs) {
WASM_FEATURE_SCOPE(return_call);
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1609,7 +1619,7 @@ TEST_F(FunctionBodyDecoderTest, ReturnCallsWithMismatchedSigs) {
TEST_F(FunctionBodyDecoderTest, SimpleIndirectReturnCalls) {
WASM_FEATURE_SCOPE(return_call);
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
builder.AddTable(kWasmFuncRef, 20, true, 30);
module = builder.module();
@@ -1628,7 +1638,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleIndirectReturnCalls) {
TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsOutOfBounds) {
WASM_FEATURE_SCOPE(return_call);
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
@@ -1650,9 +1660,9 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsOutOfBounds) {
TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithMismatchedSigs3) {
WASM_FEATURE_SCOPE(return_call);
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.InitializeTable();
+ builder.InitializeTable(wasm::kWasmStmt);
module = builder.module();
byte sig0 = builder.AddSignature(sigs.i_f());
@@ -1681,7 +1691,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithMismatchedSigs3) {
TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithoutTableCrash) {
WASM_FEATURE_SCOPE(return_call);
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1697,9 +1707,9 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithoutTableCrash) {
}
TEST_F(FunctionBodyDecoderTest, IncompleteIndirectReturnCall) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.InitializeTable();
+ builder.InitializeTable(wasm::kWasmStmt);
module = builder.module();
static byte code[] = {kExprReturnCallIndirect};
@@ -1739,8 +1749,8 @@ TEST_F(FunctionBodyDecoderTest, MultiReturnType) {
ExpectValidates(&sig_cd_v, {WASM_CALL_FUNCTION0(0)});
- if (ValueTypes::IsSubType(kValueTypes[c], kValueTypes[a]) &&
- ValueTypes::IsSubType(kValueTypes[d], kValueTypes[b])) {
+ if (kValueTypes[c].IsSubTypeOf(kValueTypes[a]) &&
+ kValueTypes[d].IsSubTypeOf(kValueTypes[b])) {
ExpectValidates(&sig_ab_v, {WASM_CALL_FUNCTION0(0)});
} else {
ExpectFailure(&sig_ab_v, {WASM_CALL_FUNCTION0(0)});
@@ -1752,7 +1762,7 @@ TEST_F(FunctionBodyDecoderTest, MultiReturnType) {
}
TEST_F(FunctionBodyDecoderTest, SimpleIndirectCalls) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
@@ -1768,7 +1778,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleIndirectCalls) {
}
TEST_F(FunctionBodyDecoderTest, IndirectCallsOutOfBounds) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
@@ -1785,9 +1795,9 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsOutOfBounds) {
}
TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs3) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.InitializeTable();
+ builder.InitializeTable(wasm::kWasmStmt);
module = builder.module();
byte sig0 = builder.AddSignature(sigs.i_f());
@@ -1808,7 +1818,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs3) {
}
TEST_F(FunctionBodyDecoderTest, IndirectCallsWithoutTableCrash) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1823,9 +1833,9 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithoutTableCrash) {
}
TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.InitializeTable();
+ builder.InitializeTable(wasm::kWasmStmt);
module = builder.module();
static byte code[] = {kExprCallIndirect};
@@ -1833,10 +1843,10 @@ TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
}
TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
builder.InitializeMemory();
- builder.InitializeTable();
+ builder.InitializeTable(wasm::kWasmStmt);
module = builder.module();
static byte code[] = {kExprI32StoreMem};
@@ -1845,10 +1855,10 @@ TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
TEST_F(FunctionBodyDecoderTest, IncompleteS8x16Shuffle) {
WASM_FEATURE_SCOPE(simd);
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
builder.InitializeMemory();
- builder.InitializeTable();
+ builder.InitializeTable(wasm::kWasmStmt);
module = builder.module();
static byte code[] = {kSimdPrefix,
@@ -1857,7 +1867,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteS8x16Shuffle) {
}
TEST_F(FunctionBodyDecoderTest, SimpleImportCalls) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1872,7 +1882,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleImportCalls) {
}
TEST_F(FunctionBodyDecoderTest, ImportCallsWithMismatchedSigs3) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1892,7 +1902,7 @@ TEST_F(FunctionBodyDecoderTest, ImportCallsWithMismatchedSigs3) {
}
TEST_F(FunctionBodyDecoderTest, Int32Globals) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1904,7 +1914,7 @@ TEST_F(FunctionBodyDecoderTest, Int32Globals) {
}
TEST_F(FunctionBodyDecoderTest, ImmutableGlobal) {
- FunctionSig* sig = sigs.v_v();
+ const FunctionSig* sig = sigs.v_v();
TestModuleBuilder builder;
module = builder.module();
@@ -1916,7 +1926,7 @@ TEST_F(FunctionBodyDecoderTest, ImmutableGlobal) {
}
TEST_F(FunctionBodyDecoderTest, Int32Globals_fail) {
- FunctionSig* sig = sigs.i_i();
+ const FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
module = builder.module();
@@ -1937,7 +1947,7 @@ TEST_F(FunctionBodyDecoderTest, Int32Globals_fail) {
}
TEST_F(FunctionBodyDecoderTest, Int64Globals) {
- FunctionSig* sig = sigs.l_l();
+ const FunctionSig* sig = sigs.l_l();
TestModuleBuilder builder;
module = builder.module();
@@ -1954,7 +1964,7 @@ TEST_F(FunctionBodyDecoderTest, Int64Globals) {
}
TEST_F(FunctionBodyDecoderTest, Float32Globals) {
- FunctionSig* sig = sigs.f_ff();
+ const FunctionSig* sig = sigs.f_ff();
TestModuleBuilder builder;
module = builder.module();
@@ -1966,7 +1976,7 @@ TEST_F(FunctionBodyDecoderTest, Float32Globals) {
}
TEST_F(FunctionBodyDecoderTest, Float64Globals) {
- FunctionSig* sig = sigs.d_dd();
+ const FunctionSig* sig = sigs.d_dd();
TestModuleBuilder builder;
module = builder.module();
@@ -1986,8 +1996,7 @@ TEST_F(FunctionBodyDecoderTest, AllGetGlobalCombinations) {
TestModuleBuilder builder;
module = builder.module();
builder.AddGlobal(global_type);
- Validate(ValueTypes::IsSubType(global_type, local_type), &sig,
- {WASM_GET_GLOBAL(0)});
+ Validate(global_type.IsSubTypeOf(local_type), &sig, {WASM_GET_GLOBAL(0)});
}
}
}
@@ -2001,7 +2010,7 @@ TEST_F(FunctionBodyDecoderTest, AllSetGlobalCombinations) {
TestModuleBuilder builder;
module = builder.module();
builder.AddGlobal(global_type);
- Validate(ValueTypes::IsSubType(local_type, global_type), &sig,
+ Validate(local_type.IsSubTypeOf(global_type), &sig,
{WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0))});
}
}
@@ -2181,7 +2190,7 @@ TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
FunctionSig sig_d_id(1, 2, float64int32float64);
struct {
WasmOpcode op;
- FunctionSig* sig;
+ const FunctionSig* sig;
} AsmJsBinOps[] = {
{kExprF64Atan2, sigs.d_dd()},
{kExprF64Pow, sigs.d_dd()},
@@ -2225,7 +2234,7 @@ TEST_F(FunctionBodyDecoderTest, AsmJsUnOpsCheckOrigin) {
FunctionSig sig_d_i(1, 1, float64int32);
struct {
WasmOpcode op;
- FunctionSig* sig;
+ const FunctionSig* sig;
} AsmJsUnOps[] = {{kExprF64Acos, sigs.d_d()},
{kExprF64Asin, sigs.d_d()},
{kExprF64Atan, sigs.d_d()},
@@ -2367,9 +2376,8 @@ TEST_F(FunctionBodyDecoderTest, BreakNesting_6_levels) {
}
TEST_F(FunctionBodyDecoderTest, Break_TypeCheck) {
- FunctionSig* sigarray[] = {sigs.i_i(), sigs.l_l(), sigs.f_ff(), sigs.d_dd()};
- for (size_t i = 0; i < arraysize(sigarray); i++) {
- FunctionSig* sig = sigarray[i];
+ for (const FunctionSig* sig :
+ {sigs.i_i(), sigs.l_l(), sigs.f_ff(), sigs.d_dd()}) {
// unify X and X => OK
byte code[] = {WASM_BLOCK_T(
sig->GetReturn(), WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
@@ -2398,8 +2406,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll1) {
sig.GetReturn(), WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(1))};
- Validate(ValueTypes::IsSubType(kValueTypes[j], kValueTypes[i]), &sig,
- code);
+ Validate(kValueTypes[j].IsSubTypeOf(kValueTypes[i]), &sig, code);
}
}
}
@@ -2413,8 +2420,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll2) {
WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)),
WASM_GET_LOCAL(1))};
- Validate(ValueTypes::IsSubType(kValueTypes[j], kValueTypes[i]), &sig,
- code);
+ Validate(kValueTypes[j].IsSubTypeOf(kValueTypes[i]), &sig, code);
}
}
}
@@ -2428,8 +2434,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll3) {
WASM_GET_LOCAL(1),
WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))};
- Validate(ValueTypes::IsSubType(kValueTypes[j], kValueTypes[i]), &sig,
- code);
+ Validate(kValueTypes[j].IsSubTypeOf(kValueTypes[i]), &sig, code);
}
}
}
@@ -2473,8 +2478,7 @@ TEST_F(FunctionBodyDecoderTest, BreakIf_val_type) {
types[1], WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
WASM_DROP, WASM_GET_LOCAL(0))};
- Validate(ValueTypes::IsSubType(kValueTypes[j], kValueTypes[i]), &sig,
- code);
+ Validate(kValueTypes[j].IsSubTypeOf(kValueTypes[i]), &sig, code);
}
}
}
@@ -3203,8 +3207,8 @@ TEST_F(FunctionBodyDecoderTest, BulkMemoryOpsWithoutMemory) {
TEST_F(FunctionBodyDecoderTest, TableInit) {
TestModuleBuilder builder;
- builder.InitializeTable();
- builder.AddPassiveElementSegment();
+ builder.InitializeTable(wasm::kWasmFuncRef);
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
ExpectFailure(sigs.v_v(),
@@ -3216,10 +3220,22 @@ TEST_F(FunctionBodyDecoderTest, TableInit) {
{WASM_TABLE_INIT(0, 1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
+TEST_F(FunctionBodyDecoderTest, TableInitWrongType) {
+ TestModuleBuilder builder;
+ uint32_t table_index = builder.InitializeTable(wasm::kWasmFuncRef);
+ uint32_t element_index = builder.AddPassiveElementSegment(wasm::kWasmAnyRef);
+ module = builder.module();
+
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_INIT(table_index, element_index,
+ WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+}
+
TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
TestModuleBuilder builder;
- builder.InitializeTable();
- builder.AddPassiveElementSegment();
+ builder.InitializeTable(wasm::kWasmFuncRef);
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
WASM_FEATURE_SCOPE(bulk_memory);
@@ -3232,8 +3248,8 @@ TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
TEST_F(FunctionBodyDecoderTest, ElemDrop) {
TestModuleBuilder builder;
- builder.InitializeTable();
- builder.AddPassiveElementSegment();
+ builder.InitializeTable(wasm::kWasmFuncRef);
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
@@ -3242,11 +3258,62 @@ TEST_F(FunctionBodyDecoderTest, ElemDrop) {
ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(1)});
}
+TEST_F(FunctionBodyDecoderTest, TableInitDeclarativeElem) {
+ TestModuleBuilder builder;
+ builder.InitializeTable(wasm::kWasmFuncRef);
+ builder.AddDeclarativeElementSegment();
+ module = builder.module();
+
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ byte code[] = {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
+ WASM_END};
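+  // Only the full body (including WASM_END) should validate; every proper
+  // prefix of it must fail.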
+ for (size_t i = 0; i <= arraysize(code); ++i) {
+ Validate(i == arraysize(code), sigs.v_v(), VectorOf(code, i), kOmitEnd);
+ }
+}
+
+TEST_F(FunctionBodyDecoderTest, DeclarativeElemDrop) {
+ TestModuleBuilder builder;
+ builder.InitializeTable(wasm::kWasmFuncRef);
+ builder.AddDeclarativeElementSegment();
+ module = builder.module();
+
+ ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectValidates(sigs.v_v(), {WASM_ELEM_DROP(0)});
+ ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(1)});
+}
+
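+// With the reference-types/anyref feature, ref.func is only valid for
+// functions that are "declared", e.g. via an element segment or an export;
+// AddFunction() marks functions as declared unless told otherwise.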
+TEST_F(FunctionBodyDecoderTest, RefFuncDeclared) {
+ TestModuleBuilder builder;
+ builder.InitializeTable(wasm::kWasmStmt);
+ byte function_index = builder.AddFunction(sigs.v_i());
+ module = builder.module();
+
+ ExpectFailure(sigs.a_v(), {WASM_REF_FUNC(function_index)});
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectValidates(sigs.a_v(), {WASM_REF_FUNC(function_index)});
+}
+
+TEST_F(FunctionBodyDecoderTest, RefFuncUndeclared) {
+ TestModuleBuilder builder;
+ builder.InitializeTable(wasm::kWasmStmt);
+ byte function_index = builder.AddFunction(sigs.v_i(), false);
+ module = builder.module();
+
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectFailure(sigs.a_v(), {WASM_REF_FUNC(function_index)});
+}
+
TEST_F(FunctionBodyDecoderTest, ElemSegmentIndexUnsigned) {
TestModuleBuilder builder;
- builder.InitializeTable();
+ builder.InitializeTable(wasm::kWasmFuncRef);
for (int i = 0; i < 65; ++i) {
- builder.AddPassiveElementSegment();
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
}
module = builder.module();
@@ -3260,7 +3327,7 @@ TEST_F(FunctionBodyDecoderTest, ElemSegmentIndexUnsigned) {
TEST_F(FunctionBodyDecoderTest, TableCopy) {
TestModuleBuilder builder;
- builder.InitializeTable();
+ builder.InitializeTable(wasm::kWasmStmt);
module = builder.module();
ExpectFailure(sigs.v_v(),
@@ -3270,6 +3337,18 @@ TEST_F(FunctionBodyDecoderTest, TableCopy) {
{WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
+TEST_F(FunctionBodyDecoderTest, TableCopyWrongType) {
+ TestModuleBuilder builder;
+ uint32_t dst_table_index = builder.InitializeTable(wasm::kWasmFuncRef);
+ uint32_t src_table_index = builder.InitializeTable(wasm::kWasmAnyRef);
+ module = builder.module();
+
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(dst_table_index, src_table_index,
+ WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+}
+
TEST_F(FunctionBodyDecoderTest, TableGrow) {
TestModuleBuilder builder;
byte tab_func = builder.AddTable(kWasmFuncRef, 10, true, 20);
@@ -3369,7 +3448,7 @@ TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
}
{
WASM_FEATURE_SCOPE(bulk_memory);
- builder.AddPassiveElementSegment();
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
ExpectFailure(sigs.v_v(),
{WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
@@ -3383,7 +3462,7 @@ TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
{
TestModuleBuilder builder;
builder.AddTable(kWasmAnyRef, 10, true, 20);
- builder.AddPassiveElementSegment();
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
// We added one table, therefore table.copy on table 0 should work.
int table_src = 0;
@@ -3405,7 +3484,7 @@ TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
TestModuleBuilder builder;
builder.AddTable(kWasmAnyRef, 10, true, 20);
builder.AddTable(kWasmAnyRef, 10, true, 20);
- builder.AddPassiveElementSegment();
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
// We added two tables, therefore table.copy on table 0 should work.
int table_src = 0;
@@ -3433,7 +3512,7 @@ TEST_F(FunctionBodyDecoderTest, TableInitMultiTable) {
{
TestModuleBuilder builder;
builder.AddTable(kWasmAnyRef, 10, true, 20);
- builder.AddPassiveElementSegment();
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
// We added one table, therefore table.init on table 0 should work.
int table_index = 0;
@@ -3448,7 +3527,7 @@ TEST_F(FunctionBodyDecoderTest, TableInitMultiTable) {
TestModuleBuilder builder;
builder.AddTable(kWasmAnyRef, 10, true, 20);
builder.AddTable(kWasmAnyRef, 10, true, 20);
- builder.AddPassiveElementSegment();
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
module = builder.module();
// We added two tables, therefore table.init on table 0 should work.
int table_index = 0;
@@ -3727,8 +3806,7 @@ TEST_F(LocalDeclDecoderTest, OneLocal) {
WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueType type = kValueTypes[i];
- const byte data[] = {1, 1,
- static_cast<byte>(ValueTypes::ValueTypeCodeFor(type))};
+ const byte data[] = {1, 1, static_cast<byte>(type.value_type_code())};
BodyLocalDecls decls(zone());
bool result = DecodeLocalDecls(&decls, data, data + sizeof(data));
EXPECT_TRUE(result);
@@ -3743,8 +3821,7 @@ TEST_F(LocalDeclDecoderTest, FiveLocals) {
WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueType type = kValueTypes[i];
- const byte data[] = {1, 5,
- static_cast<byte>(ValueTypes::ValueTypeCodeFor(type))};
+ const byte data[] = {1, 5, static_cast<byte>(type.value_type_code())};
BodyLocalDecls decls(zone());
bool result = DecodeLocalDecls(&decls, data, data + sizeof(data));
EXPECT_TRUE(result);
@@ -3809,8 +3886,7 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
TEST_F(LocalDeclDecoderTest, ExnRef) {
WASM_FEATURE_SCOPE(eh);
ValueType type = kWasmExnRef;
- const byte data[] = {1, 1,
- static_cast<byte>(ValueTypes::ValueTypeCodeFor(type))};
+ const byte data[] = {1, 1, static_cast<byte>(type.value_type_code())};
BodyLocalDecls decls(zone());
bool result = DecodeLocalDecls(&decls, data, data + sizeof(data));
EXPECT_TRUE(result);
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index f88beef794..68a2bb6ff1 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -130,6 +130,13 @@ struct CheckLEB1 : std::integral_constant<size_t, num> {
#define EXPECT_FAILURE(data) EXPECT_FAILURE_LEN(data, sizeof(data))
+#define EXPECT_FAILURE_WITH_MSG(data, msg) \
+ do { \
+ ModuleResult result = DecodeModule(data, data + sizeof(data)); \
+ EXPECT_FALSE(result.ok()); \
+ EXPECT_THAT(result.error().message(), HasSubstr(msg)); \
+ } while (false)
+
#define EXPECT_OFF_END_FAILURE(data, min) \
do { \
STATIC_ASSERT(min < arraysize(data)); \
@@ -252,6 +259,7 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -265,6 +273,16 @@ TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
kLocalAnyRef, // local type
0, // immutable
WASM_INIT_EXPR_REF_FUNC(1)), // init
+ SECTION(Element, // section name
+ ENTRY_COUNT(2), // entry count
+ DECLARATIVE, // flags 0
+ kExternalFunction, // type
+ ENTRY_COUNT(1), // func entry count
+ FUNC_INDEX(0), // func index
+ DECLARATIVE_WITH_ELEMENTS, // flags 1
+ kLocalFuncRef, // local type
+ ENTRY_COUNT(1), // func ref count
+ REF_FUNC_ELEMENT(1)), // func ref
TWO_EMPTY_BODIES};
{
@@ -290,6 +308,7 @@ TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -303,6 +322,16 @@ TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
kLocalFuncRef, // local type
0, // immutable
WASM_INIT_EXPR_REF_FUNC(1)), // init
+ SECTION(Element, // section name
+ ENTRY_COUNT(2), // entry count
+ DECLARATIVE, // flags 0
+ kExternalFunction, // type
+ ENTRY_COUNT(1), // func entry count
+ FUNC_INDEX(0), // func index
+ DECLARATIVE_WITH_ELEMENTS, // flags 1
+ kLocalFuncRef, // local type
+ ENTRY_COUNT(1), // func ref count
+ REF_FUNC_ELEMENT(1)), // func ref
TWO_EMPTY_BODIES};
{
// Should decode to two globals.
@@ -695,9 +724,9 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_attribute) {
}
TEST_F(WasmModuleVerifyTest, ExceptionSectionCorrectPlacement) {
- static const byte data[] = {SECTION(Import, ENTRY_COUNT(0)),
+ static const byte data[] = {SECTION(Memory, ENTRY_COUNT(0)),
SECTION(Exception, ENTRY_COUNT(0)),
- SECTION(Export, ENTRY_COUNT(0))};
+ SECTION(Global, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
@@ -705,37 +734,37 @@ TEST_F(WasmModuleVerifyTest, ExceptionSectionCorrectPlacement) {
EXPECT_OK(result);
}
-TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterExport) {
- static const byte data[] = {SECTION(Export, ENTRY_COUNT(0)),
+TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterGlobal) {
+ static const byte data[] = {SECTION(Global, ENTRY_COUNT(0)),
SECTION(Exception, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result,
- "The Exception section must appear before the Export section");
+ "The Exception section must appear before the Global section");
}
-TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeGlobal) {
+TEST_F(WasmModuleVerifyTest, ExceptionSectionBeforeMemory) {
static const byte data[] = {SECTION(Exception, ENTRY_COUNT(0)),
- SECTION(Global, ENTRY_COUNT(0))};
+ SECTION(Memory, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "unexpected section <Global>");
+ EXPECT_NOT_OK(result, "unexpected section <Memory>");
}
-TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterMemoryBeforeGlobal) {
+TEST_F(WasmModuleVerifyTest, ExceptionSectionAfterTableBeforeMemory) {
STATIC_ASSERT(kMemorySectionCode + 1 == kGlobalSectionCode);
- static const byte data[] = {SECTION(Memory, ENTRY_COUNT(0)),
+ static const byte data[] = {SECTION(Table, ENTRY_COUNT(0)),
SECTION(Exception, ENTRY_COUNT(0)),
- SECTION(Global, ENTRY_COUNT(0))};
+ SECTION(Memory, ENTRY_COUNT(0))};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_NOT_OK(result, "unexpected section <Global>");
+ EXPECT_NOT_OK(result, "unexpected section <Memory>");
}
TEST_F(WasmModuleVerifyTest, ExceptionImport) {
@@ -1493,7 +1522,7 @@ class WasmSignatureDecodeTest : public TestWithZone {
public:
WasmFeatures enabled_features_ = WasmFeatures::None();
- FunctionSig* DecodeSig(const byte* start, const byte* end) {
+ const FunctionSig* DecodeSig(const byte* start, const byte* end) {
return DecodeWasmSignatureForTesting(enabled_features_, zone(), start, end);
}
};
@@ -1502,7 +1531,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
static const byte data[] = {SIG_ENTRY_v_v};
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(0u, sig->parameter_count());
@@ -1514,7 +1543,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair ret_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_x(ret_type.code)};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(0u, sig->parameter_count());
@@ -1528,7 +1557,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair param_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_v_x(param_type.code)};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(1u, sig->parameter_count());
@@ -1544,7 +1573,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueTypePair param_type = kValueTypes[j];
const byte data[] = {SIG_ENTRY_x_x(ret_type.code, param_type.code)};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(1u, sig->parameter_count());
@@ -1564,7 +1593,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
ValueTypePair p1_type = kValueTypes[j];
const byte data[] = {
SIG_ENTRY_x_xx(kLocalI32, p0_type.code, p1_type.code)};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(2u, sig->parameter_count());
@@ -1584,7 +1613,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_tt_tt) {
ValueTypePair p1_type = kValueTypes[j];
const byte data[] = {SIG_ENTRY_xx_xx(p0_type.code, p1_type.code,
p0_type.code, p1_type.code)};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(2u, sig->parameter_count());
@@ -1601,7 +1630,7 @@ TEST_F(WasmSignatureDecodeTest, TooManyParams) {
static const byte data[] = {kWasmFunctionTypeCode,
WASM_I32V_3(kV8MaxWasmFunctionParams + 1),
kLocalI32, 0};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_FALSE(sig != nullptr);
}
@@ -1613,7 +1642,7 @@ TEST_F(WasmSignatureDecodeTest, TooManyReturns) {
enable_mv ? kV8MaxWasmFunctionMultiReturns : kV8MaxWasmFunctionReturns);
byte data[] = {kWasmFunctionTypeCode, 0, WASM_I32V_3(max_return_count + 1),
kLocalI32};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
}
@@ -1626,7 +1655,7 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
for (int i = 0; i < p + 1; i++) {
// Should fall off the end for all signatures.
- FunctionSig* sig = DecodeSig(data, data + i);
+ const FunctionSig* sig = DecodeSig(data, data + i);
EXPECT_EQ(nullptr, sig);
}
}
@@ -1641,7 +1670,7 @@ TEST_F(WasmSignatureDecodeTest, Fail_anyref_without_flag) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
if (i >= arraysize(data)) break;
data[i] = invalid_type;
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
}
@@ -1653,26 +1682,26 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
if (i >= arraysize(data)) break;
data[i] = kInvalidType;
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_ret_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kLocalVoid, kLocalI32)};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kLocalI32, kLocalVoid)};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type2) {
static const byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalVoid)};
- FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+ const FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
@@ -1724,12 +1753,22 @@ TEST_F(WasmModuleVerifyTest, SectionWithoutNameLength) {
EXPECT_FAILURE(data);
}
+TEST_F(WasmModuleVerifyTest, EmptyCustomSectionIsInvalid) {
+ // An empty custom section is invalid, because at least one byte for the
+ // length of the custom section name is required.
+ const byte data[] = {
+ 0, // unknown section code.
+ 0 // section length.
+ };
+ EXPECT_FAILURE(data);
+}
+
TEST_F(WasmModuleVerifyTest, TheLoneliestOfValidModulesTheTrulyEmptyOne) {
const byte data[] = {
0, // unknown section code.
- 0, // Empty section name.
- // No section name, no content, nothing but sadness.
- 0, // No section content.
+ 1, // section length, only one byte for the name length.
+ 0, // string length of 0.
+ // Empty section name, no content, nothing but sadness.
};
EXPECT_VERIFIES(data);
}
@@ -2507,7 +2546,7 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegmentWithIndices) {
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration -----------------------------------------------------
SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
- // element segments -----------------------------------------------------
+ // element segments ------------------------------------------------------
SECTION(Element, ENTRY_COUNT(1), PASSIVE, kExternalFunction,
ENTRY_COUNT(3), U32V_1(0), U32V_1(0), U32V_1(0)),
// code ------------------------------------------------------------------
@@ -2518,6 +2557,67 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegmentWithIndices) {
EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
}
+TEST_F(WasmModuleVerifyTest, DeclarativeElementSegmentFuncRef) {
+ static const byte data[] = {
+ // sig#0 -----------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs -----------------------------------------------------------------
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ // element segments -----------------------------------------------------
+ SECTION(Element, // section name
+ ENTRY_COUNT(1), // entry count
+ DECLARATIVE_WITH_ELEMENTS, // flags
+ kLocalFuncRef, // local type
+ U32V_1(0)), // func ref count
+ // code ------------------------------------------------------------------
+ ONE_EMPTY_BODY};
+ EXPECT_FAILURE(data);
+ WASM_FEATURE_SCOPE(bulk_memory);
+ EXPECT_FAILURE(data);
+ WASM_FEATURE_SCOPE(anyref);
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, DeclarativeElementSegmentWithInvalidIndex) {
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ static const byte data[] = {
+ // sig#0 -----------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs -----------------------------------------------------------------
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ // element segments -----------------------------------------------------
+ SECTION(Element, // section name
+ ENTRY_COUNT(1), // entry count
+ DECLARATIVE, // flags
+ kExternalFunction, // type
+ ENTRY_COUNT(2), // func index count
+ U32V_1(0), // func index
+ U32V_1(1)), // func index
+ // code ------------------------------------------------------------------
+ ONE_EMPTY_BODY};
+ EXPECT_FAILURE_WITH_MSG(data, "element function index 1 out of bounds");
+}
+
+TEST_F(WasmModuleVerifyTest, DeclarativeElementSegmentMissingForGlobal) {
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ static const byte data[] = {
+ // sig#0 -----------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs -----------------------------------------------------------------
+ ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
+ // global definitions ----------------------------------------------------
+ SECTION(Global, // section name
+ ENTRY_COUNT(1), // entry count
+ kLocalAnyRef, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_REF_FUNC(0)), // init
+ // code ------------------------------------------------------------------
+ ONE_EMPTY_BODY};
+ EXPECT_FAILURE_WITH_MSG(data, "undeclared reference to function");
+}
+
TEST_F(WasmModuleVerifyTest, DataCountSectionCorrectPlacement) {
static const byte data[] = {SECTION(Element, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0)),
diff --git a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
index 4b9f78dfdc..0b8cf6d11d 100644
--- a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
@@ -21,9 +21,9 @@ TEST_F(WasmCallDescriptorTest, TestAnyRefIsGrouped) {
ValueType params[kMaxCount];
for (size_t i = 0; i < kMaxCount; i += 2) {
- params[i] = ValueType::kWasmAnyRef;
+ params[i] = kWasmAnyRef;
CHECK_LT(i + 1, kMaxCount);
- params[i + 1] = ValueType::kWasmI32;
+ params[i + 1] = kWasmI32;
}
for (size_t count = 1; count <= kMaxCount; ++count) {
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index b65f0c924f..a1ad6de73f 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-998b2870df79bf5ae83eb80aa6adb4ec3110d197 \ No newline at end of file
+6e091ed0e8ef57cdbfa46265b0001f19e3bb0f20 \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index af13e5a1b3..b8fe4c068a 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -4,9 +4,12 @@
[
[ALWAYS, {
- # https://bugs.chromium.org/p/v8/issues/detail?id=8633
- 'limits': [SKIP],
+ # These are slow, and not useful to run for the proposals:
'proposals/reference-types/limits': [SKIP],
+ 'proposals/multi-value/limits': [SKIP],
+ 'proposals/bulk-memory-operations/limits': [SKIP],
+ 'proposals/JS-BigInt-integration/limits': [SKIP],
+ 'proposals/js-types/limits': [SKIP],
# TODO(v8:9673): Enable these spec tests once they exist, and the out-dated
# tests have been removed.
'proposals/JS-BigInt-integration/module/params-long': [FAIL],
@@ -18,6 +21,12 @@
'constructor/instantiate': [SKIP],
}], # 'arch == s390 or arch == s390x or system == aix'
+['mode == debug or simulator_run or variant != default or arch == arm', {
+ # Slow, and we always have the same limits anyway.
+ # ODroid bots don't have enough memory to run the test.
+ 'limits': [SKIP],
+}], # mode == debug or simulator_run or variant != default or arch == arm
+
##############################################################################
['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index b905b5674e..9f6ff04f64 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-61d66d9a4019b8799d4be29b82424c5e5a4fb8b2 \ No newline at end of file
+acdfb34955e3b0e5031890aebaf552782f38f0f3 \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index ccf27ed6a7..84daefc36d 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -24,15 +24,6 @@
'proposals/multi-value/call': [FAIL],
'proposals/multi-value/if': [FAIL],
'proposals/multi-value/func': [FAIL],
-
- # TODO(v8:10156): Spec tests are failing after rebasing the reference-types
- # proposal on the bulk-operations proposal.
- 'proposals/reference-types/elem': [FAIL],
- 'proposals/reference-types/ref_func': [FAIL],
- 'proposals/reference-types/table_fill': [FAIL],
- 'proposals/reference-types/table_grow': [FAIL],
- 'proposals/reference-types/select': [FAIL],
- 'proposals/reference-types/br_table': [FAIL],
}], # ALWAYS
['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
diff --git a/deps/v8/third_party/inspector_protocol/BUILD.gn b/deps/v8/third_party/inspector_protocol/BUILD.gn
index 9fcba22e04..33ef78c24b 100644
--- a/deps/v8/third_party/inspector_protocol/BUILD.gn
+++ b/deps/v8/third_party/inspector_protocol/BUILD.gn
@@ -14,7 +14,12 @@ v8_source_set("crdtp") {
sources = [
"crdtp/cbor.cc",
"crdtp/cbor.h",
+ "crdtp/dispatch.cc",
+ "crdtp/dispatch.h",
+ "crdtp/error_support.cc",
+ "crdtp/error_support.h",
"crdtp/export.h",
+ "crdtp/find_by_first.h",
"crdtp/glue.h",
"crdtp/json.cc",
"crdtp/json.h",
@@ -22,14 +27,13 @@ v8_source_set("crdtp") {
"crdtp/serializable.cc",
"crdtp/serializable.h",
"crdtp/serializer_traits.h",
+ "crdtp/span.cc",
"crdtp/span.h",
"crdtp/status.cc",
"crdtp/status.h",
]
configs = [ ":crdtp_config" ]
- deps = [
- ":crdtp_platform",
- ]
+ deps = [ ":crdtp_platform" ]
}
# A small adapter library which only :crdtp may depend on.
@@ -39,9 +43,7 @@ v8_source_set("crdtp_platform") {
"crdtp/json_platform.h",
"crdtp/json_platform_v8.cc",
]
- public_deps = [
- "../..:v8_libbase",
- ]
+ public_deps = [ "../..:v8_libbase" ]
configs = [ ":crdtp_config" ]
}
@@ -49,6 +51,9 @@ v8_source_set("crdtp_platform") {
v8_source_set("crdtp_test") {
sources = [
"crdtp/cbor_test.cc",
+ "crdtp/dispatch_test.cc",
+ "crdtp/error_support_test.cc",
+ "crdtp/find_by_first_test.cc",
"crdtp/glue_test.cc",
"crdtp/json_test.cc",
"crdtp/serializable_test.cc",
@@ -59,9 +64,7 @@ v8_source_set("crdtp_test") {
"crdtp/status_test_support.h",
]
configs = [ ":crdtp_config" ]
- deps = [
- ":crdtp_test_platform",
- ]
+ deps = [ ":crdtp_test_platform" ]
testonly = true
}
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index d9b68c0b17..d2d558ad3e 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: 0213a8545f6362cd1cd5091cedf29747736552e8
+Revision: b7cda08cd6e522df2159413ba5f29d2a953cc1c4
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/code_generator.py b/deps/v8/third_party/inspector_protocol/code_generator.py
index 4260dbe5a4..92207b9159 100755
--- a/deps/v8/third_party/inspector_protocol/code_generator.py
+++ b/deps/v8/third_party/inspector_protocol/code_generator.py
@@ -655,26 +655,19 @@ def main():
# Note these should be sorted in the right order.
# TODO(dgozman): sort them programmatically based on commented includes.
protocol_h_templates = [
- "ErrorSupport_h.template",
"Values_h.template",
"Object_h.template",
"ValueConversions_h.template",
- "DispatcherBase_h.template",
- "Parser_h.template",
]
protocol_cpp_templates = [
"Protocol_cpp.template",
- "ErrorSupport_cpp.template",
"Values_cpp.template",
"Object_cpp.template",
- "DispatcherBase_cpp.template",
- "Parser_cpp.template",
]
forward_h_templates = [
"Forward_h.template",
- "FrontendChannel_h.template",
]
base_string_adapter_h_templates = [
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/dispatch.cc b/deps/v8/third_party/inspector_protocol/crdtp/dispatch.cc
new file mode 100644
index 0000000000..eb2b2d4fdc
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/dispatch.cc
@@ -0,0 +1,576 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dispatch.h"
+
+#include <cassert>
+#include "cbor.h"
+#include "error_support.h"
+#include "find_by_first.h"
+#include "frontend_channel.h"
+
+namespace v8_crdtp {
+// =============================================================================
+// DispatchResponse - Error status and chaining / fall through
+// =============================================================================
+
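+// Note: the numeric values behind DispatchCode (defined in dispatch.h) are
+// intended to line up with the JSON-RPC 2.0 error codes, e.g. -32700 for
+// PARSE_ERROR and -32600 for INVALID_REQUEST.
+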
+// static
+DispatchResponse DispatchResponse::Success() {
+ DispatchResponse result;
+ result.code_ = DispatchCode::SUCCESS;
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::FallThrough() {
+ DispatchResponse result;
+ result.code_ = DispatchCode::FALL_THROUGH;
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::ParseError(std::string message) {
+ DispatchResponse result;
+ result.code_ = DispatchCode::PARSE_ERROR;
+ result.message_ = std::move(message);
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::InvalidRequest(std::string message) {
+ DispatchResponse result;
+ result.code_ = DispatchCode::INVALID_REQUEST;
+ result.message_ = std::move(message);
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::MethodNotFound(std::string message) {
+ DispatchResponse result;
+ result.code_ = DispatchCode::METHOD_NOT_FOUND;
+ result.message_ = std::move(message);
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::InvalidParams(std::string message) {
+ DispatchResponse result;
+ result.code_ = DispatchCode::INVALID_PARAMS;
+ result.message_ = std::move(message);
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::InternalError() {
+ DispatchResponse result;
+ result.code_ = DispatchCode::INTERNAL_ERROR;
+ result.message_ = "Internal error";
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::ServerError(std::string message) {
+ DispatchResponse result;
+ result.code_ = DispatchCode::SERVER_ERROR;
+ result.message_ = std::move(message);
+ return result;
+}
+
+// =============================================================================
+// Dispatchable - a shallow parser for CBOR encoded DevTools messages
+// =============================================================================
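+// A Dispatchable performs a shallow parse of a message of this shape
+// (shown here as JSON):
+//   {"id": 42, "method": "Domain.command", "params": {...}, "sessionId": "..."}
+// where "id" and "method" are required and "params" / "sessionId" are
+// optional.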
+namespace {
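+// One byte for the envelope tag, one byte for the 32 bit byte string header,
+// then the four length bytes; see cbor.h for the exact encoding.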
+constexpr size_t kEncodedEnvelopeHeaderSize = 1 + 1 + sizeof(uint32_t);
+} // namespace
+
+Dispatchable::Dispatchable(span<uint8_t> serialized) : serialized_(serialized) {
+ Status s = cbor::CheckCBORMessage(serialized);
+ if (!s.ok()) {
+ status_ = {Error::MESSAGE_MUST_BE_AN_OBJECT, s.pos};
+ return;
+ }
+ cbor::CBORTokenizer tokenizer(serialized);
+ if (tokenizer.TokenTag() == cbor::CBORTokenTag::ERROR_VALUE) {
+ status_ = tokenizer.Status();
+ return;
+ }
+
+  // CheckCBORMessage() above verified the envelope start byte, and the
+  // error case was handled just above, so the tokenizer must report an
+  // envelope here.
+ assert(tokenizer.TokenTag() == cbor::CBORTokenTag::ENVELOPE);
+
+ // Before we enter the envelope, we save the position that we
+ // expect to see after we're done parsing the envelope contents.
+ // This way we can compare and produce an error if the contents
+ // didn't fit exactly into the envelope length.
+ const size_t pos_past_envelope = tokenizer.Status().pos +
+ kEncodedEnvelopeHeaderSize +
+ tokenizer.GetEnvelopeContents().size();
+ tokenizer.EnterEnvelope();
+ if (tokenizer.TokenTag() == cbor::CBORTokenTag::ERROR_VALUE) {
+ status_ = tokenizer.Status();
+ return;
+ }
+ if (tokenizer.TokenTag() != cbor::CBORTokenTag::MAP_START) {
+ status_ = {Error::MESSAGE_MUST_BE_AN_OBJECT, tokenizer.Status().pos};
+ return;
+ }
+ assert(tokenizer.TokenTag() == cbor::CBORTokenTag::MAP_START);
+ tokenizer.Next(); // Now we should be pointed at the map key.
+ while (tokenizer.TokenTag() != cbor::CBORTokenTag::STOP) {
+ switch (tokenizer.TokenTag()) {
+ case cbor::CBORTokenTag::DONE:
+ status_ =
+ Status{Error::CBOR_UNEXPECTED_EOF_IN_MAP, tokenizer.Status().pos};
+ return;
+ case cbor::CBORTokenTag::ERROR_VALUE:
+ status_ = tokenizer.Status();
+ return;
+ case cbor::CBORTokenTag::STRING8:
+ if (!MaybeParseProperty(&tokenizer))
+ return;
+ break;
+ default:
+ // We require the top-level keys to be UTF8 (US-ASCII in practice).
+ status_ = Status{Error::CBOR_INVALID_MAP_KEY, tokenizer.Status().pos};
+ return;
+ }
+ }
+ tokenizer.Next();
+ if (!has_call_id_) {
+ status_ = Status{Error::MESSAGE_MUST_HAVE_INTEGER_ID_PROPERTY,
+ tokenizer.Status().pos};
+ return;
+ }
+ if (method_.empty()) {
+ status_ = Status{Error::MESSAGE_MUST_HAVE_STRING_METHOD_PROPERTY,
+ tokenizer.Status().pos};
+ return;
+ }
+ // The contents of the envelope parsed OK, now check that we're at
+ // the expected position.
+ if (pos_past_envelope != tokenizer.Status().pos) {
+ status_ = Status{Error::CBOR_ENVELOPE_CONTENTS_LENGTH_MISMATCH,
+ tokenizer.Status().pos};
+ return;
+ }
+ if (tokenizer.TokenTag() != cbor::CBORTokenTag::DONE) {
+ status_ = Status{Error::CBOR_TRAILING_JUNK, tokenizer.Status().pos};
+ return;
+ }
+}
+
+bool Dispatchable::ok() const {
+ return status_.ok();
+}
+
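+// Translates the parse status into a JSON-RPC style error: errors about the
+// message structure become INVALID_REQUEST, CBOR-level errors PARSE_ERROR.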
+DispatchResponse Dispatchable::DispatchError() const {
+ // TODO(johannes): Replace with DCHECK / similar?
+ if (status_.ok())
+ return DispatchResponse::Success();
+
+ if (status_.IsMessageError())
+ return DispatchResponse::InvalidRequest(status_.Message());
+ return DispatchResponse::ParseError(status_.ToASCIIString());
+}
+
+bool Dispatchable::MaybeParseProperty(cbor::CBORTokenizer* tokenizer) {
+ span<uint8_t> property_name = tokenizer->GetString8();
+ if (SpanEquals(SpanFrom("id"), property_name))
+ return MaybeParseCallId(tokenizer);
+ if (SpanEquals(SpanFrom("method"), property_name))
+ return MaybeParseMethod(tokenizer);
+ if (SpanEquals(SpanFrom("params"), property_name))
+ return MaybeParseParams(tokenizer);
+ if (SpanEquals(SpanFrom("sessionId"), property_name))
+ return MaybeParseSessionId(tokenizer);
+ status_ =
+ Status{Error::MESSAGE_HAS_UNKNOWN_PROPERTY, tokenizer->Status().pos};
+ return false;
+}
+
+bool Dispatchable::MaybeParseCallId(cbor::CBORTokenizer* tokenizer) {
+ if (has_call_id_) {
+ status_ = Status{Error::CBOR_DUPLICATE_MAP_KEY, tokenizer->Status().pos};
+ return false;
+ }
+ tokenizer->Next();
+ if (tokenizer->TokenTag() != cbor::CBORTokenTag::INT32) {
+ status_ = Status{Error::MESSAGE_MUST_HAVE_INTEGER_ID_PROPERTY,
+ tokenizer->Status().pos};
+ return false;
+ }
+ call_id_ = tokenizer->GetInt32();
+ has_call_id_ = true;
+ tokenizer->Next();
+ return true;
+}
+
+bool Dispatchable::MaybeParseMethod(cbor::CBORTokenizer* tokenizer) {
+ if (!method_.empty()) {
+ status_ = Status{Error::CBOR_DUPLICATE_MAP_KEY, tokenizer->Status().pos};
+ return false;
+ }
+ tokenizer->Next();
+ if (tokenizer->TokenTag() != cbor::CBORTokenTag::STRING8) {
+ status_ = Status{Error::MESSAGE_MUST_HAVE_STRING_METHOD_PROPERTY,
+ tokenizer->Status().pos};
+ return false;
+ }
+ method_ = tokenizer->GetString8();
+ tokenizer->Next();
+ return true;
+}
+
+bool Dispatchable::MaybeParseParams(cbor::CBORTokenizer* tokenizer) {
+ if (params_seen_) {
+ status_ = Status{Error::CBOR_DUPLICATE_MAP_KEY, tokenizer->Status().pos};
+ return false;
+ }
+ params_seen_ = true;
+ tokenizer->Next();
+ if (tokenizer->TokenTag() == cbor::CBORTokenTag::NULL_VALUE) {
+ tokenizer->Next();
+ return true;
+ }
+ if (tokenizer->TokenTag() != cbor::CBORTokenTag::ENVELOPE) {
+ status_ = Status{Error::MESSAGE_MAY_HAVE_OBJECT_PARAMS_PROPERTY,
+ tokenizer->Status().pos};
+ return false;
+ }
+ params_ = tokenizer->GetEnvelope();
+ tokenizer->Next();
+ return true;
+}
+
+bool Dispatchable::MaybeParseSessionId(cbor::CBORTokenizer* tokenizer) {
+ if (!session_id_.empty()) {
+ status_ = Status{Error::CBOR_DUPLICATE_MAP_KEY, tokenizer->Status().pos};
+ return false;
+ }
+ tokenizer->Next();
+ if (tokenizer->TokenTag() != cbor::CBORTokenTag::STRING8) {
+ status_ = Status{Error::MESSAGE_MAY_HAVE_STRING_SESSION_ID_PROPERTY,
+ tokenizer->Status().pos};
+ return false;
+ }
+ session_id_ = tokenizer->GetString8();
+ tokenizer->Next();
+ return true;
+}
+
+namespace {
+class ProtocolError : public Serializable {
+ public:
+ explicit ProtocolError(DispatchResponse dispatch_response)
+ : dispatch_response_(std::move(dispatch_response)) {}
+
+ void AppendSerialized(std::vector<uint8_t>* out) const override {
+ Status status;
+ std::unique_ptr<ParserHandler> encoder = cbor::NewCBOREncoder(out, &status);
+ encoder->HandleMapBegin();
+ if (has_call_id_) {
+ encoder->HandleString8(SpanFrom("id"));
+ encoder->HandleInt32(call_id_);
+ }
+ encoder->HandleString8(SpanFrom("error"));
+ encoder->HandleMapBegin();
+ encoder->HandleString8(SpanFrom("code"));
+ encoder->HandleInt32(static_cast<int32_t>(dispatch_response_.Code()));
+ encoder->HandleString8(SpanFrom("message"));
+ encoder->HandleString8(SpanFrom(dispatch_response_.Message()));
+ if (!data_.empty()) {
+ encoder->HandleString8(SpanFrom("data"));
+ encoder->HandleString8(SpanFrom(data_));
+ }
+ encoder->HandleMapEnd();
+ encoder->HandleMapEnd();
+ assert(status.ok());
+ }
+
+ void SetCallId(int call_id) {
+ has_call_id_ = true;
+ call_id_ = call_id;
+ }
+ void SetData(std::string data) { data_ = std::move(data); }
+
+ private:
+ const DispatchResponse dispatch_response_;
+ std::string data_;
+ int call_id_ = 0;
+ bool has_call_id_ = false;
+};
+} // namespace
+
+// =============================================================================
+// Helpers for creating protocol responses and notifications.
+// =============================================================================
+
+std::unique_ptr<Serializable> CreateErrorResponse(
+ int call_id,
+ DispatchResponse dispatch_response,
+ const ErrorSupport* errors) {
+ auto protocol_error =
+ std::make_unique<ProtocolError>(std::move(dispatch_response));
+ protocol_error->SetCallId(call_id);
+ if (errors && !errors->Errors().empty()) {
+ protocol_error->SetData(
+ std::string(errors->Errors().begin(), errors->Errors().end()));
+ }
+ return protocol_error;
+}
+
+std::unique_ptr<Serializable> CreateErrorNotification(
+ DispatchResponse dispatch_response) {
+ return std::make_unique<ProtocolError>(std::move(dispatch_response));
+}
+
+namespace {
+class Response : public Serializable {
+ public:
+ Response(int call_id, std::unique_ptr<Serializable> params)
+ : call_id_(call_id), params_(std::move(params)) {}
+
+ void AppendSerialized(std::vector<uint8_t>* out) const override {
+ Status status;
+ std::unique_ptr<ParserHandler> encoder = cbor::NewCBOREncoder(out, &status);
+ encoder->HandleMapBegin();
+ encoder->HandleString8(SpanFrom("id"));
+ encoder->HandleInt32(call_id_);
+ encoder->HandleString8(SpanFrom("result"));
+ if (params_) {
+ params_->AppendSerialized(out);
+ } else {
+ encoder->HandleMapBegin();
+ encoder->HandleMapEnd();
+ }
+ encoder->HandleMapEnd();
+ assert(status.ok());
+ }
+
+ private:
+ const int call_id_;
+ std::unique_ptr<Serializable> params_;
+};
+
+class Notification : public Serializable {
+ public:
+ Notification(const char* method, std::unique_ptr<Serializable> params)
+ : method_(method), params_(std::move(params)) {}
+
+ void AppendSerialized(std::vector<uint8_t>* out) const override {
+ Status status;
+ std::unique_ptr<ParserHandler> encoder = cbor::NewCBOREncoder(out, &status);
+ encoder->HandleMapBegin();
+ encoder->HandleString8(SpanFrom("method"));
+ encoder->HandleString8(SpanFrom(method_));
+ encoder->HandleString8(SpanFrom("params"));
+ if (params_) {
+ params_->AppendSerialized(out);
+ } else {
+ encoder->HandleMapBegin();
+ encoder->HandleMapEnd();
+ }
+ encoder->HandleMapEnd();
+ assert(status.ok());
+ }
+
+ private:
+ const char* method_;
+ std::unique_ptr<Serializable> params_;
+};
+} // namespace
+
+std::unique_ptr<Serializable> CreateResponse(
+ int call_id,
+ std::unique_ptr<Serializable> params) {
+ return std::make_unique<Response>(call_id, std::move(params));
+}
+
+std::unique_ptr<Serializable> CreateNotification(
+ const char* method,
+ std::unique_ptr<Serializable> params) {
+ return std::make_unique<Notification>(method, std::move(params));
+}
+
+// =============================================================================
+// DomainDispatcher - Dispatching between protocol methods within a domain.
+// =============================================================================
+DomainDispatcher::WeakPtr::WeakPtr(DomainDispatcher* dispatcher)
+ : dispatcher_(dispatcher) {}
+
+DomainDispatcher::WeakPtr::~WeakPtr() {
+ if (dispatcher_)
+ dispatcher_->weak_ptrs_.erase(this);
+}
+
+DomainDispatcher::Callback::~Callback() = default;
+
+void DomainDispatcher::Callback::dispose() {
+ backend_impl_ = nullptr;
+}
+
+DomainDispatcher::Callback::Callback(
+ std::unique_ptr<DomainDispatcher::WeakPtr> backend_impl,
+ int call_id,
+ span<uint8_t> method,
+ span<uint8_t> message)
+ : backend_impl_(std::move(backend_impl)),
+ call_id_(call_id),
+ method_(method),
+ message_(message.begin(), message.end()) {}
+
+void DomainDispatcher::Callback::sendIfActive(
+ std::unique_ptr<Serializable> partialMessage,
+ const DispatchResponse& response) {
+ if (!backend_impl_ || !backend_impl_->get())
+ return;
+ backend_impl_->get()->sendResponse(call_id_, response,
+ std::move(partialMessage));
+ backend_impl_ = nullptr;
+}
+
+void DomainDispatcher::Callback::fallThroughIfActive() {
+ if (!backend_impl_ || !backend_impl_->get())
+ return;
+ backend_impl_->get()->channel()->FallThrough(call_id_, method_,
+ SpanFrom(message_));
+ backend_impl_ = nullptr;
+}
+
+DomainDispatcher::DomainDispatcher(FrontendChannel* frontendChannel)
+ : frontend_channel_(frontendChannel) {}
+
+DomainDispatcher::~DomainDispatcher() {
+ clearFrontend();
+}
+
+void DomainDispatcher::sendResponse(int call_id,
+ const DispatchResponse& response,
+ std::unique_ptr<Serializable> result) {
+ if (!frontend_channel_)
+ return;
+ std::unique_ptr<Serializable> serializable;
+ if (response.IsError()) {
+ serializable = CreateErrorResponse(call_id, response);
+ } else {
+ serializable = CreateResponse(call_id, std::move(result));
+ }
+ frontend_channel_->SendProtocolResponse(call_id, std::move(serializable));
+}
+
+bool DomainDispatcher::MaybeReportInvalidParams(
+ const Dispatchable& dispatchable,
+ const ErrorSupport& errors) {
+ if (errors.Errors().empty())
+ return false;
+ if (frontend_channel_) {
+ frontend_channel_->SendProtocolResponse(
+ dispatchable.CallId(),
+ CreateErrorResponse(
+ dispatchable.CallId(),
+ DispatchResponse::InvalidParams("Invalid parameters"), &errors));
+ }
+ return true;
+}
+
+void DomainDispatcher::clearFrontend() {
+ frontend_channel_ = nullptr;
+ for (auto& weak : weak_ptrs_)
+ weak->dispose();
+ weak_ptrs_.clear();
+}
+
+std::unique_ptr<DomainDispatcher::WeakPtr> DomainDispatcher::weakPtr() {
+ auto weak = std::make_unique<DomainDispatcher::WeakPtr>(this);
+ weak_ptrs_.insert(weak.get());
+ return weak;
+}
+
+// =============================================================================
+// UberDispatcher - dispatches between domains (backends).
+// =============================================================================
+UberDispatcher::DispatchResult::DispatchResult(bool method_found,
+ std::function<void()> runnable)
+ : method_found_(method_found), runnable_(runnable) {}
+
+void UberDispatcher::DispatchResult::Run() {
+ if (!runnable_)
+ return;
+ runnable_();
+ runnable_ = nullptr;
+}
+
+UberDispatcher::UberDispatcher(FrontendChannel* frontend_channel)
+ : frontend_channel_(frontend_channel) {
+ assert(frontend_channel);
+}
+
+UberDispatcher::~UberDispatcher() = default;
+
+constexpr size_t kNotFound = std::numeric_limits<size_t>::max();
+
+namespace {
+size_t DotIdx(span<uint8_t> method) {
+ const void* p = memchr(method.data(), '.', method.size());
+ return p ? reinterpret_cast<const uint8_t*>(p) - method.data() : kNotFound;
+}
+} // namespace
+
+UberDispatcher::DispatchResult UberDispatcher::Dispatch(
+ const Dispatchable& dispatchable) const {
+ span<uint8_t> method = FindByFirst(redirects_, dispatchable.Method(),
+ /*default_value=*/dispatchable.Method());
+ size_t dot_idx = DotIdx(method);
+ if (dot_idx != kNotFound) {
+ span<uint8_t> domain = method.subspan(0, dot_idx);
+ span<uint8_t> command = method.subspan(dot_idx + 1);
+ DomainDispatcher* dispatcher = FindByFirst(dispatchers_, domain);
+ if (dispatcher) {
+ std::function<void(const Dispatchable&)> dispatched =
+ dispatcher->Dispatch(command);
+ if (dispatched) {
+ return DispatchResult(
+ true, [dispatchable, dispatched = std::move(dispatched)]() {
+ dispatched(dispatchable);
+ });
+ }
+ }
+ }
+ return DispatchResult(false, [this, dispatchable]() {
+ frontend_channel_->SendProtocolResponse(
+ dispatchable.CallId(),
+ CreateErrorResponse(dispatchable.CallId(),
+ DispatchResponse::MethodNotFound(
+ "'" +
+ std::string(dispatchable.Method().begin(),
+ dispatchable.Method().end()) +
+ "' wasn't found")));
+ });
+}
+
+template <typename T>
+struct FirstLessThan {
+ bool operator()(const std::pair<span<uint8_t>, T>& left,
+ const std::pair<span<uint8_t>, T>& right) {
+ return SpanLessThan(left.first, right.first);
+ }
+};
+
+void UberDispatcher::WireBackend(
+ span<uint8_t> domain,
+ const std::vector<std::pair<span<uint8_t>, span<uint8_t>>>&
+ sorted_redirects,
+ std::unique_ptr<DomainDispatcher> dispatcher) {
+ auto it = redirects_.insert(redirects_.end(), sorted_redirects.begin(),
+ sorted_redirects.end());
+ std::inplace_merge(redirects_.begin(), it, redirects_.end(),
+ FirstLessThan<span<uint8_t>>());
+ auto jt = dispatchers_.insert(dispatchers_.end(),
+ std::make_pair(domain, std::move(dispatcher)));
+ std::inplace_merge(dispatchers_.begin(), jt, dispatchers_.end(),
+ FirstLessThan<std::unique_ptr<DomainDispatcher>>());
+}
+
+} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/dispatch.h b/deps/v8/third_party/inspector_protocol/crdtp/dispatch.h
new file mode 100644
index 0000000000..5253cd87ed
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/dispatch.h
@@ -0,0 +1,311 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRDTP_DISPATCH_H_
+#define V8_CRDTP_DISPATCH_H_
+
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <string>
+#include <unordered_set>
+#include "export.h"
+#include "serializable.h"
+#include "span.h"
+#include "status.h"
+
+namespace v8_crdtp {
+class FrontendChannel;
+class ErrorSupport;
+namespace cbor {
+class CBORTokenizer;
+} // namespace cbor
+
+// =============================================================================
+// DispatchResponse - Error status and chaining / fall through
+// =============================================================================
+enum class DispatchCode {
+ SUCCESS = 1,
+ FALL_THROUGH = 2,
+ // For historical reasons, these error codes correspond to commonly used
+ // XMLRPC codes (e.g. see METHOD_NOT_FOUND in
+ // https://github.com/python/cpython/blob/master/Lib/xmlrpc/client.py).
+ PARSE_ERROR = -32700,
+ INVALID_REQUEST = -32600,
+ METHOD_NOT_FOUND = -32601,
+ INVALID_PARAMS = -32602,
+ INTERNAL_ERROR = -32603,
+ SERVER_ERROR = -32000,
+};
+
+// Information returned by command handlers. Usually returned after command
+// execution attempts.
+class DispatchResponse {
+ public:
+ const std::string& Message() const { return message_; }
+
+ DispatchCode Code() const { return code_; }
+
+ bool IsSuccess() const { return code_ == DispatchCode::SUCCESS; }
+ bool IsFallThrough() const { return code_ == DispatchCode::FALL_THROUGH; }
+ bool IsError() const { return code_ < DispatchCode::SUCCESS; }
+
+ static DispatchResponse Success();
+ static DispatchResponse FallThrough();
+
+ // Indicates that a message could not be parsed. E.g., malformed JSON.
+ static DispatchResponse ParseError(std::string message);
+
+ // Indicates that a request is lacking required top-level properties
+ // ('id', 'method'), has top-level properties of the wrong type, or has
+ // unknown top-level properties.
+ static DispatchResponse InvalidRequest(std::string message);
+
+ // Indicates that a protocol method such as "Page.bringToFront" could not be
+ // dispatched because it's not known to the (domain) dispatcher.
+ static DispatchResponse MethodNotFound(std::string message);
+
+ // Indicates that the params sent to a domain handler are invalid.
+ static DispatchResponse InvalidParams(std::string message);
+
+ // Used for application level errors, e.g. within protocol agents.
+ static DispatchResponse InternalError();
+
+ // Used for application level errors, e.g. within protocol agents.
+ static DispatchResponse ServerError(std::string message);
+
+ private:
+ DispatchResponse() = default;
+ DispatchCode code_;
+ std::string message_;
+};
+
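+// A handler-side sketch (illustration only; |Backend::Enable| and |max_depth|
+// are hypothetical):
+//
+//   DispatchResponse Backend::Enable(int max_depth) {
+//     if (max_depth < 0)
+//       return DispatchResponse::InvalidParams("max_depth must be >= 0");
+//     return DispatchResponse::Success();
+//   }
+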
+// =============================================================================
+// Dispatchable - a shallow parser for CBOR encoded DevTools messages
+// =============================================================================
+
+// This parser extracts only the known top-level fields from a CBOR encoded map;
+// method, id, sessionId, and params.
+class Dispatchable {
+ public:
+ // This constructor parses the |serialized| message. If successful,
+ // |ok()| will yield |true|, and |Method()|, |SessionId()|, |CallId()|,
+ // |Params()| can be used to access the extracted contents. Otherwise,
+ // |ok()| will yield |false|, and |DispatchError()| can be
+ // used to send a response or notification to the client.
+ explicit Dispatchable(span<uint8_t> serialized);
+
+ // The serialized message that we just parsed.
+ span<uint8_t> Serialized() const { return serialized_; }
+
+ // Yields true if parsing was successful. This is cheaper than calling
+ // ::DispatchError().
+ bool ok() const;
+
+ // If !ok(), returns a DispatchResponse with appropriate code and error
+ // which can be sent to the client as a response or notification.
+ DispatchResponse DispatchError() const;
+
+ // Top level field: the command to be executed, fully qualified by
+ // domain. E.g. "Page.createIsolatedWorld".
+ span<uint8_t> Method() const { return method_; }
+ // Used to identify protocol connections attached to a specific
+ // target. See Target.attachToTarget, Target.setAutoAttach.
+ span<uint8_t> SessionId() const { return session_id_; }
+ // The call id, a sequence number that's used in responses to indicate
+ // the request to which the response belongs.
+ int32_t CallId() const { return call_id_; }
+ bool HasCallId() const { return has_call_id_; }
+ // The payload of the request in CBOR format. The |Dispatchable| parser does
+ // not parse into this; it only provides access to its raw contents here.
+ span<uint8_t> Params() const { return params_; }
+
+ private:
+ bool MaybeParseProperty(cbor::CBORTokenizer* tokenizer);
+ bool MaybeParseCallId(cbor::CBORTokenizer* tokenizer);
+ bool MaybeParseMethod(cbor::CBORTokenizer* tokenizer);
+ bool MaybeParseParams(cbor::CBORTokenizer* tokenizer);
+ bool MaybeParseSessionId(cbor::CBORTokenizer* tokenizer);
+
+ span<uint8_t> serialized_;
+
+ Status status_;
+
+ bool has_call_id_ = false;
+ int32_t call_id_;
+ span<uint8_t> method_;
+ bool params_seen_ = false;
+ span<uint8_t> params_;
+ span<uint8_t> session_id_;
+};
+
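+// A minimal usage sketch (illustration only; |cbor_message| is assumed to be
+// a CBOR-encoded DevTools message):
+//
+//   Dispatchable dispatchable(SpanFrom(cbor_message));
+//   if (!dispatchable.ok()) {
+//     // Report dispatchable.DispatchError() to the client, e.g. using
+//     // CreateErrorResponse / CreateErrorNotification declared below.
+//     return;
+//   }
+//   // Method(), CallId(), SessionId(), and Params() are now available.
+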
+// =============================================================================
+// Helpers for creating protocol responses and notifications.
+// =============================================================================
+
+// The resulting notifications can be sent to a protocol client,
+// usually via a FrontendChannel (see frontend_channel.h).
+
+std::unique_ptr<Serializable> CreateErrorResponse(
+ int callId,
+ DispatchResponse dispatch_response,
+ const ErrorSupport* errors = nullptr);
+
+std::unique_ptr<Serializable> CreateErrorNotification(
+ DispatchResponse dispatch_response);
+
+std::unique_ptr<Serializable> CreateResponse(
+ int callId,
+ std::unique_ptr<Serializable> params);
+
+std::unique_ptr<Serializable> CreateNotification(
+ const char* method,
+ std::unique_ptr<Serializable> params = nullptr);
+
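+// For example (illustration only; the method name and |params| are made up):
+//
+//   channel->SendProtocolNotification(
+//       CreateNotification("Foo.somethingHappened", std::move(params)));
+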
+// =============================================================================
+// DomainDispatcher - Dispatching between protocol methods within a domain.
+// =============================================================================
+
+// This class is subclassed by |DomainDispatcherImpl|, which we generate per
+// DevTools domain. It contains routines called from the generated code,
+// e.g. ::MaybeReportInvalidParams, which are optimized for small code size.
+// The most important method is ::Dispatch, which implements method dispatch
+// by command name lookup.
+class DomainDispatcher {
+ public:
+ class WeakPtr {
+ public:
+ explicit WeakPtr(DomainDispatcher*);
+ ~WeakPtr();
+ DomainDispatcher* get() { return dispatcher_; }
+ void dispose() { dispatcher_ = nullptr; }
+
+ private:
+ DomainDispatcher* dispatcher_;
+ };
+
+ class Callback {
+ public:
+ virtual ~Callback();
+ void dispose();
+
+ protected:
+ // |method| must point at static storage (a C++ string literal in practice).
+ Callback(std::unique_ptr<WeakPtr> backend_impl,
+ int call_id,
+ span<uint8_t> method,
+ span<uint8_t> message);
+
+ void sendIfActive(std::unique_ptr<Serializable> partialMessage,
+ const DispatchResponse& response);
+ void fallThroughIfActive();
+
+ private:
+ std::unique_ptr<WeakPtr> backend_impl_;
+ int call_id_;
+ // Subclasses of this class are instantiated from generated code which
+ // passes a string literal for the method name to the constructor. So the
+ // storage for |method| is the binary of the running process.
+ span<uint8_t> method_;
+ std::vector<uint8_t> message_;
+ };
+
+ explicit DomainDispatcher(FrontendChannel*);
+ virtual ~DomainDispatcher();
+
+ // Given a |command_name| without domain qualification, looks up the
+ // corresponding method. If the method is not found, returns nullptr.
+ // Otherwise, returns a closure that will parse the provided
+ // Dispatchable.params() to a protocol object and execute the
+ // appropriate method. If the parsing fails, it will issue an
+ // error response on the frontend channel; otherwise it will execute the
+ // command.
+ virtual std::function<void(const Dispatchable&)> Dispatch(
+ span<uint8_t> command_name) = 0;
+
+ // Sends a response to the client via the channel.
+ void sendResponse(int call_id,
+ const DispatchResponse&,
+ std::unique_ptr<Serializable> result = nullptr);
+
+ // Returns true if |errors| contains errors *and* reports these errors
+ // as a response on the frontend channel. Called from generated code,
+ // optimized for code size of the callee.
+ bool MaybeReportInvalidParams(const Dispatchable& dispatchable,
+ const ErrorSupport& errors);
+
+ FrontendChannel* channel() { return frontend_channel_; }
+
+ void clearFrontend();
+
+ std::unique_ptr<WeakPtr> weakPtr();
+
+ private:
+ FrontendChannel* frontend_channel_;
+ std::unordered_set<WeakPtr*> weak_ptrs_;
+};
+
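+// A subclass sketch, modeled on the generated per-domain dispatchers
+// (illustration only; |FooDispatcher| and |executeBar| are hypothetical):
+//
+//   std::function<void(const Dispatchable&)> FooDispatcher::Dispatch(
+//       span<uint8_t> command_name) {
+//     if (SpanEquals(SpanFrom("executeBar"), command_name))
+//       return [this](const Dispatchable& d) { executeBar(d); };
+//     return nullptr;
+//   }
+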
+// =============================================================================
+// UberDispatcher - dispatches between domains (backends).
+// =============================================================================
+class UberDispatcher {
+ public:
+ // Return type for ::Dispatch.
+ class DispatchResult {
+ public:
+ DispatchResult(bool method_found, std::function<void()> runnable);
+
+ // Indicates whether the method was found, that is, it could be dispatched
+ // to a backend registered with this dispatcher.
+ bool MethodFound() const { return method_found_; }
+
+ // Runs the dispatched result. This will send the appropriate error
+ // responses if the method wasn't found or if something went wrong during
+ // parameter parsing.
+ void Run();
+
+ private:
+ bool method_found_;
+ std::function<void()> runnable_;
+ };
+
+ // |frontend_channel| can't be nullptr.
+ explicit UberDispatcher(FrontendChannel* frontend_channel);
+ virtual ~UberDispatcher();
+
+ // Dispatches the provided |dispatchable| considering all redirects and domain
+ // handlers registered with this uber dispatcher. Also see |DispatchResult|.
+ // |dispatchable.ok()| must hold - callers must check this separately and
+ // deal with errors.
+ DispatchResult Dispatch(const Dispatchable& dispatchable) const;
+
+ // Invoked from generated code for wiring domain backends; that is,
+ // connecting domain handlers to an uber dispatcher.
+ // See <domain-namespace>::Dispatcher::Wire(UberDispatcher*,Backend*).
+ FrontendChannel* channel() const {
+ assert(frontend_channel_);
+ return frontend_channel_;
+ }
+
+ // Invoked from generated code for wiring domain backends; that is,
+ // connecting domain handlers to an uber dispatcher.
+ // See <domain-namespace>::Dispatcher::Wire(UberDispatcher*,Backend*).
+ void WireBackend(span<uint8_t> domain,
+ const std::vector<std::pair<span<uint8_t>, span<uint8_t>>>&,
+ std::unique_ptr<DomainDispatcher> dispatcher);
+
+ private:
+ DomainDispatcher* findDispatcher(span<uint8_t> method);
+ FrontendChannel* const frontend_channel_;
+ // Pairs of ascii strings of the form ("Domain1.method1","Domain2.method2")
+ // indicating that the first element of each pair redirects to the second.
+ // Sorted by first element.
+ std::vector<std::pair<span<uint8_t>, span<uint8_t>>> redirects_;
+ // Domain dispatcher instances, sorted by their domain name.
+ std::vector<std::pair<span<uint8_t>, std::unique_ptr<DomainDispatcher>>>
+ dispatchers_;
+};
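+
+// End-to-end sketch (illustration only; foo::Dispatcher::Wire stands for the
+// generated <domain-namespace>::Dispatcher::Wire mentioned above):
+//
+//   UberDispatcher uber(&channel);
+//   foo::Dispatcher::Wire(&uber, &foo_backend);
+//   Dispatchable dispatchable(SpanFrom(message));
+//   if (dispatchable.ok())
+//     uber.Dispatch(dispatchable).Run();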
+} // namespace v8_crdtp
+
+#endif // V8_CRDTP_DISPATCH_H_
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/dispatch_test.cc b/deps/v8/third_party/inspector_protocol/crdtp/dispatch_test.cc
new file mode 100644
index 0000000000..846ec08c0d
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/dispatch_test.cc
@@ -0,0 +1,445 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "cbor.h"
+#include "dispatch.h"
+#include "error_support.h"
+#include "frontend_channel.h"
+#include "json.h"
+#include "test_platform.h"
+
+namespace v8_crdtp {
+// =============================================================================
+// DispatchResponse - Error status and chaining / fall through
+// =============================================================================
+TEST(DispatchResponseTest, OK) {
+ EXPECT_EQ(DispatchCode::SUCCESS, DispatchResponse::Success().Code());
+ EXPECT_TRUE(DispatchResponse::Success().IsSuccess());
+}
+
+TEST(DispatchResponseTest, ServerError) {
+ DispatchResponse error = DispatchResponse::ServerError("Oops!");
+ EXPECT_FALSE(error.IsSuccess());
+ EXPECT_EQ(DispatchCode::SERVER_ERROR, error.Code());
+ EXPECT_EQ("Oops!", error.Message());
+}
+
+TEST(DispatchResponseTest, InternalError) {
+ DispatchResponse error = DispatchResponse::InternalError();
+ EXPECT_FALSE(error.IsSuccess());
+ EXPECT_EQ(DispatchCode::INTERNAL_ERROR, error.Code());
+ EXPECT_EQ("Internal error", error.Message());
+}
+
+TEST(DispatchResponseTest, InvalidParams) {
+ DispatchResponse error = DispatchResponse::InvalidParams("too cool");
+ EXPECT_FALSE(error.IsSuccess());
+ EXPECT_EQ(DispatchCode::INVALID_PARAMS, error.Code());
+ EXPECT_EQ("too cool", error.Message());
+}
+
+TEST(DispatchResponseTest, FallThrough) {
+ DispatchResponse error = DispatchResponse::FallThrough();
+ EXPECT_FALSE(error.IsSuccess());
+ EXPECT_TRUE(error.IsFallThrough());
+ EXPECT_EQ(DispatchCode::FALL_THROUGH, error.Code());
+}
+
+// =============================================================================
+// Dispatchable - a shallow parser for CBOR encoded DevTools messages
+// =============================================================================
+TEST(DispatchableTest, MessageMustBeAnObject) {
+ // Provide no input whatsoever.
+ span<uint8_t> empty_span;
+ Dispatchable empty(empty_span);
+ EXPECT_FALSE(empty.ok());
+ EXPECT_EQ(DispatchCode::INVALID_REQUEST, empty.DispatchError().Code());
+ EXPECT_EQ("Message must be an object", empty.DispatchError().Message());
+}
+
+TEST(DispatchableTest, MessageMustHaveIntegerIdProperty) {
+ // Construct an empty map inside of an envelope.
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(json::ConvertJSONToCBOR(SpanFrom("{}"), &cbor).ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_FALSE(dispatchable.HasCallId());
+ EXPECT_EQ(DispatchCode::INVALID_REQUEST, dispatchable.DispatchError().Code());
+ EXPECT_EQ("Message must have integer 'id' property",
+ dispatchable.DispatchError().Message());
+}
+
+TEST(DispatchableTest, MessageMustHaveIntegerIdProperty_IncorrectType) {
+ // This time we set the id property, but fail to make it an int32.
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(
+ json::ConvertJSONToCBOR(SpanFrom("{\"id\":\"foo\"}"), &cbor).ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_FALSE(dispatchable.HasCallId());
+ EXPECT_EQ(DispatchCode::INVALID_REQUEST, dispatchable.DispatchError().Code());
+ EXPECT_EQ("Message must have integer 'id' property",
+ dispatchable.DispatchError().Message());
+}
+
+TEST(DispatchableTest, MessageMustHaveStringMethodProperty) {
+ // This time we set the id property, but not the method property.
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(json::ConvertJSONToCBOR(SpanFrom("{\"id\":42}"), &cbor).ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_TRUE(dispatchable.HasCallId());
+ EXPECT_EQ(DispatchCode::INVALID_REQUEST, dispatchable.DispatchError().Code());
+ EXPECT_EQ("Message must have string 'method' property",
+ dispatchable.DispatchError().Message());
+}
+
+TEST(DispatchableTest, MessageMustHaveStringMethodProperty_IncorrectType) {
+ // This time we set the method property, but fail to make it a string.
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(
+ json::ConvertJSONToCBOR(SpanFrom("{\"id\":42,\"method\":42}"), &cbor)
+ .ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_TRUE(dispatchable.HasCallId());
+ EXPECT_EQ(DispatchCode::INVALID_REQUEST, dispatchable.DispatchError().Code());
+ EXPECT_EQ("Message must have string 'method' property",
+ dispatchable.DispatchError().Message());
+}
+
+TEST(DispatchableTest, MessageMayHaveStringSessionIdProperty) {
+ // This time, the session id is an int but it should be a string. Method and
+ // call id are present.
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(json::ConvertJSONToCBOR(
+ SpanFrom("{\"id\":42,\"method\":\"Foo.executeBar\","
+ "\"sessionId\":42" // int32 is wrong type
+ "}"),
+ &cbor)
+ .ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_TRUE(dispatchable.HasCallId());
+ EXPECT_EQ(DispatchCode::INVALID_REQUEST, dispatchable.DispatchError().Code());
+ EXPECT_EQ("Message may have string 'sessionId' property",
+ dispatchable.DispatchError().Message());
+}
+
+TEST(DispatchableTest, MessageMayHaveObjectParamsProperty) {
+ // This time, we fail to use the correct type for the params property.
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(json::ConvertJSONToCBOR(
+ SpanFrom("{\"id\":42,\"method\":\"Foo.executeBar\","
+ "\"params\":42" // int32 is wrong type
+ "}"),
+ &cbor)
+ .ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_TRUE(dispatchable.HasCallId());
+ EXPECT_EQ(DispatchCode::INVALID_REQUEST, dispatchable.DispatchError().Code());
+ EXPECT_EQ("Message may have object 'params' property",
+ dispatchable.DispatchError().Message());
+}
+
+TEST(DispatchableTest, MessageWithUnknownProperty) {
+ // This time we set the 'unknown' property, so we are told what's allowed.
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(
+ json::ConvertJSONToCBOR(SpanFrom("{\"id\":42,\"unknown\":42}"), &cbor)
+ .ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_TRUE(dispatchable.HasCallId());
+ EXPECT_EQ(DispatchCode::INVALID_REQUEST, dispatchable.DispatchError().Code());
+ EXPECT_EQ(
+ "Message has property other than 'id', 'method', 'sessionId', 'params'",
+ dispatchable.DispatchError().Message());
+}
+
+TEST(DispatchableTest, DuplicateMapKey) {
+ for (const std::string& json :
+ {"{\"id\":42,\"id\":42}", "{\"params\":null,\"params\":null}",
+ "{\"method\":\"foo\",\"method\":\"foo\"}",
+ "{\"sessionId\":\"42\",\"sessionId\":\"42\"}"}) {
+ SCOPED_TRACE("json = " + json);
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(json::ConvertJSONToCBOR(SpanFrom(json), &cbor).ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_EQ(DispatchCode::PARSE_ERROR, dispatchable.DispatchError().Code());
+ EXPECT_THAT(dispatchable.DispatchError().Message(),
+ testing::StartsWith("CBOR: duplicate map key at position "));
+ }
+}
+
+TEST(DispatchableTest, ValidMessageParsesOK_NoParams) {
+ for (const std::string& json :
+ {"{\"id\":42,\"method\":\"Foo.executeBar\",\"sessionId\":"
+ "\"f421ssvaz4\"}",
+ "{\"id\":42,\"method\":\"Foo.executeBar\",\"sessionId\":\"f421ssvaz4\","
+ "\"params\":null}"}) {
+ SCOPED_TRACE("json = " + json);
+ std::vector<uint8_t> cbor;
+ ASSERT_TRUE(json::ConvertJSONToCBOR(SpanFrom(json), &cbor).ok());
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_TRUE(dispatchable.ok());
+ EXPECT_TRUE(dispatchable.HasCallId());
+ EXPECT_EQ(42, dispatchable.CallId());
+ EXPECT_EQ("Foo.executeBar", std::string(dispatchable.Method().begin(),
+ dispatchable.Method().end()));
+ EXPECT_EQ("f421ssvaz4", std::string(dispatchable.SessionId().begin(),
+ dispatchable.SessionId().end()));
+ EXPECT_TRUE(dispatchable.Params().empty());
+ }
+}
+
+TEST(DispatchableTest, ValidMessageParsesOK_WithParams) {
+ std::vector<uint8_t> cbor;
+ cbor::EnvelopeEncoder envelope;
+ envelope.EncodeStart(&cbor);
+ cbor.push_back(cbor::EncodeIndefiniteLengthMapStart());
+ cbor::EncodeString8(SpanFrom("id"), &cbor);
+ cbor::EncodeInt32(42, &cbor);
+ cbor::EncodeString8(SpanFrom("method"), &cbor);
+ cbor::EncodeString8(SpanFrom("Foo.executeBar"), &cbor);
+ cbor::EncodeString8(SpanFrom("params"), &cbor);
+ cbor::EnvelopeEncoder params_envelope;
+ params_envelope.EncodeStart(&cbor);
+ // The |Dispatchable| class does not parse into the "params" envelope,
+ // so we can stick anything in there for the purpose of this test.
+ // For convenience, we use a String8.
+ cbor::EncodeString8(SpanFrom("params payload"), &cbor);
+ params_envelope.EncodeStop(&cbor);
+ cbor::EncodeString8(SpanFrom("sessionId"), &cbor);
+ cbor::EncodeString8(SpanFrom("f421ssvaz4"), &cbor);
+ cbor.push_back(cbor::EncodeStop());
+ envelope.EncodeStop(&cbor);
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_TRUE(dispatchable.ok());
+ EXPECT_TRUE(dispatchable.HasCallId());
+ EXPECT_EQ(42, dispatchable.CallId());
+ EXPECT_EQ("Foo.executeBar", std::string(dispatchable.Method().begin(),
+ dispatchable.Method().end()));
+ EXPECT_EQ("f421ssvaz4", std::string(dispatchable.SessionId().begin(),
+ dispatchable.SessionId().end()));
+ cbor::CBORTokenizer params_tokenizer(dispatchable.Params());
+ ASSERT_EQ(cbor::CBORTokenTag::ENVELOPE, params_tokenizer.TokenTag());
+ params_tokenizer.EnterEnvelope();
+ ASSERT_EQ(cbor::CBORTokenTag::STRING8, params_tokenizer.TokenTag());
+ EXPECT_EQ("params payload", std::string(params_tokenizer.GetString8().begin(),
+ params_tokenizer.GetString8().end()));
+}
+
+TEST(DispatchableTest, FaultyCBORTrailingJunk) {
+ // In addition to the higher level parsing errors, we also catch CBOR
+ // structural corruption. E.g., in this case, the message would otherwise be
+ // OK but has some extra trailing bytes.
+ std::vector<uint8_t> cbor;
+ cbor::EnvelopeEncoder envelope;
+ envelope.EncodeStart(&cbor);
+ cbor.push_back(cbor::EncodeIndefiniteLengthMapStart());
+ cbor::EncodeString8(SpanFrom("id"), &cbor);
+ cbor::EncodeInt32(42, &cbor);
+ cbor::EncodeString8(SpanFrom("method"), &cbor);
+ cbor::EncodeString8(SpanFrom("Foo.executeBar"), &cbor);
+ cbor::EncodeString8(SpanFrom("sessionId"), &cbor);
+ cbor::EncodeString8(SpanFrom("f421ssvaz4"), &cbor);
+ cbor.push_back(cbor::EncodeStop());
+ envelope.EncodeStop(&cbor);
+ size_t trailing_junk_pos = cbor.size();
+ cbor.push_back('t');
+ cbor.push_back('r');
+ cbor.push_back('a');
+ cbor.push_back('i');
+ cbor.push_back('l');
+ Dispatchable dispatchable(SpanFrom(cbor));
+ EXPECT_FALSE(dispatchable.ok());
+ EXPECT_EQ(DispatchCode::PARSE_ERROR, dispatchable.DispatchError().Code());
+ EXPECT_EQ(56u, trailing_junk_pos);
+ EXPECT_EQ("CBOR: trailing junk at position 56",
+ dispatchable.DispatchError().Message());
+}
+
+// =============================================================================
+// Helpers for creating protocol responses and notifications.
+// =============================================================================
+TEST(CreateErrorResponseTest, SmokeTest) {
+ ErrorSupport errors;
+ errors.Push();
+ errors.SetName("foo");
+ errors.Push();
+ errors.SetName("bar");
+ errors.AddError("expected a string");
+ errors.SetName("baz");
+ errors.AddError("expected a surprise");
+ auto serializable = CreateErrorResponse(
+ 42, DispatchResponse::InvalidParams("invalid params message"), &errors);
+ std::string json;
+ auto status =
+ json::ConvertCBORToJSON(SpanFrom(serializable->Serialize()), &json);
+ ASSERT_TRUE(status.ok());
+ EXPECT_EQ(
+ "{\"id\":42,\"error\":"
+ "{\"code\":-32602,"
+ "\"message\":\"invalid params message\","
+ "\"data\":\"foo.bar: expected a string; "
+ "foo.baz: expected a surprise\"}}",
+ json);
+}
+
+TEST(CreateErrorNotificationTest, SmokeTest) {
+ auto serializable =
+ CreateErrorNotification(DispatchResponse::InvalidRequest("oops!"));
+ std::string json;
+ auto status =
+ json::ConvertCBORToJSON(SpanFrom(serializable->Serialize()), &json);
+ ASSERT_TRUE(status.ok());
+ EXPECT_EQ("{\"error\":{\"code\":-32600,\"message\":\"oops!\"}}", json);
+}
+
+TEST(CreateResponseTest, SmokeTest) {
+ auto serializable = CreateResponse(42, nullptr);
+ std::string json;
+ auto status =
+ json::ConvertCBORToJSON(SpanFrom(serializable->Serialize()), &json);
+ ASSERT_TRUE(status.ok());
+ EXPECT_EQ("{\"id\":42,\"result\":{}}", json);
+}
+
+TEST(CreateNotificationTest, SmokeTest) {
+ auto serializable = CreateNotification("Foo.bar");
+ std::string json;
+ auto status =
+ json::ConvertCBORToJSON(SpanFrom(serializable->Serialize()), &json);
+ ASSERT_TRUE(status.ok());
+ EXPECT_EQ("{\"method\":\"Foo.bar\",\"params\":{}}", json);
+}
+
+// =============================================================================
+// UberDispatcher - dispatches between domains (backends).
+// =============================================================================
+class TestChannel : public FrontendChannel {
+ public:
+ std::string JSON() const {
+ std::string json;
+ json::ConvertCBORToJSON(SpanFrom(cbor_), &json);
+ return json;
+ }
+
+ private:
+ void SendProtocolResponse(int call_id,
+ std::unique_ptr<Serializable> message) override {
+ cbor_ = message->Serialize();
+ }
+
+ void SendProtocolNotification(
+ std::unique_ptr<Serializable> message) override {
+ cbor_ = message->Serialize();
+ }
+
+ void FallThrough(int call_id,
+ span<uint8_t> method,
+ span<uint8_t> message) override {}
+
+ void FlushProtocolNotifications() override {}
+
+ std::vector<uint8_t> cbor_;
+};
+
+TEST(UberDispatcherTest, MethodNotFound) {
+ // No domain dispatchers are registered, so, unsurprisingly, we'll get a method
+ // not found error and can see that DispatchResult::MethodFound() yields
+ // false.
+ TestChannel channel;
+ UberDispatcher dispatcher(&channel);
+ std::vector<uint8_t> message;
+ json::ConvertJSONToCBOR(SpanFrom("{\"id\":42,\"method\":\"Foo.bar\"}"),
+ &message);
+ Dispatchable dispatchable(SpanFrom(message));
+ ASSERT_TRUE(dispatchable.ok());
+ UberDispatcher::DispatchResult dispatched = dispatcher.Dispatch(dispatchable);
+ EXPECT_FALSE(dispatched.MethodFound());
+ dispatched.Run();
+ EXPECT_EQ(
+ "{\"id\":42,\"error\":"
+ "{\"code\":-32601,\"message\":\"'Foo.bar' wasn't found\"}}",
+ channel.JSON());
+}
+
+// A domain dispatcher which captures the dispatched and executed commands in
+// fields for testing.
+class TestDomain : public DomainDispatcher {
+ public:
+ explicit TestDomain(FrontendChannel* channel) : DomainDispatcher(channel) {}
+
+ std::function<void(const Dispatchable&)> Dispatch(
+ span<uint8_t> command_name) override {
+ dispatched_commands_.push_back(
+ std::string(command_name.begin(), command_name.end()));
+ return [this](const Dispatchable& dispatchable) {
+ executed_commands_.push_back(dispatchable.CallId());
+ };
+ }
+
+ // Command names of the dispatched commands.
+ std::vector<std::string> DispatchedCommands() const {
+ return dispatched_commands_;
+ }
+
+ // Call ids of the executed commands.
+ std::vector<int32_t> ExecutedCommands() const { return executed_commands_; }
+
+ private:
+ std::vector<std::string> dispatched_commands_;
+ std::vector<int32_t> executed_commands_;
+};
+
+TEST(UberDispatcherTest, DispatchingToDomainWithRedirects) {
+ // This time, we register two domain dispatchers (Foo and Bar) and issue one
+ // command 'Foo.execute' which executes on Foo and one command 'Foo.redirect'
+ // which executes as 'Bar.redirected'.
+ TestChannel channel;
+ UberDispatcher dispatcher(&channel);
+ auto foo_dispatcher = std::make_unique<TestDomain>(&channel);
+ TestDomain* foo = foo_dispatcher.get();
+ auto bar_dispatcher = std::make_unique<TestDomain>(&channel);
+ TestDomain* bar = bar_dispatcher.get();
+
+ dispatcher.WireBackend(
+ SpanFrom("Foo"), {{SpanFrom("Foo.redirect"), SpanFrom("Bar.redirected")}},
+ std::move(foo_dispatcher));
+ dispatcher.WireBackend(SpanFrom("Bar"), {}, std::move(bar_dispatcher));
+
+ {
+ std::vector<uint8_t> message;
+ json::ConvertJSONToCBOR(SpanFrom("{\"id\":42,\"method\":\"Foo.execute\"}"),
+ &message);
+ Dispatchable dispatchable(SpanFrom(message));
+ ASSERT_TRUE(dispatchable.ok());
+ UberDispatcher::DispatchResult dispatched =
+ dispatcher.Dispatch(dispatchable);
+ EXPECT_TRUE(dispatched.MethodFound());
+ dispatched.Run();
+ }
+ {
+ std::vector<uint8_t> message;
+ json::ConvertJSONToCBOR(SpanFrom("{\"id\":43,\"method\":\"Foo.redirect\"}"),
+ &message);
+ Dispatchable dispatchable(SpanFrom(message));
+ ASSERT_TRUE(dispatchable.ok());
+ UberDispatcher::DispatchResult dispatched =
+ dispatcher.Dispatch(dispatchable);
+ EXPECT_TRUE(dispatched.MethodFound());
+ dispatched.Run();
+ }
+ EXPECT_THAT(foo->DispatchedCommands(), testing::ElementsAre("execute"));
+ EXPECT_THAT(foo->ExecutedCommands(), testing::ElementsAre(42));
+ EXPECT_THAT(bar->DispatchedCommands(), testing::ElementsAre("redirected"));
+ EXPECT_THAT(bar->ExecutedCommands(), testing::ElementsAre(43));
+}
+} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/error_support.cc b/deps/v8/third_party/inspector_protocol/crdtp/error_support.cc
new file mode 100644
index 0000000000..6fc6a033d5
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/error_support.cc
@@ -0,0 +1,59 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "error_support.h"
+
+#include <cassert>
+
+namespace v8_crdtp {
+
+void ErrorSupport::Push() {
+ stack_.emplace_back();
+}
+
+void ErrorSupport::Pop() {
+ stack_.pop_back();
+}
+
+void ErrorSupport::SetName(const char* name) {
+ assert(!stack_.empty());
+ stack_.back().type = NAME;
+ stack_.back().name = name;
+}
+
+void ErrorSupport::SetIndex(size_t index) {
+ assert(!stack_.empty());
+ stack_.back().type = INDEX;
+ stack_.back().index = index;
+}
+
+void ErrorSupport::AddError(const char* error) {
+ assert(!stack_.empty());
+ if (!errors_.empty())
+ errors_ += "; ";
+ for (size_t ii = 0; ii < stack_.size(); ++ii) {
+ if (ii)
+ errors_ += ".";
+ const Segment& s = stack_[ii];
+ switch (s.type) {
+ case NAME:
+ errors_ += s.name;
+ continue;
+ case INDEX:
+ errors_ += std::to_string(s.index);
+ continue;
+ default:
+ assert(s.type != EMPTY);
+ continue;
+ }
+ }
+ errors_ += ": ";
+ errors_ += error;
+}
+
+span<uint8_t> ErrorSupport::Errors() const {
+ return SpanFrom(errors_);
+}
+
+} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/error_support.h b/deps/v8/third_party/inspector_protocol/crdtp/error_support.h
new file mode 100644
index 0000000000..34e2ce2118
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/error_support.h
@@ -0,0 +1,62 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRDTP_ERROR_SUPPORT_H_
+#define V8_CRDTP_ERROR_SUPPORT_H_
+
+#include <cstdint>
+#include <string>
+#include <vector>
+#include "export.h"
+#include "span.h"
+
+namespace v8_crdtp {
+// =============================================================================
+// ErrorSupport - For tracking errors in tree structures.
+// =============================================================================
+
+// This abstraction is used when converting between Values and inspector
+// objects, e.g. in lib/ValueConversions_{h,cc}.template. As the processing
+// enters and exits a branch, we call Push / Pop. Within the branch,
+// we either set the name or an index (in case we're processing an element of a
+// list/vector). Only when an error is seen is the path currently on the
+// stack materialized; it then prefixes the error message. E.g.,
+// "foo.bar.2: some error". After error collection, ::Errors() is used to
+// access the message.
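+//
+// Usage sketch (illustration only), mirroring the tests in
+// error_support_test.cc:
+//
+//   ErrorSupport errors;
+//   errors.Push();
+//   errors.SetName("foo");
+//   errors.Push();
+//   errors.SetIndex(2);
+//   errors.AddError("expected a string");  // yields "foo.2: expected a string"
+//   errors.Pop();
+//   errors.Pop();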
+class ErrorSupport {
+ public:
+ // Push / Pop operations for the path segments; after Push, either SetName or
+ // SetIndex must be called exactly once.
+ void Push();
+ void Pop();
+
+ // Sets the name of the current segment on the stack; e.g. a field name.
+ // |name| must be a C++ string literal in 7 bit US-ASCII.
+ void SetName(const char* name);
+ // Sets the index of the current segment on the stack; e.g. an array index.
+ void SetIndex(size_t index);
+
+ // Materializes the error internally. |error| must be a C++ string literal
+ // in 7 bit US-ASCII.
+ void AddError(const char* error);
+
+ // Returns the semicolon-separated list of errors, in 7 bit ASCII.
+ span<uint8_t> Errors() const;
+
+ private:
+ enum SegmentType { EMPTY, NAME, INDEX };
+ struct Segment {
+ SegmentType type = EMPTY;
+ union {
+ const char* name;
+ size_t index;
+ };
+ };
+ std::vector<Segment> stack_;
+ std::string errors_;
+};
+
+} // namespace v8_crdtp
+
+#endif // V8_CRDTP_ERROR_SUPPORT_H_
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/error_support_test.cc b/deps/v8/third_party/inspector_protocol/crdtp/error_support_test.cc
new file mode 100644
index 0000000000..f7c075d6e7
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/error_support_test.cc
@@ -0,0 +1,45 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "error_support.h"
+
+#include <string>
+#include <vector>
+
+#include "test_platform.h"
+
+namespace v8_crdtp {
+TEST(ErrorSupportTest, Empty) {
+ ErrorSupport errors;
+ EXPECT_TRUE(errors.Errors().empty());
+}
+
+TEST(ErrorSupportTest, Nesting) {
+ ErrorSupport errors;
+ // Enter field foo, enter element at index 42, enter field bar, and encounter
+ // an error there ("something wrong").
+ errors.Push();
+ errors.SetName("foo");
+ errors.Push();
+ errors.SetIndex(42);
+ errors.Push();
+ errors.SetName("bar_sibling");
+ errors.SetName("bar");
+ errors.AddError("something wrong");
+ errors.Pop(); // bar
+ errors.Pop(); // 42
+ // The common case is actually that we'll enter some field, set the name
+ // or index, and leave without ever producing an error.
+ errors.Push();
+ errors.SetName("no_error_here");
+ errors.Pop(); // no_error_here
+ errors.Push();
+ errors.SetName("bang");
+ errors.AddError("one last error");
+ errors.Pop(); // bang
+ errors.Pop(); // foo
+ std::string out(errors.Errors().begin(), errors.Errors().end());
+ EXPECT_EQ("foo.42.bar: something wrong; foo.bang: one last error", out);
+}
+} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/find_by_first.h b/deps/v8/third_party/inspector_protocol/crdtp/find_by_first.h
new file mode 100644
index 0000000000..ae42241413
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/find_by_first.h
@@ -0,0 +1,58 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRDTP_FIND_BY_FIRST_H_
+#define V8_CRDTP_FIND_BY_FIRST_H_
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "export.h"
+#include "span.h"
+
+namespace v8_crdtp {
+// =============================================================================
+// FindByFirst - Retrieval from a sorted vector that's keyed by span<uint8_t>.
+// =============================================================================
+
+// Given a vector of pairs sorted by the first element of each pair, find
+// the corresponding value given a key to be compared to the first element.
+// Together with std::inplace_merge and pre-sorting or std::sort, this can
+// be used to implement a minimalistic equivalent of Chromium's flat_map.
+
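+// Usage sketch (illustration only; the vector must be sorted by first
+// element):
+//
+//   std::vector<std::pair<span<uint8_t>, int>> sorted = {
+//       {SpanFrom("bar"), 2}, {SpanFrom("foo"), 1}};
+//   int value = FindByFirst(sorted, SpanFrom("foo"), /*default_value=*/-1);
+//   // value == 1; a missing key would yield -1.
+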
+// In this variant, the template parameter |T| is a value type and a
+// |default_value| is provided.
+template <typename T>
+T FindByFirst(const std::vector<std::pair<span<uint8_t>, T>>& sorted_by_first,
+ span<uint8_t> key,
+ T default_value) {
+ auto it = std::lower_bound(
+ sorted_by_first.begin(), sorted_by_first.end(), key,
+ [](const std::pair<span<uint8_t>, T>& left, span<uint8_t> right) {
+ return SpanLessThan(left.first, right);
+ });
+ return (it != sorted_by_first.end() && SpanEquals(it->first, key))
+ ? it->second
+ : default_value;
+}
+
+// In this variant, the template parameter |T| is a class or struct that's
+// held in a std::unique_ptr, and we return either a T* or nullptr.
+template <typename T>
+T* FindByFirst(const std::vector<std::pair<span<uint8_t>, std::unique_ptr<T>>>&
+ sorted_by_first,
+ span<uint8_t> key) {
+ auto it = std::lower_bound(
+ sorted_by_first.begin(), sorted_by_first.end(), key,
+ [](const std::pair<span<uint8_t>, std::unique_ptr<T>>& left,
+ span<uint8_t> right) { return SpanLessThan(left.first, right); });
+ return (it != sorted_by_first.end() && SpanEquals(it->first, key))
+ ? it->second.get()
+ : nullptr;
+}
+} // namespace v8_crdtp
+
+#endif // V8_CRDTP_FIND_BY_FIRST_H_
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/find_by_first_test.cc b/deps/v8/third_party/inspector_protocol/crdtp/find_by_first_test.cc
new file mode 100644
index 0000000000..67d5114869
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/find_by_first_test.cc
@@ -0,0 +1,76 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "find_by_first.h"
+#include "test_platform.h"
+
+namespace v8_crdtp {
+// =============================================================================
+// FindByFirst - Efficient retrieval from a sorted vector.
+// =============================================================================
+TEST(FindByFirst, SpanBySpan) {
+ std::vector<std::pair<span<uint8_t>, span<uint8_t>>> sorted_span_by_span = {
+ {SpanFrom("foo1"), SpanFrom("bar1")},
+ {SpanFrom("foo2"), SpanFrom("bar2")},
+ {SpanFrom("foo3"), SpanFrom("bar3")},
+ };
+ {
+ auto result = FindByFirst(sorted_span_by_span, SpanFrom("foo1"),
+ SpanFrom("not_found"));
+ EXPECT_EQ("bar1", std::string(result.begin(), result.end()));
+ }
+ {
+ auto result = FindByFirst(sorted_span_by_span, SpanFrom("foo3"),
+ SpanFrom("not_found"));
+ EXPECT_EQ("bar3", std::string(result.begin(), result.end()));
+ }
+ {
+ auto result = FindByFirst(sorted_span_by_span, SpanFrom("baz"),
+ SpanFrom("not_found"));
+ EXPECT_EQ("not_found", std::string(result.begin(), result.end()));
+ }
+}
+
+namespace {
+class TestObject {
+ public:
+ explicit TestObject(const std::string& message) : message_(message) {}
+
+ const std::string& message() const { return message_; }
+
+ private:
+ std::string message_;
+};
+} // namespace
+
+TEST(FindByFirst, ObjectBySpan) {
+ std::vector<std::pair<span<uint8_t>, std::unique_ptr<TestObject>>>
+ sorted_object_by_span;
+ sorted_object_by_span.push_back(
+ std::make_pair(SpanFrom("foo1"), std::make_unique<TestObject>("bar1")));
+ sorted_object_by_span.push_back(
+ std::make_pair(SpanFrom("foo2"), std::make_unique<TestObject>("bar2")));
+ sorted_object_by_span.push_back(
+ std::make_pair(SpanFrom("foo3"), std::make_unique<TestObject>("bar3")));
+ {
+ TestObject* result =
+ FindByFirst<TestObject>(sorted_object_by_span, SpanFrom("foo1"));
+ ASSERT_TRUE(result);
+ ASSERT_EQ("bar1", result->message());
+ }
+ {
+ TestObject* result =
+ FindByFirst<TestObject>(sorted_object_by_span, SpanFrom("foo3"));
+ ASSERT_TRUE(result);
+ ASSERT_EQ("bar3", result->message());
+ }
+ {
+ TestObject* result =
+ FindByFirst<TestObject>(sorted_object_by_span, SpanFrom("baz"));
+ ASSERT_FALSE(result);
+ }
+}
+} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/frontend_channel.h b/deps/v8/third_party/inspector_protocol/crdtp/frontend_channel.h
new file mode 100644
index 0000000000..eba8b2acbd
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/frontend_channel.h
@@ -0,0 +1,47 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRDTP_FRONTEND_CHANNEL_H_
+#define V8_CRDTP_FRONTEND_CHANNEL_H_
+
+#include <cstdint>
+#include <memory>
+#include "export.h"
+#include "serializable.h"
+#include "span.h"
+
+namespace v8_crdtp {
+// =============================================================================
+// FrontendChannel - For sending notifications and responses to protocol clients
+// =============================================================================
+class FrontendChannel {
+ public:
+ virtual ~FrontendChannel() = default;
+
+ // Sends protocol responses and notifications. The |call_id| parameter is
+ // seemingly redundant because it's also included in the message, but
+ // responses may be sent from an untrusted source to a trusted process (e.g.
+ // from Chromium's renderer (blink) to the browser process), which needs
+ // to be able to match the response to an earlier request without parsing the
+ // message.
+ virtual void SendProtocolResponse(int call_id,
+ std::unique_ptr<Serializable> message) = 0;
+ virtual void SendProtocolNotification(
+ std::unique_ptr<Serializable> message) = 0;
+
+ // FallThrough indicates that |message| should be handled in another layer.
+ // Usually this means the layer responding to the message didn't handle it,
+ // but in some cases messages are handled by multiple layers (e.g. both
+ // the embedder and the content layer in Chromium).
+ virtual void FallThrough(int call_id,
+ span<uint8_t> method,
+ span<uint8_t> message) = 0;
+
+ // Session implementations may queue notifications for performance or
+ // other considerations; this is a hook for domain handlers to manually flush.
+ virtual void FlushProtocolNotifications() = 0;
+};
+} // namespace v8_crdtp
+
+#endif // V8_CRDTP_FRONTEND_CHANNEL_H_
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/json.cc b/deps/v8/third_party/inspector_protocol/crdtp/json.cc
index 6701951881..0cf46aeda5 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/json.cc
+++ b/deps/v8/third_party/inspector_protocol/crdtp/json.cc
@@ -285,7 +285,22 @@ class JSONEncoder : public ParserHandler {
Emit("null");
return;
}
+ // If |value| is a scalar, emit it as an int. Taken from json_writer.cc in
+ // Chromium.
+ if (value <= std::numeric_limits<int64_t>::max() &&
+ value >= std::numeric_limits<int64_t>::min() &&
+ std::floor(value) == value) {
+ Emit(std::to_string(static_cast<int64_t>(value)));
+ return;
+ }
std::string str_value = json::platform::DToStr(value);
+ // The following is somewhat paranoid, but also taken from json_writer.cc
+ // in Chromium:
+ // Ensure that the number has a .0 if there's no decimal or 'e'. This
+ // makes sure that when we read the JSON back, it's interpreted as a
+ // real rather than an int.
+ if (str_value.find_first_of(".eE") == std::string::npos)
+ str_value.append(".0");
// DToStr may fail to emit a 0 before the decimal dot. E.g. this is
// the case in base::NumberToString in Chromium (which is based on
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/json.h b/deps/v8/third_party/inspector_protocol/crdtp/json.h
index fa8522771f..63fce408bd 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/json.h
+++ b/deps/v8/third_party/inspector_protocol/crdtp/json.h
@@ -6,6 +6,7 @@
#define V8_CRDTP_JSON_H_
#include <memory>
+#include <vector>
#include "export.h"
#include "parser_handler.h"
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/json_test.cc b/deps/v8/third_party/inspector_protocol/crdtp/json_test.cc
index d8742463de..eb6c3579aa 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/json_test.cc
+++ b/deps/v8/third_party/inspector_protocol/crdtp/json_test.cc
@@ -184,6 +184,32 @@ TEST(JsonStdStringWriterTest, HelloWorld) {
out);
}
+TEST(JsonStdStringWriterTest, ScalarsAreRenderedAsInt) {
+ // Test that Number.MIN_SAFE_INTEGER / Number.MAX_SAFE_INTEGER from JavaScript
+ // are rendered as integers (no decimal point / rounding), even when we
+ // encode them from double. JavaScript's Number is an IEEE 754 double, so
+ // it has 53 bits to represent integers.
+ std::string out;
+ Status status;
+ std::unique_ptr<ParserHandler> writer = NewJSONEncoder(&out, &status);
+ writer->HandleMapBegin();
+
+ writer->HandleString8(SpanFrom("Number.MIN_SAFE_INTEGER"));
+ EXPECT_EQ(-0x1fffffffffffff, -9007199254740991); // 53 bits for integers.
+ writer->HandleDouble(-9007199254740991); // Note HandleDouble here.
+
+ writer->HandleString8(SpanFrom("Number.MAX_SAFE_INTEGER"));
+ EXPECT_EQ(0x1fffffffffffff, 9007199254740991); // 53 bits for integers.
+ writer->HandleDouble(9007199254740991); // Note HandleDouble here.
+
+ writer->HandleMapEnd();
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(
+ "{\"Number.MIN_SAFE_INTEGER\":-9007199254740991,"
+ "\"Number.MAX_SAFE_INTEGER\":9007199254740991}",
+ out);
+}
+
TEST(JsonStdStringWriterTest, RepresentingNonFiniteValuesAsNull) {
// JSON can't represent +Infinity, -Infinity, or NaN.
// So in practice it's mapped to null.
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/serializable.cc b/deps/v8/third_party/inspector_protocol/crdtp/serializable.cc
index 1df9eb291d..20de53ecc0 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/serializable.cc
+++ b/deps/v8/third_party/inspector_protocol/crdtp/serializable.cc
@@ -9,9 +9,28 @@ namespace v8_crdtp {
// Serializable - An object to be emitted as a sequence of bytes.
// =============================================================================
-std::vector<uint8_t> Serializable::TakeSerialized() && {
+std::vector<uint8_t> Serializable::Serialize() const {
std::vector<uint8_t> out;
AppendSerialized(&out);
return out;
}
+
+namespace {
+class PreSerialized : public Serializable {
+ public:
+ explicit PreSerialized(std::vector<uint8_t> bytes) : bytes_(std::move(bytes)) {}
+
+ void AppendSerialized(std::vector<uint8_t>* out) const override {
+ out->insert(out->end(), bytes_.begin(), bytes_.end());
+ }
+
+ private:
+ std::vector<uint8_t> bytes_;
+};
+} // namespace
+
+// static
+std::unique_ptr<Serializable> Serializable::From(std::vector<uint8_t> bytes) {
+ return std::make_unique<PreSerialized>(std::move(bytes));
+}
} // namespace v8_crdtp
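
A brief usage sketch of the two additions, the Serialize() convenience method and the new From() factory that wraps eagerly produced bytes; it assumes the v8_crdtp headers are on the include path.

#include <cstdint>
#include <memory>
#include <vector>
#include "serializable.h"

void Demo() {
  // Wrap an eagerly serialized byte buffer...
  std::vector<uint8_t> bytes = {1, 2, 3};
  std::unique_ptr<v8_crdtp::Serializable> s =
      v8_crdtp::Serializable::From(std::move(bytes));
  // ...and get it back out via the convenience method.
  std::vector<uint8_t> out = s->Serialize();  // out == {1, 2, 3}
}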
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/serializable.h b/deps/v8/third_party/inspector_protocol/crdtp/serializable.h
index 57faf42316..66386711b0 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/serializable.h
+++ b/deps/v8/third_party/inspector_protocol/crdtp/serializable.h
@@ -6,6 +6,7 @@
#define V8_CRDTP_SERIALIZABLE_H_
#include <cstdint>
+#include <memory>
#include <vector>
#include "export.h"
@@ -13,17 +14,18 @@ namespace v8_crdtp {
// =============================================================================
// Serializable - An object to be emitted as a sequence of bytes.
// =============================================================================
-
class Serializable {
public:
- // The default implementation invokes AppendSerialized with an empty vector
- // and returns it; some subclasses may override and move out internal state
- // instead to avoid copying.
- virtual std::vector<uint8_t> TakeSerialized() &&;
+ // Convenience: Invokes |AppendSerialized| on an empty vector.
+ std::vector<uint8_t> Serialize() const;
virtual void AppendSerialized(std::vector<uint8_t>* out) const = 0;
virtual ~Serializable() = default;
+
+ // Wraps a vector of |bytes| into a Serializable for situations in which we
+ // eagerly serialize a structure.
+ static std::unique_ptr<Serializable> From(std::vector<uint8_t> bytes);
};
} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/serializable_test.cc b/deps/v8/third_party/inspector_protocol/crdtp/serializable_test.cc
index da49f17517..d28ca69edd 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/serializable_test.cc
+++ b/deps/v8/third_party/inspector_protocol/crdtp/serializable_test.cc
@@ -14,8 +14,7 @@ namespace v8_crdtp {
// =============================================================================
namespace {
-// Tests the default behavior for ::TakeSerialized (to invoke
-// ::AppendSerialized).
+// Tests ::Serialize (which invokes ::AppendSerialized).
class SimpleExample : public Serializable {
public:
explicit SimpleExample(const std::vector<uint8_t>& contents)
@@ -36,6 +35,6 @@ TEST(SerializableTest, YieldsContents) {
foo.AppendSerialized(&contents); // Yields contents by appending.
EXPECT_THAT(contents, testing::ElementsAre(1, 2, 3, 1, 2, 3));
// Yields contents by returning.
- EXPECT_THAT(std::move(foo).TakeSerialized(), testing::ElementsAre(1, 2, 3));
+ EXPECT_THAT(foo.Serialize(), testing::ElementsAre(1, 2, 3));
}
} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/span.cc b/deps/v8/third_party/inspector_protocol/crdtp/span.cc
new file mode 100644
index 0000000000..af30bb4fd9
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/crdtp/span.cc
@@ -0,0 +1,24 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "span.h"
+
+#include <algorithm>
+
+namespace v8_crdtp {
+
+bool SpanLessThan(span<uint8_t> x, span<uint8_t> y) noexcept {
+ auto min_size = std::min(x.size(), y.size());
+ const int r = min_size == 0 ? 0 : memcmp(x.data(), y.data(), min_size);
+ return (r < 0) || (r == 0 && x.size() < y.size());
+}
+
+bool SpanEquals(span<uint8_t> x, span<uint8_t> y) noexcept {
+ auto len = x.size();
+ if (len != y.size())
+ return false;
+ return x.data() == y.data() || len == 0 ||
+ std::memcmp(x.data(), y.data(), len) == 0;
+}
+} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/span.h b/deps/v8/third_party/inspector_protocol/crdtp/span.h
index cb94319d5f..ace5b511f6 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/span.h
+++ b/deps/v8/third_party/inspector_protocol/crdtp/span.h
@@ -5,11 +5,11 @@
#ifndef V8_CRDTP_SPAN_H_
#define V8_CRDTP_SPAN_H_
-#include <algorithm>
#include <cstdint>
#include <cstring>
#include <string>
-#include <vector>
+
+#include "export.h"
namespace v8_crdtp {
// =============================================================================
@@ -76,19 +76,15 @@ inline span<typename C::value_type> SpanFrom(const C& v) {
// Less than / equality comparison functions for sorting / searching for byte
// spans. These are similar to absl::string_view's < and == operators.
-constexpr inline bool SpanLessThan(span<uint8_t> x, span<uint8_t> y) noexcept {
- auto min_size = std::min(x.size(), y.size());
- const int r = min_size == 0 ? 0 : memcmp(x.data(), y.data(), min_size);
- return (r < 0) || (r == 0 && x.size() < y.size());
-}
+bool SpanLessThan(span<uint8_t> x, span<uint8_t> y) noexcept;
-constexpr inline bool SpanEquals(span<uint8_t> x, span<uint8_t> y) noexcept {
- auto len = x.size();
- if (len != y.size())
- return false;
- return x.data() == y.data() || len == 0 ||
- std::memcmp(x.data(), y.data(), len) == 0;
-}
+bool SpanEquals(span<uint8_t> x, span<uint8_t> y) noexcept;
+
+struct SpanLt {
+ bool operator()(span<uint8_t> l, span<uint8_t> r) const {
+ return SpanLessThan(l, r);
+ }
+};
} // namespace v8_crdtp
#endif // V8_CRDTP_SPAN_H_
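
SpanLessThan and SpanEquals move out-of-line here, presumably because memcmp cannot appear in a constant expression, so the old constexpr declarations were never usable as such. The new SpanLt functor lets byte spans serve as keys in sorted containers, replacing the unordered_map experiment deleted from span_test.cc below. A minimal sketch, assuming span.h is available:

#include <cstdint>
#include <map>
#include "span.h"

void Demo() {
  std::map<v8_crdtp::span<uint8_t>, int, v8_crdtp::SpanLt> m;
  m[v8_crdtp::SpanFrom("bar")] = 1;  // string literals have static storage,
  m[v8_crdtp::SpanFrom("foo")] = 2;  // so the spans stay valid
  // Iteration visits keys in byte-wise lexicographic order: "bar", "foo".
}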
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/span_test.cc b/deps/v8/third_party/inspector_protocol/crdtp/span_test.cc
index 718f03f885..0f31e5a6ba 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/span_test.cc
+++ b/deps/v8/third_party/inspector_protocol/crdtp/span_test.cc
@@ -4,7 +4,6 @@
#include <cstdlib>
#include <string>
-#include <unordered_map>
#include "span.h"
#include "test_platform.h"
@@ -13,7 +12,6 @@ namespace v8_crdtp {
// =============================================================================
// span - sequence of bytes
// =============================================================================
-
template <typename T>
class SpanTest : public ::testing::Test {};
@@ -108,41 +106,4 @@ TEST(SpanComparisons, ByteWiseLexicographicalOrder) {
EXPECT_FALSE(SpanLessThan(SpanFrom(msg), SpanFrom(lesser_msg)));
EXPECT_FALSE(SpanEquals(SpanFrom(msg), SpanFrom(lesser_msg)));
}
-
-// TODO(johannes): The following shows how the span can be used in an
-// std::unordered_map as a key. Once we have a production usage, we'll move
-// SpanHash, SpanEq, SpanHasher into the header.
-
-// A simple hash code, inspired by http://stackoverflow.com/q/1646807.
-constexpr inline size_t SpanHash(span<uint8_t> s) noexcept {
- size_t hash = 17;
- for (uint8_t c : s)
- hash = 31 * hash + c;
- return hash;
-}
-
-// Structs for making std::unordered_map with std::span<uint8_t> keys.
-struct SpanEq {
- constexpr inline bool operator()(span<uint8_t> l, span<uint8_t> r) const {
- return SpanEquals(l, r);
- }
-};
-
-struct SpanHasher {
- constexpr inline size_t operator()(span<uint8_t> s) const {
- return SpanHash(s);
- }
-};
-
-TEST(SpanHasherAndSpanEq, SpanAsKeyInUnorderedMap) {
- // A very simple smoke test for unordered_map, storing three key/value pairs.
- std::unordered_map<span<uint8_t>, int32_t, SpanHasher, SpanEq> a_map;
- a_map[SpanFrom("foo")] = 1;
- a_map[SpanFrom("bar")] = 2;
- a_map[SpanFrom("baz")] = 3;
- EXPECT_EQ(3u, a_map.size());
- EXPECT_EQ(1, a_map[SpanFrom("foo")]);
- EXPECT_EQ(2, a_map[SpanFrom("bar")]);
- EXPECT_EQ(3, a_map[SpanFrom("baz")]);
-}
} // namespace v8_crdtp
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/status.cc b/deps/v8/third_party/inspector_protocol/crdtp/status.cc
index 57cb795985..4a8e03d389 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/status.cc
+++ b/deps/v8/third_party/inspector_protocol/crdtp/status.cc
@@ -9,100 +9,118 @@ namespace v8_crdtp {
// Status and Error codes
// =============================================================================
-std::string Status::ToASCIIString() const {
+std::string Status::Message() const {
switch (error) {
case Error::OK:
return "OK";
case Error::JSON_PARSER_UNPROCESSED_INPUT_REMAINS:
- return ToASCIIString("JSON: unprocessed input remains");
+ return "JSON: unprocessed input remains";
case Error::JSON_PARSER_STACK_LIMIT_EXCEEDED:
- return ToASCIIString("JSON: stack limit exceeded");
+ return "JSON: stack limit exceeded";
case Error::JSON_PARSER_NO_INPUT:
- return ToASCIIString("JSON: no input");
+ return "JSON: no input";
case Error::JSON_PARSER_INVALID_TOKEN:
- return ToASCIIString("JSON: invalid token");
+ return "JSON: invalid token";
case Error::JSON_PARSER_INVALID_NUMBER:
- return ToASCIIString("JSON: invalid number");
+ return "JSON: invalid number";
case Error::JSON_PARSER_INVALID_STRING:
- return ToASCIIString("JSON: invalid string");
+ return "JSON: invalid string";
case Error::JSON_PARSER_UNEXPECTED_ARRAY_END:
- return ToASCIIString("JSON: unexpected array end");
+ return "JSON: unexpected array end";
case Error::JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED:
- return ToASCIIString("JSON: comma or array end expected");
+ return "JSON: comma or array end expected";
case Error::JSON_PARSER_STRING_LITERAL_EXPECTED:
- return ToASCIIString("JSON: string literal expected");
+ return "JSON: string literal expected";
case Error::JSON_PARSER_COLON_EXPECTED:
- return ToASCIIString("JSON: colon expected");
+ return "JSON: colon expected";
case Error::JSON_PARSER_UNEXPECTED_MAP_END:
- return ToASCIIString("JSON: unexpected map end");
+ return "JSON: unexpected map end";
case Error::JSON_PARSER_COMMA_OR_MAP_END_EXPECTED:
- return ToASCIIString("JSON: comma or map end expected");
+ return "JSON: comma or map end expected";
case Error::JSON_PARSER_VALUE_EXPECTED:
- return ToASCIIString("JSON: value expected");
+ return "JSON: value expected";
case Error::CBOR_INVALID_INT32:
- return ToASCIIString("CBOR: invalid int32");
+ return "CBOR: invalid int32";
case Error::CBOR_INVALID_DOUBLE:
- return ToASCIIString("CBOR: invalid double");
+ return "CBOR: invalid double";
case Error::CBOR_INVALID_ENVELOPE:
- return ToASCIIString("CBOR: invalid envelope");
+ return "CBOR: invalid envelope";
case Error::CBOR_ENVELOPE_CONTENTS_LENGTH_MISMATCH:
- return ToASCIIString("CBOR: envelope contents length mismatch");
+ return "CBOR: envelope contents length mismatch";
case Error::CBOR_MAP_OR_ARRAY_EXPECTED_IN_ENVELOPE:
- return ToASCIIString("CBOR: map or array expected in envelope");
+ return "CBOR: map or array expected in envelope";
case Error::CBOR_INVALID_STRING8:
- return ToASCIIString("CBOR: invalid string8");
+ return "CBOR: invalid string8";
case Error::CBOR_INVALID_STRING16:
- return ToASCIIString("CBOR: invalid string16");
+ return "CBOR: invalid string16";
case Error::CBOR_INVALID_BINARY:
- return ToASCIIString("CBOR: invalid binary");
+ return "CBOR: invalid binary";
case Error::CBOR_UNSUPPORTED_VALUE:
- return ToASCIIString("CBOR: unsupported value");
+ return "CBOR: unsupported value";
case Error::CBOR_NO_INPUT:
- return ToASCIIString("CBOR: no input");
+ return "CBOR: no input";
case Error::CBOR_INVALID_START_BYTE:
- return ToASCIIString("CBOR: invalid start byte");
+ return "CBOR: invalid start byte";
case Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE:
- return ToASCIIString("CBOR: unexpected eof expected value");
+ return "CBOR: unexpected eof expected value";
case Error::CBOR_UNEXPECTED_EOF_IN_ARRAY:
- return ToASCIIString("CBOR: unexpected eof in array");
+ return "CBOR: unexpected eof in array";
case Error::CBOR_UNEXPECTED_EOF_IN_MAP:
- return ToASCIIString("CBOR: unexpected eof in map");
+ return "CBOR: unexpected eof in map";
case Error::CBOR_INVALID_MAP_KEY:
- return ToASCIIString("CBOR: invalid map key");
+ return "CBOR: invalid map key";
+ case Error::CBOR_DUPLICATE_MAP_KEY:
+ return "CBOR: duplicate map key";
case Error::CBOR_STACK_LIMIT_EXCEEDED:
- return ToASCIIString("CBOR: stack limit exceeded");
+ return "CBOR: stack limit exceeded";
case Error::CBOR_TRAILING_JUNK:
- return ToASCIIString("CBOR: trailing junk");
+ return "CBOR: trailing junk";
case Error::CBOR_MAP_START_EXPECTED:
- return ToASCIIString("CBOR: map start expected");
+ return "CBOR: map start expected";
case Error::CBOR_MAP_STOP_EXPECTED:
- return ToASCIIString("CBOR: map stop expected");
+ return "CBOR: map stop expected";
case Error::CBOR_ARRAY_START_EXPECTED:
- return ToASCIIString("CBOR: array start expected");
+ return "CBOR: array start expected";
case Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED:
- return ToASCIIString("CBOR: envelope size limit exceeded");
+ return "CBOR: envelope size limit exceeded";
+
+ case Error::MESSAGE_MUST_BE_AN_OBJECT:
+ return "Message must be an object";
+ case Error::MESSAGE_MUST_HAVE_INTEGER_ID_PROPERTY:
+ return "Message must have integer 'id' property";
+ case Error::MESSAGE_MUST_HAVE_STRING_METHOD_PROPERTY:
+ return "Message must have string 'method' property";
+ case Error::MESSAGE_MAY_HAVE_STRING_SESSION_ID_PROPERTY:
+ return "Message may have string 'sessionId' property";
+ case Error::MESSAGE_MAY_HAVE_OBJECT_PARAMS_PROPERTY:
+ return "Message may have object 'params' property";
+ case Error::MESSAGE_HAS_UNKNOWN_PROPERTY:
+ return "Message has property other than "
+ "'id', 'method', 'sessionId', 'params'";
case Error::BINDINGS_MANDATORY_FIELD_MISSING:
- return ToASCIIString("BINDINGS: mandatory field missing");
+ return "BINDINGS: mandatory field missing";
case Error::BINDINGS_BOOL_VALUE_EXPECTED:
- return ToASCIIString("BINDINGS: bool value expected");
+ return "BINDINGS: bool value expected";
case Error::BINDINGS_INT32_VALUE_EXPECTED:
- return ToASCIIString("BINDINGS: int32 value expected");
+ return "BINDINGS: int32 value expected";
case Error::BINDINGS_DOUBLE_VALUE_EXPECTED:
- return ToASCIIString("BINDINGS: double value expected");
+ return "BINDINGS: double value expected";
case Error::BINDINGS_STRING_VALUE_EXPECTED:
- return ToASCIIString("BINDINGS: string value expected");
+ return "BINDINGS: string value expected";
case Error::BINDINGS_STRING8_VALUE_EXPECTED:
- return ToASCIIString("BINDINGS: string8 value expected");
+ return "BINDINGS: string8 value expected";
case Error::BINDINGS_BINARY_VALUE_EXPECTED:
- return ToASCIIString("BINDINGS: binary value expected");
+ return "BINDINGS: binary value expected";
}
// Some compilers can't figure out that we can't get here.
return "INVALID ERROR CODE";
}
-std::string Status::ToASCIIString(const char* msg) const {
- return std::string(msg) + " at position " + std::to_string(pos);
+std::string Status::ToASCIIString() const {
+ if (ok())
+ return "OK";
+ return Message() + " at position " + std::to_string(pos);
}
} // namespace v8_crdtp
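
The refactoring splits the message text from the position formatting. A short sketch of the resulting API, assuming status.h is available:

#include <string>
#include "status.h"

void Demo() {
  v8_crdtp::Status s(v8_crdtp::Error::JSON_PARSER_NO_INPUT, 0);
  std::string bare = s.Message();        // "JSON: no input"
  std::string full = s.ToASCIIString();  // "JSON: no input at position 0"
}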
diff --git a/deps/v8/third_party/inspector_protocol/crdtp/status.h b/deps/v8/third_party/inspector_protocol/crdtp/status.h
index bf75615c09..ebb8ec98c0 100644
--- a/deps/v8/third_party/inspector_protocol/crdtp/status.h
+++ b/deps/v8/third_party/inspector_protocol/crdtp/status.h
@@ -18,7 +18,9 @@ namespace v8_crdtp {
enum class Error {
OK = 0,
- // JSON parsing errors - json_parser.{h,cc}.
+
+ // JSON parsing errors; checked when parsing / converting from JSON.
+ // See json.{h,cc}.
JSON_PARSER_UNPROCESSED_INPUT_REMAINS = 0x01,
JSON_PARSER_STACK_LIMIT_EXCEEDED = 0x02,
JSON_PARSER_NO_INPUT = 0x03,
@@ -33,6 +35,7 @@ enum class Error {
JSON_PARSER_COMMA_OR_MAP_END_EXPECTED = 0x0c,
JSON_PARSER_VALUE_EXPECTED = 0x0d,
+ // CBOR parsing errors; checked when parsing / converting from CBOR.
CBOR_INVALID_INT32 = 0x0e,
CBOR_INVALID_DOUBLE = 0x0f,
CBOR_INVALID_ENVELOPE = 0x10,
@@ -48,20 +51,31 @@ enum class Error {
CBOR_UNEXPECTED_EOF_IN_ARRAY = 0x1a,
CBOR_UNEXPECTED_EOF_IN_MAP = 0x1b,
CBOR_INVALID_MAP_KEY = 0x1c,
- CBOR_STACK_LIMIT_EXCEEDED = 0x1d,
- CBOR_TRAILING_JUNK = 0x1e,
- CBOR_MAP_START_EXPECTED = 0x1f,
- CBOR_MAP_STOP_EXPECTED = 0x20,
- CBOR_ARRAY_START_EXPECTED = 0x21,
- CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x22,
-
- BINDINGS_MANDATORY_FIELD_MISSING = 0x23,
- BINDINGS_BOOL_VALUE_EXPECTED = 0x24,
- BINDINGS_INT32_VALUE_EXPECTED = 0x25,
- BINDINGS_DOUBLE_VALUE_EXPECTED = 0x26,
- BINDINGS_STRING_VALUE_EXPECTED = 0x27,
- BINDINGS_STRING8_VALUE_EXPECTED = 0x28,
- BINDINGS_BINARY_VALUE_EXPECTED = 0x29,
+ CBOR_DUPLICATE_MAP_KEY = 0x1d,
+ CBOR_STACK_LIMIT_EXCEEDED = 0x1e,
+ CBOR_TRAILING_JUNK = 0x1f,
+ CBOR_MAP_START_EXPECTED = 0x20,
+ CBOR_MAP_STOP_EXPECTED = 0x21,
+ CBOR_ARRAY_START_EXPECTED = 0x22,
+ CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x23,
+
+ // Message errors are constraints we place on protocol messages coming
+ // from a protocol client; these are checked in crdtp::Dispatchable
+ // (see dispatch.h) as it performs a shallow parse.
+ MESSAGE_MUST_BE_AN_OBJECT = 0x24,
+ MESSAGE_MUST_HAVE_INTEGER_ID_PROPERTY = 0x25,
+ MESSAGE_MUST_HAVE_STRING_METHOD_PROPERTY = 0x26,
+ MESSAGE_MAY_HAVE_STRING_SESSION_ID_PROPERTY = 0x27,
+ MESSAGE_MAY_HAVE_OBJECT_PARAMS_PROPERTY = 0x28,
+ MESSAGE_HAS_UNKNOWN_PROPERTY = 0x29,
+
+ BINDINGS_MANDATORY_FIELD_MISSING = 0x30,
+ BINDINGS_BOOL_VALUE_EXPECTED = 0x31,
+ BINDINGS_INT32_VALUE_EXPECTED = 0x32,
+ BINDINGS_DOUBLE_VALUE_EXPECTED = 0x33,
+ BINDINGS_STRING_VALUE_EXPECTED = 0x34,
+ BINDINGS_STRING8_VALUE_EXPECTED = 0x35,
+ BINDINGS_BINARY_VALUE_EXPECTED = 0x36,
};
// A status value with position that can be copied. The default status
@@ -76,12 +90,18 @@ struct Status {
Status(Error error, size_t pos) : error(error), pos(pos) {}
Status() = default;
- // Returns a 7 bit US-ASCII string, either "OK" or an error message
- // that includes the position.
- std::string ToASCIIString() const;
+ bool IsMessageError() const {
+ return error >= Error::MESSAGE_MUST_BE_AN_OBJECT &&
+ error <= Error::MESSAGE_HAS_UNKNOWN_PROPERTY;
+ }
+
+ // Returns a 7 bit US-ASCII string, either "OK" or an error message
+ // without the position.
+ std::string Message() const;
- private:
- std::string ToASCIIString(const char* msg) const;
+ // Returns a 7 bit US-ASCII string, either "OK" or an error message that
+ // includes the position.
+ std::string ToASCIIString() const;
};
} // namespace v8_crdtp
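
IsMessageError relies on the MESSAGE_* codes forming a contiguous range (0x24 through 0x29 after the renumbering above). A sketch of a dispatcher-side check built on it; ShouldReportAsInvalidRequest is a hypothetical helper, not part of the library.

#include "status.h"

// Decide whether a failure should be reported to the protocol client as a
// malformed message rather than as a server-side error.
bool ShouldReportAsInvalidRequest(const v8_crdtp::Status& status) {
  return !status.ok() && status.IsMessageError();
}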
diff --git a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
index 6e83e87d2a..f4823847df 100644
--- a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
+++ b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
@@ -33,16 +33,9 @@ template("inspector_protocol_generate") {
invoker.config_file,
"$inspector_protocol_dir/lib/base_string_adapter_cc.template",
"$inspector_protocol_dir/lib/base_string_adapter_h.template",
- "$inspector_protocol_dir/lib/DispatcherBase_cpp.template",
- "$inspector_protocol_dir/lib/DispatcherBase_h.template",
- "$inspector_protocol_dir/lib/ErrorSupport_cpp.template",
- "$inspector_protocol_dir/lib/ErrorSupport_h.template",
"$inspector_protocol_dir/lib/Forward_h.template",
- "$inspector_protocol_dir/lib/FrontendChannel_h.template",
"$inspector_protocol_dir/lib/Object_cpp.template",
"$inspector_protocol_dir/lib/Object_h.template",
- "$inspector_protocol_dir/lib/Parser_cpp.template",
- "$inspector_protocol_dir/lib/Parser_h.template",
"$inspector_protocol_dir/lib/Protocol_cpp.template",
"$inspector_protocol_dir/lib/ValueConversions_h.template",
"$inspector_protocol_dir/lib/Values_cpp.template",
@@ -58,7 +51,9 @@ template("inspector_protocol_generate") {
args = [
"--jinja_dir",
- rebase_path("//third_party/", root_build_dir), # jinja is in chromium's third_party
+ rebase_path("//third_party/", root_build_dir), # jinja is in chromium's
+ # third_party
+
"--output_base",
rebase_path(invoker.out_dir, root_build_dir),
"--config",
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
deleted file mode 100644
index 52aa4977a9..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
+++ /dev/null
@@ -1,355 +0,0 @@
-// This file is generated by DispatcherBase_cpp.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#include "DispatcherBase.h"
-//#include "Parser.h"
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-// static
-DispatchResponse DispatchResponse::OK()
-{
- DispatchResponse result;
- result.m_status = kSuccess;
- result.m_errorCode = kParseError;
- return result;
-}
-
-// static
-DispatchResponse DispatchResponse::Error(const String& error)
-{
- DispatchResponse result;
- result.m_status = kError;
- result.m_errorCode = kServerError;
- result.m_errorMessage = error;
- return result;
-}
-
-// static
-DispatchResponse DispatchResponse::InternalError()
-{
- DispatchResponse result;
- result.m_status = kError;
- result.m_errorCode = kInternalError;
- result.m_errorMessage = "Internal error";
- return result;
-}
-
-// static
-DispatchResponse DispatchResponse::InvalidParams(const String& error)
-{
- DispatchResponse result;
- result.m_status = kError;
- result.m_errorCode = kInvalidParams;
- result.m_errorMessage = error;
- return result;
-}
-
-// static
-DispatchResponse DispatchResponse::FallThrough()
-{
- DispatchResponse result;
- result.m_status = kFallThrough;
- result.m_errorCode = kParseError;
- return result;
-}
-
-// static
-const char DispatcherBase::kInvalidParamsString[] = "Invalid parameters";
-
-DispatcherBase::WeakPtr::WeakPtr(DispatcherBase* dispatcher) : m_dispatcher(dispatcher) { }
-
-DispatcherBase::WeakPtr::~WeakPtr()
-{
- if (m_dispatcher)
- m_dispatcher->m_weakPtrs.erase(this);
-}
-
-DispatcherBase::Callback::Callback(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message)
- : m_backendImpl(std::move(backendImpl))
- , m_callId(callId)
- , m_method(method)
- , m_message(message.begin(), message.end()) { }
-
-DispatcherBase::Callback::~Callback() = default;
-
-void DispatcherBase::Callback::dispose()
-{
- m_backendImpl = nullptr;
-}
-
-void DispatcherBase::Callback::sendIfActive(std::unique_ptr<protocol::DictionaryValue> partialMessage, const DispatchResponse& response)
-{
- if (!m_backendImpl || !m_backendImpl->get())
- return;
- m_backendImpl->get()->sendResponse(m_callId, response, std::move(partialMessage));
- m_backendImpl = nullptr;
-}
-
-void DispatcherBase::Callback::fallThroughIfActive()
-{
- if (!m_backendImpl || !m_backendImpl->get())
- return;
- m_backendImpl->get()->channel()->fallThrough(m_callId, m_method, {{config.crdtp.namespace}}::SpanFrom(m_message));
- m_backendImpl = nullptr;
-}
-
-DispatcherBase::DispatcherBase(FrontendChannel* frontendChannel)
- : m_frontendChannel(frontendChannel) { }
-
-DispatcherBase::~DispatcherBase()
-{
- clearFrontend();
-}
-
-void DispatcherBase::sendResponse(int callId, const DispatchResponse& response, std::unique_ptr<protocol::DictionaryValue> result)
-{
- if (!m_frontendChannel)
- return;
- if (response.status() == DispatchResponse::kError) {
- reportProtocolError(callId, response.errorCode(), response.errorMessage(), nullptr);
- return;
- }
- m_frontendChannel->sendProtocolResponse(callId, InternalResponse::createResponse(callId, std::move(result)));
-}
-
-void DispatcherBase::sendResponse(int callId, const DispatchResponse& response)
-{
- sendResponse(callId, response, DictionaryValue::create());
-}
-
-namespace {
-
-class ProtocolError : public Serializable {
-public:
- static std::unique_ptr<ProtocolError> createErrorResponse(int callId, DispatchResponse::ErrorCode code, const String& errorMessage, ErrorSupport* errors)
- {
- std::unique_ptr<ProtocolError> protocolError(new ProtocolError(code, errorMessage));
- protocolError->m_callId = callId;
- protocolError->m_hasCallId = true;
- if (errors && errors->hasErrors())
- protocolError->m_data = errors->errors();
- return protocolError;
- }
-
- static std::unique_ptr<ProtocolError> createErrorNotification(DispatchResponse::ErrorCode code, const String& errorMessage)
- {
- return std::unique_ptr<ProtocolError>(new ProtocolError(code, errorMessage));
- }
-
- void AppendSerialized(std::vector<uint8_t>* out) const override
- {
- toDictionary()->AppendSerialized(out);
- }
-
- ~ProtocolError() override {}
-
-private:
- ProtocolError(DispatchResponse::ErrorCode code, const String& errorMessage)
- : m_code(code)
- , m_errorMessage(errorMessage)
- {
- }
-
- std::unique_ptr<DictionaryValue> toDictionary() const {
- std::unique_ptr<protocol::DictionaryValue> error = DictionaryValue::create();
- error->setInteger("code", m_code);
- error->setString("message", m_errorMessage);
- if (m_data.length())
- error->setString("data", m_data);
- std::unique_ptr<protocol::DictionaryValue> message = DictionaryValue::create();
- message->setObject("error", std::move(error));
- if (m_hasCallId)
- message->setInteger("id", m_callId);
- return message;
- }
-
- DispatchResponse::ErrorCode m_code;
- String m_errorMessage;
- String m_data;
- int m_callId = 0;
- bool m_hasCallId = false;
-};
-
-} // namespace
-
-static void reportProtocolErrorTo(FrontendChannel* frontendChannel, int callId, DispatchResponse::ErrorCode code, const String& errorMessage, ErrorSupport* errors)
-{
- if (frontendChannel)
- frontendChannel->sendProtocolResponse(callId, ProtocolError::createErrorResponse(callId, code, errorMessage, errors));
-}
-
-static void reportProtocolErrorTo(FrontendChannel* frontendChannel, DispatchResponse::ErrorCode code, const String& errorMessage)
-{
- if (frontendChannel)
- frontendChannel->sendProtocolNotification(ProtocolError::createErrorNotification(code, errorMessage));
-}
-
-void DispatcherBase::reportProtocolError(int callId, DispatchResponse::ErrorCode code, const String& errorMessage, ErrorSupport* errors)
-{
- reportProtocolErrorTo(m_frontendChannel, callId, code, errorMessage, errors);
-}
-
-void DispatcherBase::clearFrontend()
-{
- m_frontendChannel = nullptr;
- for (auto& weak : m_weakPtrs)
- weak->dispose();
- m_weakPtrs.clear();
-}
-
-std::unique_ptr<DispatcherBase::WeakPtr> DispatcherBase::weakPtr()
-{
- std::unique_ptr<DispatcherBase::WeakPtr> weak(new DispatcherBase::WeakPtr(this));
- m_weakPtrs.insert(weak.get());
- return weak;
-}
-
-UberDispatcher::UberDispatcher(FrontendChannel* frontendChannel)
- : m_frontendChannel(frontendChannel) { }
-
-void UberDispatcher::registerBackend(const String& name, std::unique_ptr<protocol::DispatcherBase> dispatcher)
-{
- m_dispatchers[name] = std::move(dispatcher);
-}
-
-void UberDispatcher::setupRedirects(const std::unordered_map<String, String>& redirects)
-{
- for (const auto& pair : redirects)
- m_redirects[pair.first] = pair.second;
-}
-
-bool UberDispatcher::parseCommand(Value* parsedMessage, int* outCallId, String* outMethod) {
- if (!parsedMessage) {
- reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kParseError, "Message must be a valid JSON");
- return false;
- }
- protocol::DictionaryValue* messageObject = DictionaryValue::cast(parsedMessage);
- if (!messageObject) {
- reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must be an object");
- return false;
- }
-
- int callId = 0;
- protocol::Value* callIdValue = messageObject->get("id");
- bool success = callIdValue && callIdValue->asInteger(&callId);
- if (!success) {
- reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must have integer 'id' property");
- return false;
- }
- if (outCallId)
- *outCallId = callId;
-
- protocol::Value* methodValue = messageObject->get("method");
- String method;
- success = methodValue && methodValue->asString(&method);
- if (!success) {
- reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kInvalidRequest, "Message must have string 'method' property", nullptr);
- return false;
- }
- if (outMethod)
- *outMethod = method;
- return true;
-}
-
-protocol::DispatcherBase* UberDispatcher::findDispatcher(const String& method) {
- size_t dotIndex = StringUtil::find(method, ".");
- if (dotIndex == StringUtil::kNotFound)
- return nullptr;
- String domain = StringUtil::substring(method, 0, dotIndex);
- auto it = m_dispatchers.find(domain);
- if (it == m_dispatchers.end())
- return nullptr;
- if (!it->second->canDispatch(method))
- return nullptr;
- return it->second.get();
-}
-
-bool UberDispatcher::canDispatch(const String& in_method)
-{
- String method = in_method;
- auto redirectIt = m_redirects.find(method);
- if (redirectIt != m_redirects.end())
- method = redirectIt->second;
- return !!findDispatcher(method);
-}
-
-void UberDispatcher::dispatch(int callId, const String& in_method, std::unique_ptr<Value> parsedMessage, {{config.crdtp.namespace}}::span<uint8_t> rawMessage)
-{
- String method = in_method;
- auto redirectIt = m_redirects.find(method);
- if (redirectIt != m_redirects.end())
- method = redirectIt->second;
- protocol::DispatcherBase* dispatcher = findDispatcher(method);
- if (!dispatcher) {
- reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
- return;
- }
- std::unique_ptr<protocol::DictionaryValue> messageObject = DictionaryValue::cast(std::move(parsedMessage));
- dispatcher->dispatch(callId, method, rawMessage, std::move(messageObject));
-}
-
-UberDispatcher::~UberDispatcher() = default;
-
-// static
-std::unique_ptr<Serializable> InternalResponse::createResponse(int callId, std::unique_ptr<Serializable> params)
-{
- return std::unique_ptr<Serializable>(new InternalResponse(callId, nullptr, std::move(params)));
-}
-
-// static
-std::unique_ptr<Serializable> InternalResponse::createNotification(const char* method, std::unique_ptr<Serializable> params)
-{
- return std::unique_ptr<Serializable>(new InternalResponse(0, method, std::move(params)));
-}
-
-// static
-std::unique_ptr<Serializable> InternalResponse::createErrorResponse(int callId, DispatchResponse::ErrorCode code, const String& message)
-{
- return ProtocolError::createErrorResponse(callId, code, message, nullptr);
-}
-
-void InternalResponse::AppendSerialized(std::vector<uint8_t>* out) const
-{
- using {{config.crdtp.namespace}}::cbor::NewCBOREncoder;
- using {{config.crdtp.namespace}}::ParserHandler;
- using {{config.crdtp.namespace}}::Status;
- using {{config.crdtp.namespace}}::SpanFrom;
-
- Status status;
- std::unique_ptr<ParserHandler> encoder = NewCBOREncoder(out, &status);
- encoder->HandleMapBegin();
- if (m_method) {
- encoder->HandleString8(SpanFrom("method"));
- encoder->HandleString8(SpanFrom(m_method));
- encoder->HandleString8(SpanFrom("params"));
- } else {
- encoder->HandleString8(SpanFrom("id"));
- encoder->HandleInt32(m_callId);
- encoder->HandleString8(SpanFrom("result"));
- }
- if (m_params) {
- m_params->AppendSerialized(out);
- } else {
- encoder->HandleMapBegin();
- encoder->HandleMapEnd();
- }
- encoder->HandleMapEnd();
- DCHECK(status.ok());
-}
-
-InternalResponse::InternalResponse(int callId, const char* method, std::unique_ptr<Serializable> params)
- : m_callId(callId)
- , m_method(method)
- , m_params(params ? std::move(params) : nullptr)
-{
-}
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
deleted file mode 100644
index 5231123afc..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
+++ /dev/null
@@ -1,178 +0,0 @@
-// This file is generated by DispatcherBase_h.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef {{"_".join(config.protocol.namespace)}}_DispatcherBase_h
-#define {{"_".join(config.protocol.namespace)}}_DispatcherBase_h
-
-//#include "Forward.h"
-//#include "ErrorSupport.h"
-//#include "Values.h"
-
-#include "{{config.crdtp.dir}}/span.h"
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-class WeakPtr;
-
-class {{config.lib.export_macro}} DispatchResponse {
-public:
- enum Status {
- kSuccess = 0,
- kError = 1,
- kFallThrough = 2,
- };
-
- // For historical reasons, these error codes correspond to commonly used
- // XMLRPC codes (e.g. see METHOD_NOT_FOUND in
- // https://github.com/python/cpython/blob/master/Lib/xmlrpc/client.py).
- enum ErrorCode {
- kParseError = -32700,
- kInvalidRequest = -32600,
- kMethodNotFound = -32601,
- kInvalidParams = -32602,
- kInternalError = -32603,
- kServerError = -32000,
- };
-
- Status status() const { return m_status; }
- const String& errorMessage() const { return m_errorMessage; }
- ErrorCode errorCode() const { return m_errorCode; }
- bool isSuccess() const { return m_status == kSuccess; }
-
- static DispatchResponse OK();
- static DispatchResponse Error(const String&);
- static DispatchResponse InternalError();
- static DispatchResponse InvalidParams(const String&);
- static DispatchResponse FallThrough();
-
-private:
- Status m_status;
- String m_errorMessage;
- ErrorCode m_errorCode;
-};
-
-class {{config.lib.export_macro}} DispatcherBase {
- PROTOCOL_DISALLOW_COPY(DispatcherBase);
-public:
- static const char kInvalidParamsString[];
- class {{config.lib.export_macro}} WeakPtr {
- public:
- explicit WeakPtr(DispatcherBase*);
- ~WeakPtr();
- DispatcherBase* get() { return m_dispatcher; }
- void dispose() { m_dispatcher = nullptr; }
-
- private:
- DispatcherBase* m_dispatcher;
- };
-
- class {{config.lib.export_macro}} Callback {
- public:
- Callback(std::unique_ptr<WeakPtr> backendImpl, int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message);
- virtual ~Callback();
- void dispose();
-
- protected:
- void sendIfActive(std::unique_ptr<protocol::DictionaryValue> partialMessage, const DispatchResponse& response);
- void fallThroughIfActive();
-
- private:
- std::unique_ptr<WeakPtr> m_backendImpl;
- int m_callId;
- String m_method;
- std::vector<uint8_t> m_message;
- };
-
- explicit DispatcherBase(FrontendChannel*);
- virtual ~DispatcherBase();
-
- virtual bool canDispatch(const String& method) = 0;
- virtual void dispatch(int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> rawMessage, std::unique_ptr<protocol::DictionaryValue> messageObject) = 0;
- FrontendChannel* channel() { return m_frontendChannel; }
-
- void sendResponse(int callId, const DispatchResponse&, std::unique_ptr<protocol::DictionaryValue> result);
- void sendResponse(int callId, const DispatchResponse&);
-
- void reportProtocolError(int callId, DispatchResponse::ErrorCode, const String& errorMessage, ErrorSupport* errors);
- void clearFrontend();
-
- std::unique_ptr<WeakPtr> weakPtr();
-
-private:
- FrontendChannel* m_frontendChannel;
- std::unordered_set<WeakPtr*> m_weakPtrs;
-};
-
-class {{config.lib.export_macro}} UberDispatcher {
- PROTOCOL_DISALLOW_COPY(UberDispatcher);
-public:
- explicit UberDispatcher(FrontendChannel*);
- void registerBackend(const String& name, std::unique_ptr<protocol::DispatcherBase>);
- void setupRedirects(const std::unordered_map<String, String>&);
- bool parseCommand(Value* message, int* callId, String* method);
- bool canDispatch(const String& method);
- void dispatch(int callId, const String& method, std::unique_ptr<Value> message, {{config.crdtp.namespace}}::span<uint8_t> rawMessage);
- FrontendChannel* channel() { return m_frontendChannel; }
- virtual ~UberDispatcher();
-
-private:
- protocol::DispatcherBase* findDispatcher(const String& method);
- FrontendChannel* m_frontendChannel;
- std::unordered_map<String, String> m_redirects;
- std::unordered_map<String, std::unique_ptr<protocol::DispatcherBase>> m_dispatchers;
-};
-
-class InternalResponse : public Serializable {
- PROTOCOL_DISALLOW_COPY(InternalResponse);
-public:
- static std::unique_ptr<Serializable> createResponse(int callId, std::unique_ptr<Serializable> params);
- static std::unique_ptr<Serializable> createNotification(const char* method, std::unique_ptr<Serializable> params = nullptr);
- static std::unique_ptr<Serializable> createErrorResponse(int callId, DispatchResponse::ErrorCode code, const String& message);
-
- void AppendSerialized(std::vector<uint8_t>* out) const override;
-
- ~InternalResponse() override {}
-
-private:
- InternalResponse(int callId, const char* method, std::unique_ptr<Serializable> params);
-
- int m_callId;
- const char* m_method = nullptr;
- std::unique_ptr<Serializable> m_params;
-};
-
-class InternalRawNotification : public Serializable {
-public:
- static std::unique_ptr<InternalRawNotification> fromBinary(std::vector<uint8_t> notification)
- {
- return std::unique_ptr<InternalRawNotification>(new InternalRawNotification(std::move(notification)));
- }
-
- ~InternalRawNotification() override {}
-
- std::vector<uint8_t> TakeSerialized() && override {
- return std::move(m_binaryNotification);
- }
-
- void AppendSerialized(std::vector<uint8_t>* out) const override
- {
- out->insert(out->end(), m_binaryNotification.begin(), m_binaryNotification.end());
- }
-
-private:
- explicit InternalRawNotification(std::vector<uint8_t> notification)
- : m_binaryNotification(std::move(notification)) { }
-
- std::vector<uint8_t> m_binaryNotification;
-};
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-
-#endif // !defined({{"_".join(config.protocol.namespace)}}_DispatcherBase_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
deleted file mode 100644
index a5c2a79bbd..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
+++ /dev/null
@@ -1,73 +0,0 @@
-// This file is generated by ErrorSupport_cpp.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#include "ErrorSupport.h"
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-ErrorSupport::ErrorSupport() { }
-ErrorSupport::~ErrorSupport() { }
-
-void ErrorSupport::setName(const char* name)
-{
- setName(String(name));
-}
-
-void ErrorSupport::setName(const String& name)
-{
- DCHECK(m_path.size());
- m_path[m_path.size() - 1] = name;
-}
-
-void ErrorSupport::push()
-{
- m_path.push_back(String());
-}
-
-void ErrorSupport::pop()
-{
- m_path.pop_back();
-}
-
-void ErrorSupport::addError(const char* error)
-{
- addError(String(error));
-}
-
-void ErrorSupport::addError(const String& error)
-{
- StringBuilder builder;
- for (size_t i = 0; i < m_path.size(); ++i) {
- if (i)
- StringUtil::builderAppend(builder, '.');
- StringUtil::builderAppend(builder, m_path[i]);
- }
- StringUtil::builderAppend(builder, ": ");
- StringUtil::builderAppend(builder, error);
- m_errors.push_back(StringUtil::builderToString(builder));
-}
-
-bool ErrorSupport::hasErrors()
-{
- return !!m_errors.size();
-}
-
-String ErrorSupport::errors()
-{
- StringBuilder builder;
- for (size_t i = 0; i < m_errors.size(); ++i) {
- if (i)
- StringUtil::builderAppend(builder, "; ");
- StringUtil::builderAppend(builder, m_errors[i]);
- }
- return StringUtil::builderToString(builder);
-}
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
deleted file mode 100644
index f317a3cfb4..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
+++ /dev/null
@@ -1,39 +0,0 @@
-// This file is generated by ErrorSupport_h.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef {{"_".join(config.protocol.namespace)}}_ErrorSupport_h
-#define {{"_".join(config.protocol.namespace)}}_ErrorSupport_h
-
-#include {{format_include(config.protocol.package, "Forward")}}
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-class {{config.lib.export_macro}} ErrorSupport {
-public:
- ErrorSupport();
- ~ErrorSupport();
-
- void push();
- void setName(const char*);
- void setName(const String&);
- void pop();
- void addError(const char*);
- void addError(const String&);
- bool hasErrors();
- String errors();
-
-private:
- std::vector<String> m_path;
- std::vector<String> m_errors;
-};
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-
-#endif // !defined({{"_".join(config.protocol.namespace)}}_ErrorSupport_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
index aefc1a16e8..e2eef3042f 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
@@ -18,6 +18,9 @@
#include <unordered_map>
#include <unordered_set>
+#include "{{config.crdtp.dir}}/error_support.h"
+#include "{{config.crdtp.dir}}/dispatch.h"
+#include "{{config.crdtp.dir}}/frontend_channel.h"
#include "{{config.crdtp.dir}}/glue.h"
{% for namespace in config.protocol.namespace %}
@@ -25,15 +28,18 @@ namespace {{namespace}} {
{% endfor %}
class DictionaryValue;
-class DispatchResponse;
-class ErrorSupport;
+using DispatchResponse = {{config.crdtp.namespace}}::DispatchResponse;
+using ErrorSupport = {{config.crdtp.namespace}}::ErrorSupport;
+using Serializable = {{config.crdtp.namespace}}::Serializable;
+using FrontendChannel = {{config.crdtp.namespace}}::FrontendChannel;
+using DomainDispatcher = {{config.crdtp.namespace}}::DomainDispatcher;
+using UberDispatcher = {{config.crdtp.namespace}}::UberDispatcher;
class FundamentalValue;
class ListValue;
class Object;
using Response = DispatchResponse;
class SerializedValue;
class StringValue;
-class UberDispatcher;
class Value;
namespace detail {
diff --git a/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template b/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
deleted file mode 100644
index b677ecdd48..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
+++ /dev/null
@@ -1,33 +0,0 @@
-// This file is generated by FrontendChannel_h.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef {{"_".join(config.protocol.namespace)}}_FrontendChannel_h
-#define {{"_".join(config.protocol.namespace)}}_FrontendChannel_h
-
-#include "{{config.crdtp.dir}}/serializable.h"
-#include "{{config.crdtp.dir}}/span.h"
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-using {{config.crdtp.namespace}}::Serializable;
-
-class {{config.lib.export_macro}} FrontendChannel {
-public:
- virtual ~FrontendChannel() { }
- virtual void sendProtocolResponse(int callId, std::unique_ptr<Serializable> message) = 0;
- virtual void sendProtocolNotification(std::unique_ptr<Serializable> message) = 0;
-
- virtual void fallThrough(int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message) = 0;
- virtual void flushProtocolNotifications() = 0;
-};
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-
-#endif // !defined({{"_".join(config.protocol.namespace)}}_FrontendChannel_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template
index 3fc36b9ee3..b639b1bb77 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template
@@ -14,7 +14,7 @@ std::unique_ptr<Object> Object::fromValue(protocol::Value* value, ErrorSupport*
{
protocol::DictionaryValue* dictionary = DictionaryValue::cast(value);
if (!dictionary) {
- errors->addError("object expected");
+ errors->AddError("object expected");
return nullptr;
}
dictionary = static_cast<protocol::DictionaryValue*>(dictionary->clone().release());
diff --git a/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
deleted file mode 100644
index ea7ecc5a1a..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
+++ /dev/null
@@ -1,548 +0,0 @@
-// This file is generated by Parser_cpp.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-namespace {
-
-const int stackLimit = 1000;
-
-enum Token {
- ObjectBegin,
- ObjectEnd,
- ArrayBegin,
- ArrayEnd,
- StringLiteral,
- Number,
- BoolTrue,
- BoolFalse,
- NullToken,
- ListSeparator,
- ObjectPairSeparator,
- InvalidToken,
-};
-
-const char* const nullString = "null";
-const char* const trueString = "true";
-const char* const falseString = "false";
-
-bool isASCII(uint16_t c)
-{
- return !(c & ~0x7F);
-}
-
-bool isSpaceOrNewLine(uint16_t c)
-{
- return isASCII(c) && c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9));
-}
-
-double charactersToDouble(const uint16_t* characters, size_t length, bool* ok)
-{
- std::vector<char> buffer;
- buffer.reserve(length + 1);
- for (size_t i = 0; i < length; ++i) {
- if (!isASCII(characters[i])) {
- *ok = false;
- return 0;
- }
- buffer.push_back(static_cast<char>(characters[i]));
- }
- buffer.push_back('\0');
- return StringUtil::toDouble(buffer.data(), length, ok);
-}
-
-double charactersToDouble(const uint8_t* characters, size_t length, bool* ok)
-{
- std::string buffer(reinterpret_cast<const char*>(characters), length);
- return StringUtil::toDouble(buffer.data(), length, ok);
-}
-
-template<typename Char>
-bool parseConstToken(const Char* start, const Char* end, const Char** tokenEnd, const char* token)
-{
- while (start < end && *token != '\0' && *start++ == *token++) { }
- if (*token != '\0')
- return false;
- *tokenEnd = start;
- return true;
-}
-
-template<typename Char>
-bool readInt(const Char* start, const Char* end, const Char** tokenEnd, bool canHaveLeadingZeros)
-{
- if (start == end)
- return false;
- bool haveLeadingZero = '0' == *start;
- int length = 0;
- while (start < end && '0' <= *start && *start <= '9') {
- ++start;
- ++length;
- }
- if (!length)
- return false;
- if (!canHaveLeadingZeros && length > 1 && haveLeadingZero)
- return false;
- *tokenEnd = start;
- return true;
-}
-
-template<typename Char>
-bool parseNumberToken(const Char* start, const Char* end, const Char** tokenEnd)
-{
- // We just grab the number here. We validate the size in DecodeNumber.
- // According to RFC4627, a valid number is: [minus] int [frac] [exp]
- if (start == end)
- return false;
- Char c = *start;
- if ('-' == c)
- ++start;
-
- if (!readInt(start, end, &start, false))
- return false;
- if (start == end) {
- *tokenEnd = start;
- return true;
- }
-
- // Optional fraction part
- c = *start;
- if ('.' == c) {
- ++start;
- if (!readInt(start, end, &start, true))
- return false;
- if (start == end) {
- *tokenEnd = start;
- return true;
- }
- c = *start;
- }
-
- // Optional exponent part
- if ('e' == c || 'E' == c) {
- ++start;
- if (start == end)
- return false;
- c = *start;
- if ('-' == c || '+' == c) {
- ++start;
- if (start == end)
- return false;
- }
- if (!readInt(start, end, &start, true))
- return false;
- }
-
- *tokenEnd = start;
- return true;
-}
-
-template<typename Char>
-bool readHexDigits(const Char* start, const Char* end, const Char** tokenEnd, int digits)
-{
- if (end - start < digits)
- return false;
- for (int i = 0; i < digits; ++i) {
- Char c = *start++;
- if (!(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')))
- return false;
- }
- *tokenEnd = start;
- return true;
-}
-
-template<typename Char>
-bool parseStringToken(const Char* start, const Char* end, const Char** tokenEnd)
-{
- while (start < end) {
- Char c = *start++;
- if ('\\' == c) {
- if (start == end)
- return false;
- c = *start++;
- // Make sure the escaped char is valid.
- switch (c) {
- case 'x':
- if (!readHexDigits(start, end, &start, 2))
- return false;
- break;
- case 'u':
- if (!readHexDigits(start, end, &start, 4))
- return false;
- break;
- case '\\':
- case '/':
- case 'b':
- case 'f':
- case 'n':
- case 'r':
- case 't':
- case 'v':
- case '"':
- break;
- default:
- return false;
- }
- } else if ('"' == c) {
- *tokenEnd = start;
- return true;
- }
- }
- return false;
-}
-
-template<typename Char>
-bool skipComment(const Char* start, const Char* end, const Char** commentEnd)
-{
- if (start == end)
- return false;
-
- if (*start != '/' || start + 1 >= end)
- return false;
- ++start;
-
- if (*start == '/') {
- // Single line comment, read to newline.
- for (++start; start < end; ++start) {
- if (*start == '\n' || *start == '\r') {
- *commentEnd = start + 1;
- return true;
- }
- }
- *commentEnd = end;
- // Comment reaches end-of-input, which is fine.
- return true;
- }
-
- if (*start == '*') {
- Char previous = '\0';
- // Block comment, read until end marker.
- for (++start; start < end; previous = *start++) {
- if (previous == '*' && *start == '/') {
- *commentEnd = start + 1;
- return true;
- }
- }
- // Block comment must close before end-of-input.
- return false;
- }
-
- return false;
-}
-
-template<typename Char>
-void skipWhitespaceAndComments(const Char* start, const Char* end, const Char** whitespaceEnd)
-{
- while (start < end) {
- if (isSpaceOrNewLine(*start)) {
- ++start;
- } else if (*start == '/') {
- const Char* commentEnd;
- if (!skipComment(start, end, &commentEnd))
- break;
- start = commentEnd;
- } else {
- break;
- }
- }
- *whitespaceEnd = start;
-}
-
-template<typename Char>
-Token parseToken(const Char* start, const Char* end, const Char** tokenStart, const Char** tokenEnd)
-{
- skipWhitespaceAndComments(start, end, tokenStart);
- start = *tokenStart;
-
- if (start == end)
- return InvalidToken;
-
- switch (*start) {
- case 'n':
- if (parseConstToken(start, end, tokenEnd, nullString))
- return NullToken;
- break;
- case 't':
- if (parseConstToken(start, end, tokenEnd, trueString))
- return BoolTrue;
- break;
- case 'f':
- if (parseConstToken(start, end, tokenEnd, falseString))
- return BoolFalse;
- break;
- case '[':
- *tokenEnd = start + 1;
- return ArrayBegin;
- case ']':
- *tokenEnd = start + 1;
- return ArrayEnd;
- case ',':
- *tokenEnd = start + 1;
- return ListSeparator;
- case '{':
- *tokenEnd = start + 1;
- return ObjectBegin;
- case '}':
- *tokenEnd = start + 1;
- return ObjectEnd;
- case ':':
- *tokenEnd = start + 1;
- return ObjectPairSeparator;
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- case '-':
- if (parseNumberToken(start, end, tokenEnd))
- return Number;
- break;
- case '"':
- if (parseStringToken(start + 1, end, tokenEnd))
- return StringLiteral;
- break;
- }
- return InvalidToken;
-}
-
-template<typename Char>
-int hexToInt(Char c)
-{
- if ('0' <= c && c <= '9')
- return c - '0';
- if ('A' <= c && c <= 'F')
- return c - 'A' + 10;
- if ('a' <= c && c <= 'f')
- return c - 'a' + 10;
- DCHECK(false);
- return 0;
-}
-
-template<typename Char>
-bool decodeString(const Char* start, const Char* end, StringBuilder* output)
-{
- while (start < end) {
- uint16_t c = *start++;
- if ('\\' != c) {
- StringUtil::builderAppend(*output, c);
- continue;
- }
- if (start == end)
- return false;
- c = *start++;
-
- if (c == 'x') {
- // \x is not supported.
- return false;
- }
-
- switch (c) {
- case '"':
- case '/':
- case '\\':
- break;
- case 'b':
- c = '\b';
- break;
- case 'f':
- c = '\f';
- break;
- case 'n':
- c = '\n';
- break;
- case 'r':
- c = '\r';
- break;
- case 't':
- c = '\t';
- break;
- case 'v':
- c = '\v';
- break;
- case 'u':
- c = (hexToInt(*start) << 12) +
- (hexToInt(*(start + 1)) << 8) +
- (hexToInt(*(start + 2)) << 4) +
- hexToInt(*(start + 3));
- start += 4;
- break;
- default:
- return false;
- }
- StringUtil::builderAppend(*output, c);
- }
- return true;
-}
-
-template<typename Char>
-bool decodeString(const Char* start, const Char* end, String* output)
-{
- if (start == end) {
- *output = "";
- return true;
- }
- if (start > end)
- return false;
- StringBuilder buffer;
- StringUtil::builderReserve(buffer, end - start);
- if (!decodeString(start, end, &buffer))
- return false;
- *output = StringUtil::builderToString(buffer);
- return true;
-}
-
-template<typename Char>
-std::unique_ptr<Value> buildValue(const Char* start, const Char* end, const Char** valueTokenEnd, int depth)
-{
- if (depth > stackLimit)
- return nullptr;
-
- std::unique_ptr<Value> result;
- const Char* tokenStart;
- const Char* tokenEnd;
- Token token = parseToken(start, end, &tokenStart, &tokenEnd);
- switch (token) {
- case InvalidToken:
- return nullptr;
- case NullToken:
- result = Value::null();
- break;
- case BoolTrue:
- result = FundamentalValue::create(true);
- break;
- case BoolFalse:
- result = FundamentalValue::create(false);
- break;
- case Number: {
- bool ok;
- double value = charactersToDouble(tokenStart, tokenEnd - tokenStart, &ok);
- if (!ok)
- return nullptr;
- if (value >= INT_MIN && value <= INT_MAX && static_cast<int>(value) == value)
- result = FundamentalValue::create(static_cast<int>(value));
- else
- result = FundamentalValue::create(value);
- break;
- }
- case StringLiteral: {
- String value;
- bool ok = decodeString(tokenStart + 1, tokenEnd - 1, &value);
- if (!ok)
- return nullptr;
- result = StringValue::create(value);
- break;
- }
- case ArrayBegin: {
- std::unique_ptr<ListValue> array = ListValue::create();
- start = tokenEnd;
- token = parseToken(start, end, &tokenStart, &tokenEnd);
- while (token != ArrayEnd) {
- std::unique_ptr<Value> arrayNode = buildValue(start, end, &tokenEnd, depth + 1);
- if (!arrayNode)
- return nullptr;
- array->pushValue(std::move(arrayNode));
-
- // After a list value, we expect a comma or the end of the list.
- start = tokenEnd;
- token = parseToken(start, end, &tokenStart, &tokenEnd);
- if (token == ListSeparator) {
- start = tokenEnd;
- token = parseToken(start, end, &tokenStart, &tokenEnd);
- if (token == ArrayEnd)
- return nullptr;
- } else if (token != ArrayEnd) {
- // Unexpected value after list value. Bail out.
- return nullptr;
- }
- }
- if (token != ArrayEnd)
- return nullptr;
- result = std::move(array);
- break;
- }
- case ObjectBegin: {
- std::unique_ptr<DictionaryValue> object = DictionaryValue::create();
- start = tokenEnd;
- token = parseToken(start, end, &tokenStart, &tokenEnd);
- while (token != ObjectEnd) {
- if (token != StringLiteral)
- return nullptr;
- String key;
- if (!decodeString(tokenStart + 1, tokenEnd - 1, &key))
- return nullptr;
- start = tokenEnd;
-
- token = parseToken(start, end, &tokenStart, &tokenEnd);
- if (token != ObjectPairSeparator)
- return nullptr;
- start = tokenEnd;
-
- std::unique_ptr<Value> value = buildValue(start, end, &tokenEnd, depth + 1);
- if (!value)
- return nullptr;
- object->setValue(key, std::move(value));
- start = tokenEnd;
-
- // After a key/value pair, we expect a comma or the end of the
- // object.
- token = parseToken(start, end, &tokenStart, &tokenEnd);
- if (token == ListSeparator) {
- start = tokenEnd;
- token = parseToken(start, end, &tokenStart, &tokenEnd);
- if (token == ObjectEnd)
- return nullptr;
- } else if (token != ObjectEnd) {
- // Unexpected value after last object value. Bail out.
- return nullptr;
- }
- }
- if (token != ObjectEnd)
- return nullptr;
- result = std::move(object);
- break;
- }
-
- default:
- // We got a token that's not a value.
- return nullptr;
- }
-
- skipWhitespaceAndComments(tokenEnd, end, valueTokenEnd);
- return result;
-}
-
-template<typename Char>
-std::unique_ptr<Value> parseJSONInternal(const Char* start, unsigned length)
-{
- const Char* end = start + length;
- const Char *tokenEnd;
- std::unique_ptr<Value> value = buildValue(start, end, &tokenEnd, 0);
- if (!value || tokenEnd != end)
- return nullptr;
- return value;
-}
-
-} // anonymous namespace
-
-std::unique_ptr<Value> parseJSONCharacters(const uint16_t* characters, unsigned length)
-{
- return parseJSONInternal<uint16_t>(characters, length);
-}
-
-std::unique_ptr<Value> parseJSONCharacters(const uint8_t* characters, unsigned length)
-{
- return parseJSONInternal<uint8_t>(characters, length);
-}
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
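
The hand-written recursive-descent JSON parser deleted above (together with its header, next) kept a Number token as an int whenever the parsed double survives a round trip through int, falling back to double otherwise; JSON parsing presumably moves to the shared crdtp library this commit rolls in (note crdtp/json.cc in the roll.py sync list below). A standalone sketch of that narrowing rule, with an illustrative helper name:

    #include <climits>
    #include <cstdio>

    // Mirrors the Number case of the deleted buildValue(): a JSON number is
    // stored as an int when it is integral and in int range, as a double
    // otherwise.
    static bool FitsInInt(double value) {
      return value >= INT_MIN && value <= INT_MAX &&
             static_cast<double>(static_cast<int>(value)) == value;
    }

    int main() {
      const double samples[] = {42.0, 2.5, 1e12, -7.0};
      for (double v : samples)
        std::printf("%g -> %s\n", v, FitsInInt(v) ? "int" : "double");
      return 0;
    }
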
diff --git a/deps/v8/third_party/inspector_protocol/lib/Parser_h.template b/deps/v8/third_party/inspector_protocol/lib/Parser_h.template
deleted file mode 100644
index 1832c2e972..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/Parser_h.template
+++ /dev/null
@@ -1,24 +0,0 @@
-// This file is generated by Parser_h.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef {{"_".join(config.protocol.namespace)}}_Parser_h
-#define {{"_".join(config.protocol.namespace)}}_Parser_h
-
-//#include "Forward.h"
-//#include "Values.h"
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-{{config.lib.export_macro}} std::unique_ptr<Value> parseJSONCharacters(const uint8_t*, unsigned);
-{{config.lib.export_macro}} std::unique_ptr<Value> parseJSONCharacters(const uint16_t*, unsigned);
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-
-#endif // !defined({{"_".join(config.protocol.namespace)}}_Parser_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
index 63baf689c6..15961a6321 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
@@ -40,7 +40,7 @@ struct ValueConversions<bool> {
bool result = false;
bool success = value ? value->asBoolean(&result) : false;
if (!success)
- errors->addError("boolean value expected");
+ errors->AddError("boolean value expected");
return result;
}
@@ -57,7 +57,7 @@ struct ValueConversions<int> {
int result = 0;
bool success = value ? value->asInteger(&result) : false;
if (!success)
- errors->addError("integer value expected");
+ errors->AddError("integer value expected");
return result;
}
@@ -74,7 +74,7 @@ struct ValueConversions<double> {
double result = 0;
bool success = value ? value->asDouble(&result) : false;
if (!success)
- errors->addError("double value expected");
+ errors->AddError("double value expected");
return result;
}
@@ -91,7 +91,7 @@ struct ValueConversions<String> {
String result;
bool success = value ? value->asString(&result) : false;
if (!success)
- errors->addError("string value expected");
+ errors->AddError("string value expected");
return result;
}
@@ -107,7 +107,7 @@ struct ValueConversions<Binary> {
{
if (!value ||
(value->type() != Value::TypeBinary && value->type() != Value::TypeString)) {
- errors->addError("Either string base64 or binary value expected");
+ errors->AddError("Either string base64 or binary value expected");
return Binary();
}
Binary binary;
@@ -118,7 +118,7 @@ struct ValueConversions<Binary> {
bool success;
Binary out = Binary::fromBase64(result, &success);
if (!success)
- errors->addError("base64 decoding error");
+ errors->AddError("base64 decoding error");
return out;
}
@@ -133,20 +133,20 @@ struct ValueConversions<std::vector<std::unique_ptr<T>>> {
static std::unique_ptr<std::vector<std::unique_ptr<T>>> fromValue(protocol::Value* value, ErrorSupport* errors) {
protocol::ListValue* array = ListValue::cast(value);
if (!array) {
- errors->addError("array expected");
+ errors->AddError("array expected");
return nullptr;
}
- errors->push();
+ errors->Push();
std::unique_ptr<std::vector<std::unique_ptr<T>>> result(
new std::vector<std::unique_ptr<T>>());
result->reserve(array->size());
for (size_t i = 0; i < array->size(); ++i) {
- errors->setName(StringUtil::fromInteger(i));
+ errors->SetIndex(i);
auto item = ValueConversions<T>::fromValue(array->at(i), errors);
result->emplace_back(std::move(item));
}
- errors->pop();
- if (errors->hasErrors())
+ errors->Pop();
+ if (!errors->Errors().empty())
return nullptr;
return result;
}
@@ -167,19 +167,19 @@ struct ValueConversions<std::vector<T>> {
static std::unique_ptr<std::vector<T>> fromValue(protocol::Value* value, ErrorSupport* errors) {
protocol::ListValue* array = ListValue::cast(value);
if (!array) {
- errors->addError("array expected");
+ errors->AddError("array expected");
return nullptr;
}
- errors->push();
+ errors->Push();
std::unique_ptr<std::vector<T>> result(new std::vector<T>());
result->reserve(array->size());
for (size_t i = 0; i < array->size(); ++i) {
- errors->setName(StringUtil::fromInteger(i));
+ errors->SetIndex(i);
auto item = ValueConversions<T>::fromValue(array->at(i), errors);
result->emplace_back(std::move(item));
}
- errors->pop();
- if (errors->hasErrors())
+ errors->Pop();
+ if (!errors->Errors().empty())
return nullptr;
return result;
}
@@ -200,7 +200,7 @@ struct ValueConversions<Value> {
{
bool success = !!value;
if (!success) {
- errors->addError("value expected");
+ errors->AddError("value expected");
return nullptr;
}
return value->clone();
@@ -223,7 +223,7 @@ struct ValueConversions<DictionaryValue> {
{
bool success = value && value->type() == protocol::Value::TypeObject;
if (!success)
- errors->addError("object expected");
+ errors->AddError("object expected");
return DictionaryValue::cast(value->clone());
}
@@ -244,7 +244,7 @@ struct ValueConversions<ListValue> {
{
bool success = value && value->type() == protocol::Value::TypeArray;
if (!success)
- errors->addError("list expected");
+ errors->AddError("list expected");
return ListValue::cast(value->clone());
}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
index 91cd471983..09f3bed136 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
@@ -13,16 +13,11 @@ namespace {{namespace}} {
{% endfor %}
namespace {
-// When parsing CBOR, we limit recursion depth for objects and arrays
-// to this constant.
-static constexpr int kStackLimitValues = 1000;
-
-using {{config.crdtp.namespace}}::Error;
using {{config.crdtp.namespace}}::Status;
+using {{config.crdtp.namespace}}::ParserHandler;
using {{config.crdtp.namespace}}::span;
namespace cbor {
-using {{config.crdtp.namespace}}::cbor::CBORTokenTag;
-using {{config.crdtp.namespace}}::cbor::CBORTokenizer;
+using {{config.crdtp.namespace}}::cbor::ParseCBOR;
using {{config.crdtp.namespace}}::cbor::EncodeBinary;
using {{config.crdtp.namespace}}::cbor::EncodeDouble;
using {{config.crdtp.namespace}}::cbor::EncodeFalse;
@@ -39,165 +34,161 @@ using {{config.crdtp.namespace}}::cbor::EnvelopeEncoder;
using {{config.crdtp.namespace}}::cbor::InitialByteForEnvelope;
} // namespace cbor
-// Below are three parsing routines for CBOR, which cover enough
-// to roundtrip JSON messages.
-std::unique_ptr<DictionaryValue> parseMap(int32_t stack_depth, cbor::CBORTokenizer* tokenizer);
-std::unique_ptr<ListValue> parseArray(int32_t stack_depth, cbor::CBORTokenizer* tokenizer);
-std::unique_ptr<Value> parseValue(int32_t stack_depth, cbor::CBORTokenizer* tokenizer);
-
-// |bytes| must start with the indefinite length array byte, so basically,
-// ParseArray may only be called after an indefinite length array has been
-// detected.
-std::unique_ptr<ListValue> parseArray(int32_t stack_depth, cbor::CBORTokenizer* tokenizer) {
- DCHECK(tokenizer->TokenTag() == cbor::CBORTokenTag::ARRAY_START);
- tokenizer->Next();
- auto list = ListValue::create();
- while (tokenizer->TokenTag() != cbor::CBORTokenTag::STOP) {
- // Error::CBOR_UNEXPECTED_EOF_IN_ARRAY
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::DONE) return nullptr;
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::ERROR_VALUE) return nullptr;
- // Parse value.
- auto value = parseValue(stack_depth, tokenizer);
- if (!value) return nullptr;
- list->pushValue(std::move(value));
+// Builds a protocol::Value from the parsing events received from the
+// driver of a |ParserHandler| (e.g. cbor::ParseCBOR).
+class ValueParserHandler : public ParserHandler {
+ public:
+ // Provides the parsed protocol::Value.
+ std::unique_ptr<Value> ReleaseRoot() { return std::move(root_); }
+
+ // The first parsing error encountered; |status().ok()| is the default.
+ Status status() const { return status_; }
+
+ private:
+ //
+ // Implementation of ParserHandler.
+ //
+ void HandleMapBegin() override {
+ if (!status_.ok()) return;
+ std::unique_ptr<DictionaryValue> dict = DictionaryValue::create();
+ DictionaryValue* dict_ptr = dict.get();
+ AddValueToParent(std::move(dict));
+ stack_.emplace_back(dict_ptr);
}
- tokenizer->Next();
- return list;
-}
-
-std::unique_ptr<Value> parseValue(
- int32_t stack_depth, cbor::CBORTokenizer* tokenizer) {
- // Error::CBOR_STACK_LIMIT_EXCEEDED
- if (stack_depth > kStackLimitValues) return nullptr;
- // Skip past the envelope to get to what's inside.
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::ENVELOPE)
- tokenizer->EnterEnvelope();
- switch (tokenizer->TokenTag()) {
- case cbor::CBORTokenTag::ERROR_VALUE:
- return nullptr;
- case cbor::CBORTokenTag::DONE:
- // Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE
- return nullptr;
- case cbor::CBORTokenTag::TRUE_VALUE: {
- std::unique_ptr<Value> value = FundamentalValue::create(true);
- tokenizer->Next();
- return value;
- }
- case cbor::CBORTokenTag::FALSE_VALUE: {
- std::unique_ptr<Value> value = FundamentalValue::create(false);
- tokenizer->Next();
- return value;
- }
- case cbor::CBORTokenTag::NULL_VALUE: {
- std::unique_ptr<Value> value = FundamentalValue::null();
- tokenizer->Next();
- return value;
- }
- case cbor::CBORTokenTag::INT32: {
- std::unique_ptr<Value> value = FundamentalValue::create(tokenizer->GetInt32());
- tokenizer->Next();
- return value;
- }
- case cbor::CBORTokenTag::DOUBLE: {
- std::unique_ptr<Value> value = FundamentalValue::create(tokenizer->GetDouble());
- tokenizer->Next();
- return value;
- }
- case cbor::CBORTokenTag::STRING8: {
- span<uint8_t> str = tokenizer->GetString8();
- std::unique_ptr<Value> value =
- StringValue::create(StringUtil::fromUTF8(str.data(), str.size()));
- tokenizer->Next();
- return value;
- }
- case cbor::CBORTokenTag::STRING16: {
- span<uint8_t> wire = tokenizer->GetString16WireRep();
- DCHECK_EQ(wire.size() & 1, 0u);
- std::unique_ptr<Value> value = StringValue::create(StringUtil::fromUTF16LE(
- reinterpret_cast<const uint16_t*>(wire.data()), wire.size() / 2));
- tokenizer->Next();
- return value;
- }
- case cbor::CBORTokenTag::BINARY: {
- span<uint8_t> payload = tokenizer->GetBinary();
- tokenizer->Next();
- return BinaryValue::create(Binary::fromSpan(payload.data(), payload.size()));
- }
- case cbor::CBORTokenTag::MAP_START:
- return parseMap(stack_depth + 1, tokenizer);
- case cbor::CBORTokenTag::ARRAY_START:
- return parseArray(stack_depth + 1, tokenizer);
- default:
- // Error::CBOR_UNSUPPORTED_VALUE
- return nullptr;
+
+ void HandleMapEnd() override {
+ if (!status_.ok()) return;
+ DCHECK(!stack_.empty());
+ DCHECK(stack_.back().is_dict);
+ stack_.pop_back();
+ }
+
+ void HandleArrayBegin() override {
+ if (!status_.ok()) return;
+ std::unique_ptr<ListValue> list = ListValue::create();
+ ListValue* list_ptr = list.get();
+ AddValueToParent(std::move(list));
+ stack_.emplace_back(list_ptr);
+ }
+
+ void HandleArrayEnd() override {
+ if (!status_.ok()) return;
+ DCHECK(!stack_.empty());
+ DCHECK(!stack_.back().is_dict);
+ stack_.pop_back();
+ }
+
+ void HandleString8(span<uint8_t> chars) override {
+ AddStringToParent(StringUtil::fromUTF8(chars.data(), chars.size()));
+ }
+
+ void HandleString16(span<uint16_t> chars) override {
+ AddStringToParent(
+ StringUtil::fromUTF16LE(chars.data(), chars.size()));
+ }
+
+ void HandleBinary(span<uint8_t> bytes) override {
+ AddValueToParent(
+ BinaryValue::create(Binary::fromSpan(bytes.data(), bytes.size())));
+ }
+
+ void HandleDouble(double value) override {
+ AddValueToParent(FundamentalValue::create(value));
+ }
+
+ void HandleInt32(int32_t value) override {
+ AddValueToParent(FundamentalValue::create(value));
+ }
+
+ void HandleBool(bool value) override {
+ AddValueToParent(FundamentalValue::create(value));
+ }
+
+ void HandleNull() override {
+ AddValueToParent(Value::null());
+ }
+
+ void HandleError(Status error) override {
+ status_ = error;
}
-}
-// |bytes| must start with the indefinite length array byte, so basically,
-// ParseArray may only be called after an indefinite length array has been
-// detected.
-std::unique_ptr<DictionaryValue> parseMap(
- int32_t stack_depth, cbor::CBORTokenizer* tokenizer) {
- auto dict = DictionaryValue::create();
- tokenizer->Next();
- while (tokenizer->TokenTag() != cbor::CBORTokenTag::STOP) {
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::DONE) {
- // Error::CBOR_UNEXPECTED_EOF_IN_MAP
- return nullptr;
+ //
+ // Adding strings and values to the parent value.
+ // Strings are handled separately because they can be keys for
+ // dictionary values.
+ //
+ void AddStringToParent(String str) {
+ if (!status_.ok()) return;
+ if (!root_) {
+ DCHECK(!key_is_pending_);
+ root_ = StringValue::create(str);
+ } else if (stack_.back().is_dict) {
+ // If we already have a pending key, then this is the value of the
+ // key/value pair. Otherwise, it's the new pending key.
+ if (key_is_pending_) {
+ stack_.back().dict->setString(pending_key_, str);
+ key_is_pending_ = false;
+ } else {
+ pending_key_ = std::move(str);
+ key_is_pending_ = true;
+ }
+ } else { // Top of the stack is a list.
+ DCHECK(!key_is_pending_);
+ stack_.back().list->pushValue(StringValue::create(str));
}
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::ERROR_VALUE) return nullptr;
- // Parse key.
- String key;
- if (tokenizer->TokenTag() == cbor::CBORTokenTag::STRING8) {
- span<uint8_t> key_span = tokenizer->GetString8();
- key = StringUtil::fromUTF8(key_span.data(), key_span.size());
- tokenizer->Next();
- } else if (tokenizer->TokenTag() == cbor::CBORTokenTag::STRING16) {
- span<uint8_t> key_span = tokenizer->GetString16WireRep();
- if (key_span.size() & 1) return nullptr; // UTF16 is 2 byte multiple.
- key = StringUtil::fromUTF16LE(
- reinterpret_cast<const uint16_t*>(key_span.data()),
- key_span.size() / 2);
- tokenizer->Next();
- } else {
- // Error::CBOR_INVALID_MAP_KEY
- return nullptr;
+ }
+
+ void AddValueToParent(std::unique_ptr<Value> value) {
+ if (!status_.ok()) return;
+ if (!root_) {
+ DCHECK(!key_is_pending_);
+ root_ = std::move(value);
+ } else if (stack_.back().is_dict) {
+ DCHECK(key_is_pending_);
+ stack_.back().dict->setValue(pending_key_, std::move(value));
+ key_is_pending_ = false;
+ } else { // Top of the stack is a list.
+ DCHECK(!key_is_pending_);
+ stack_.back().list->pushValue(std::move(value));
}
- // Parse value.
- auto value = parseValue(stack_depth, tokenizer);
- if (!value) return nullptr;
- dict->setValue(key, std::move(value));
}
- tokenizer->Next();
- return dict;
-}
+ // |status_.ok()| is the default; if we receive an error event
+ // we keep the first one and stop modifying any other state.
+ Status status_;
+
+ // The root of the parsed protocol::Value tree.
+ std::unique_ptr<Value> root_;
+
+ // If root_ is a list or a dictionary, this stack keeps track of
+ // the container we're currently parsing as well as its ancestors.
+ struct ContainerState {
+ ContainerState(DictionaryValue* dict) : is_dict(true), dict(dict) {}
+ ContainerState(ListValue* list) : is_dict(false), list(list) {}
+
+ bool is_dict;
+ union {
+ DictionaryValue* dict;
+ ListValue* list;
+ };
+ };
+ std::vector<ContainerState> stack_;
+
+ // For maps, keys and values are alternating events, so we keep the
+ // key around and process it when the value arrives.
+ bool key_is_pending_ = false;
+ String pending_key_;
+};
} // anonymous namespace
// static
std::unique_ptr<Value> Value::parseBinary(const uint8_t* data, size_t size) {
- span<uint8_t> bytes(data, size);
-
- // Error::CBOR_NO_INPUT
- if (bytes.empty()) return nullptr;
-
- // Error::CBOR_INVALID_START_BYTE
- if (bytes[0] != cbor::InitialByteForEnvelope()) return nullptr;
-
- cbor::CBORTokenizer tokenizer(bytes);
- if (tokenizer.TokenTag() == cbor::CBORTokenTag::ERROR_VALUE) return nullptr;
-
- // We checked for the envelope start byte above, so the tokenizer
- // must agree here, since it's not an error.
- DCHECK(tokenizer.TokenTag() == cbor::CBORTokenTag::ENVELOPE);
- tokenizer.EnterEnvelope();
- // Error::MAP_START_EXPECTED
- if (tokenizer.TokenTag() != cbor::CBORTokenTag::MAP_START) return nullptr;
- std::unique_ptr<Value> result = parseMap(/*stack_depth=*/1, &tokenizer);
- if (!result) return nullptr;
- if (tokenizer.TokenTag() == cbor::CBORTokenTag::DONE) return result;
- if (tokenizer.TokenTag() == cbor::CBORTokenTag::ERROR_VALUE) return nullptr;
- // Error::CBOR_TRAILING_JUNK
+ ValueParserHandler handler;
+ cbor::ParseCBOR(span<uint8_t>(data, size), &handler);
+ // TODO(johannes): We have decent error info in handler.status(); provide
+ // a richer interface that makes this available to client code.
+ if (handler.status().ok())
+ return handler.ReleaseRoot();
return nullptr;
}
@@ -500,7 +491,7 @@ std::unique_ptr<Value> DictionaryValue::clone() const
DCHECK(value != m_data.cend() && value->second);
result->setValue(key, value->second->clone());
}
- return std::move(result);
+ return result;
}
DictionaryValue::DictionaryValue()
@@ -528,7 +519,7 @@ std::unique_ptr<Value> ListValue::clone() const
std::unique_ptr<ListValue> result = ListValue::create();
for (const std::unique_ptr<protocol::Value>& value : m_data)
result->pushValue(value->clone());
- return std::move(result);
+ return result;
}
ListValue::ListValue()
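
The ValueParserHandler above inverts the deleted pull-style CBORTokenizer loop: cbor::ParseCBOR now drives events into the handler, which maintains a stack of open containers plus one pending dictionary key (inside a map, a string event is either the next key or the value for the pending key). A self-contained miniature of the same pattern, with simplified node types instead of the template's protocol::Value classes:

    #include <cassert>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    // Miniature of the ValueParserHandler pattern: the parser pushes events;
    // the handler keeps a stack of open containers and one pending map key.
    struct Node {
      enum Kind { kString, kList, kDict };
      Kind kind = kString;
      std::string str;                                    // kString
      std::vector<std::unique_ptr<Node>> list;            // kList
      std::map<std::string, std::unique_ptr<Node>> dict;  // kDict
    };

    class Builder {
     public:
      void HandleMapBegin() { OpenContainer(Node::kDict); }
      void HandleArrayBegin() { OpenContainer(Node::kList); }
      void HandleMapEnd() { stack_.pop_back(); }
      void HandleArrayEnd() { stack_.pop_back(); }

      void HandleString(std::string s) {
        // Inside a dict with no pending key, a string event *is* the next key.
        if (!stack_.empty() && stack_.back()->kind == Node::kDict &&
            !key_is_pending_) {
          pending_key_ = std::move(s);
          key_is_pending_ = true;
          return;
        }
        auto node = std::make_unique<Node>();
        node->kind = Node::kString;
        node->str = std::move(s);
        Attach(std::move(node));
      }

      std::unique_ptr<Node> ReleaseRoot() { return std::move(root_); }

     private:
      void OpenContainer(Node::Kind kind) {
        auto node = std::make_unique<Node>();
        node->kind = kind;
        Node* raw = node.get();
        Attach(std::move(node));
        stack_.push_back(raw);
      }
      void Attach(std::unique_ptr<Node> node) {
        if (!root_) {
          root_ = std::move(node);
        } else if (stack_.back()->kind == Node::kDict) {
          assert(key_is_pending_);
          stack_.back()->dict[pending_key_] = std::move(node);
          key_is_pending_ = false;
        } else {  // Top of the stack is a list.
          stack_.back()->list.push_back(std::move(node));
        }
      }

      std::unique_ptr<Node> root_;
      std::vector<Node*> stack_;
      std::string pending_key_;
      bool key_is_pending_ = false;
    };

    int main() {
      // Events a parser would emit for {"items": ["a", "b"]}.
      Builder b;
      b.HandleMapBegin();
      b.HandleString("items");
      b.HandleArrayBegin();
      b.HandleString("a");
      b.HandleString("b");
      b.HandleArrayEnd();
      b.HandleMapEnd();
      std::unique_ptr<Node> root = b.ReleaseRoot();
      std::cout << root->dict["items"]->list.size() << "\n";  // prints 2
      return 0;
    }

The same stack discipline is what makes HandleMapEnd/HandleArrayEnd trivial above: closing a container is just a pop, since its parent already owns it.
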
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_h.template b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
index 41d326e71e..8514123fb8 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
@@ -10,6 +10,8 @@
//#include "Allocator.h"
//#include "Forward.h"
+#include {{format_include(config.protocol.package, "Forward")}}
+
{% for namespace in config.protocol.namespace %}
namespace {{namespace}} {
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
index edd3da99a9..b1ec1e475c 100644
--- a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
@@ -1,4 +1,4 @@
-// This file is generated by DispatcherBase_cpp.template.
+// This file is generated by base_string_adapter_cc.template.
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
@@ -59,7 +59,7 @@ std::unique_ptr<protocol::Value> toProtocolValue(
if (converted)
result->pushValue(std::move(converted));
}
- return std::move(result);
+ return result;
}
if (value->is_dict()) {
const base::DictionaryValue* dictionary = nullptr;
@@ -73,7 +73,7 @@ std::unique_ptr<protocol::Value> toProtocolValue(
if (converted)
result->setValue(it.key(), std::move(converted));
}
- return std::move(result);
+ return result;
}
return nullptr;
}
@@ -112,7 +112,7 @@ std::unique_ptr<base::Value> toBaseValue(Value* value, int depth) {
if (converted)
result->Append(std::move(converted));
}
- return std::move(result);
+ return result;
}
if (value->type() == Value::TypeObject) {
DictionaryValue* dict = DictionaryValue::cast(value);
@@ -124,35 +124,11 @@ std::unique_ptr<base::Value> toBaseValue(Value* value, int depth) {
if (converted)
result->SetWithoutPathExpansion(entry.first, std::move(converted));
}
- return std::move(result);
+ return result;
}
return nullptr;
}
-StringBuilder::StringBuilder() {}
-
-StringBuilder::~StringBuilder() {}
-
-void StringBuilder::append(const std::string& s) {
- string_ += s;
-}
-
-void StringBuilder::append(char c) {
- string_ += c;
-}
-
-void StringBuilder::append(const char* characters, size_t length) {
- string_.append(characters, length);
-}
-
-std::string StringBuilder::toString() {
- return string_;
-}
-
-void StringBuilder::reserveCapacity(size_t capacity) {
- string_.reserve(capacity);
-}
-
// In Chromium, we do not support big endian architectures, so no conversion is needed
// to interpret UTF16LE.
// static
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
index 32bd7ea233..ff40aba363 100644
--- a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
@@ -1,4 +1,4 @@
-// This file is generated by Parser_h.template.
+// This file is generated by base_string_adapter_h.template.
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
@@ -14,7 +14,6 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted_memory.h"
-#include "base/strings/string_number_conversions.h"
#include "{{config.crdtp.dir}}/serializable.h"
{% if config.lib.export_header %}
@@ -33,64 +32,8 @@ class Value;
using String = std::string;
-class {{config.lib.export_macro}} StringBuilder {
- public:
- StringBuilder();
- ~StringBuilder();
- void append(const String&);
- void append(char);
- void append(const char*, size_t);
- String toString();
- void reserveCapacity(size_t);
-
- private:
- std::string string_;
-};
-
class {{config.lib.export_macro}} StringUtil {
public:
- static String substring(const String& s, unsigned pos, unsigned len) {
- return s.substr(pos, len);
- }
- static String fromInteger(int number) { return base::NumberToString(number); }
- static String fromDouble(double number) {
- String s = base::NumberToString(number);
- if (!s.empty()) { // .123 -> 0.123; -.123 -> -0.123 for valid JSON.
- if (s[0] == '.')
- s.insert(/*index=*/ 0, /*count=*/ 1, /*ch=*/ '0');
- else if (s[0] == '-' && s.size() >= 2 && s[1] == '.')
- s.insert(/*index=*/ 1, /*count=*/ 1, /*ch=*/ '0');
- }
- return s;
- }
- static double toDouble(const char* s, size_t len, bool* ok) {
- double v = 0.0;
- *ok = base::StringToDouble(std::string(s, len), &v);
- return *ok ? v : 0.0;
- }
- static size_t find(const String& s, const char* needle) {
- return s.find(needle);
- }
- static size_t find(const String& s, const String& needle) {
- return s.find(needle);
- }
- static const size_t kNotFound = static_cast<size_t>(-1);
- static void builderAppend(StringBuilder& builder, const String& s) {
- builder.append(s);
- }
- static void builderAppend(StringBuilder& builder, char c) {
- builder.append(c);
- }
- static void builderAppend(StringBuilder& builder, const char* s, size_t len) {
- builder.append(s, len);
- }
- static void builderReserve(StringBuilder& builder, unsigned capacity) {
- builder.reserveCapacity(capacity);
- }
- static String builderToString(StringBuilder& builder) {
- return builder.toString();
- }
-
static String fromUTF8(const uint8_t* data, size_t length) {
return std::string(reinterpret_cast<const char*>(data), length);
}
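
Among the helpers deleted above, StringUtil::fromDouble patched the base::NumberToString result into valid JSON by inserting a leading zero before a bare decimal point (JSON forbids numbers that begin with '.'); number formatting presumably now lives in the shared crdtp JSON writer. That normalization on its own:

    #include <iostream>
    #include <string>

    // The fix-up the deleted StringUtil::fromDouble applied: JSON does not
    // allow a number to begin with '.', so ".123" and "-.123" gain a '0'.
    std::string NormalizeJsonNumber(std::string s) {
      if (!s.empty()) {
        if (s[0] == '.')
          s.insert(/*index=*/0, /*count=*/1, /*ch=*/'0');
        else if (s[0] == '-' && s.size() >= 2 && s[1] == '.')
          s.insert(/*index=*/1, /*count=*/1, /*ch=*/'0');
      }
      return s;
    }

    int main() {
      std::cout << NormalizeJsonNumber(".123") << "\n";  // 0.123
      std::cout << NormalizeJsonNumber("-.5") << "\n";   // -0.5
      std::cout << NormalizeJsonNumber("42") << "\n";    // 42
      return 0;
    }
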
diff --git a/deps/v8/third_party/inspector_protocol/roll.py b/deps/v8/third_party/inspector_protocol/roll.py
index ccf887a4d3..85765d4350 100755
--- a/deps/v8/third_party/inspector_protocol/roll.py
+++ b/deps/v8/third_party/inspector_protocol/roll.py
@@ -21,6 +21,16 @@ FILES_TO_SYNC = [
'crdtp/cbor.cc',
'crdtp/cbor.h',
'crdtp/cbor_test.cc',
+ 'crdtp/dispatch.h',
+ 'crdtp/dispatch.cc',
+ 'crdtp/dispatch_test.cc',
+ 'crdtp/error_support.cc',
+ 'crdtp/error_support.h',
+ 'crdtp/error_support_test.cc',
+ 'crdtp/export_template.h',
+ 'crdtp/find_by_first.h',
+ 'crdtp/find_by_first_test.cc',
+ 'crdtp/frontend_channel.h',
'crdtp/glue.h',
'crdtp/glue_test.cc',
'crdtp/json.cc',
@@ -33,6 +43,7 @@ FILES_TO_SYNC = [
'crdtp/serializable_test.cc',
'crdtp/serializer_traits.h',
'crdtp/serializer_traits_test.cc',
+ 'crdtp/span.cc',
'crdtp/span.h',
'crdtp/span_test.cc',
'crdtp/status.cc',
diff --git a/deps/v8/third_party/inspector_protocol/templates/Imported_h.template b/deps/v8/third_party/inspector_protocol/templates/Imported_h.template
index b549388c8a..bb1dcf4a20 100644
--- a/deps/v8/third_party/inspector_protocol/templates/Imported_h.template
+++ b/deps/v8/third_party/inspector_protocol/templates/Imported_h.template
@@ -52,7 +52,7 @@ struct ValueConversions<{{"::".join(config.imported.namespace)}}::{{domain.domai
static std::unique_ptr<{{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}> fromValue(protocol::Value* value, ErrorSupport* errors)
{
if (!value) {
- errors->addError("value expected");
+ errors->AddError("value expected");
return nullptr;
}
@@ -60,7 +60,7 @@ struct ValueConversions<{{"::".join(config.imported.namespace)}}::{{domain.domai
value->AppendSerialized(&binary);
auto result = {{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}::fromBinary(binary.data(), binary.size());
if (!result)
- errors->addError("cannot parse");
+ errors->AddError("cannot parse");
return result;
}
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
index eabafba755..f0a3d3ccaf 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
@@ -9,7 +9,9 @@
#include {{format_include(config.protocol.package, "Protocol")}}
#include "{{config.crdtp.dir}}/cbor.h"
+#include "{{config.crdtp.dir}}/find_by_first.h"
#include "{{config.crdtp.dir}}/serializer_traits.h"
+#include "{{config.crdtp.dir}}/span.h"
{% for namespace in config.protocol.namespace %}
namespace {{namespace}} {
@@ -54,27 +56,27 @@ const char* {{type.id}}::{{property.name | to_title_case}}Enum::{{literal | dash
std::unique_ptr<{{type.id}}> {{type.id}}::fromValue(protocol::Value* value, ErrorSupport* errors)
{
if (!value || value->type() != protocol::Value::TypeObject) {
- errors->addError("object expected");
+ errors->AddError("object expected");
return nullptr;
}
std::unique_ptr<{{type.id}}> result(new {{type.id}}());
protocol::DictionaryValue* object = DictionaryValue::cast(value);
- errors->push();
+ errors->Push();
{% for property in type.properties %}
protocol::Value* {{property.name}}Value = object->get("{{property.name}}");
{% if property.optional %}
if ({{property.name}}Value) {
- errors->setName("{{property.name}}");
+ errors->SetName("{{property.name}}");
result->m_{{property.name}} = ValueConversions<{{protocol.resolve_type(property).raw_type}}>::fromValue({{property.name}}Value, errors);
}
{% else %}
- errors->setName("{{property.name}}");
+ errors->SetName("{{property.name}}");
result->m_{{property.name}} = ValueConversions<{{protocol.resolve_type(property).raw_type}}>::fromValue({{property.name}}Value, errors);
{% endif %}
{% endfor %}
- errors->pop();
- if (errors->hasErrors())
+ errors->Pop();
+ if (!errors->Errors().empty())
return nullptr;
return result;
}
@@ -169,7 +171,7 @@ void Frontend::{{event.name | to_method_case}}(
{%- endif %} {{parameter.name}}{%- if not loop.last -%}, {% endif -%}
{% endfor -%})
{
- if (!m_frontendChannel)
+ if (!frontend_channel_)
return;
{% if event.parameters %}
std::unique_ptr<{{event.name | to_title_case}}Notification> messageData = {{event.name | to_title_case}}Notification::{{"create" | to_method_case}}()
@@ -185,69 +187,79 @@ void Frontend::{{event.name | to_method_case}}(
messageData->{{"set" | to_method_case}}{{parameter.name | to_title_case}}(std::move({{parameter.name}}).takeJust());
{% endif %}
{% endfor %}
- m_frontendChannel->sendProtocolNotification(InternalResponse::createNotification("{{domain.domain}}.{{event.name}}", std::move(messageData)));
+ frontend_channel_->SendProtocolNotification({{config.crdtp.namespace}}::CreateNotification("{{domain.domain}}.{{event.name}}", std::move(messageData)));
{% else %}
- m_frontendChannel->sendProtocolNotification(InternalResponse::createNotification("{{domain.domain}}.{{event.name}}"));
+ frontend_channel_->SendProtocolNotification({{config.crdtp.namespace}}::CreateNotification("{{domain.domain}}.{{event.name}}"));
{% endif %}
}
{% endfor %}
void Frontend::flush()
{
- m_frontendChannel->flushProtocolNotifications();
+ frontend_channel_->FlushProtocolNotifications();
}
-void Frontend::sendRawCBORNotification(std::vector<uint8_t> notification)
+void Frontend::sendRawNotification(std::unique_ptr<Serializable> notification)
{
- m_frontendChannel->sendProtocolNotification(InternalRawNotification::fromBinary(std::move(notification)));
+ frontend_channel_->SendProtocolNotification(std::move(notification));
}
// --------------------- Dispatcher.
-class DispatcherImpl : public protocol::DispatcherBase {
+class DomainDispatcherImpl : public protocol::DomainDispatcher {
public:
- DispatcherImpl(FrontendChannel* frontendChannel, Backend* backend)
- : DispatcherBase(frontendChannel)
- , m_backend(backend) {
- {% for command in domain.commands %}
- {% if "redirect" in command %}
- m_redirects["{{domain.domain}}.{{command.name}}"] = "{{command.redirect}}.{{command.name}}";
- {% continue %}
- {% endif %}
- {% if not protocol.generate_command(domain.domain, command.name) %}{% continue %}{% endif %}
- m_dispatchMap["{{domain.domain}}.{{command.name}}"] = &DispatcherImpl::{{command.name}};
- {% endfor %}
- }
- ~DispatcherImpl() override { }
- bool canDispatch(const String& method) override;
- void dispatch(int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message, std::unique_ptr<protocol::DictionaryValue> messageObject) override;
- std::unordered_map<String, String>& redirects() { return m_redirects; }
+ DomainDispatcherImpl(FrontendChannel* frontendChannel, Backend* backend)
+ : DomainDispatcher(frontendChannel)
+ , m_backend(backend) {}
+ ~DomainDispatcherImpl() override { }
+
+ using CallHandler = void (DomainDispatcherImpl::*)(const {{config.crdtp.namespace}}::Dispatchable& dispatchable, DictionaryValue* params, ErrorSupport* errors);
-protected:
- using CallHandler = void (DispatcherImpl::*)(int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message, std::unique_ptr<DictionaryValue> messageObject, ErrorSupport* errors);
- using DispatchMap = std::unordered_map<String, CallHandler>;
- DispatchMap m_dispatchMap;
- std::unordered_map<String, String> m_redirects;
+ std::function<void(const {{config.crdtp.namespace}}::Dispatchable&)> Dispatch({{config.crdtp.namespace}}::span<uint8_t> command_name) override;
{% for command in domain.commands %}
{% if "redirect" in command %}{% continue %}{% endif %}
{% if not protocol.generate_command(domain.domain, command.name) %}{% continue %}{% endif %}
- void {{command.name}}(int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport*);
+ void {{command.name}}(const {{config.crdtp.namespace}}::Dispatchable& dispatchable, DictionaryValue* params, ErrorSupport* errors);
{% endfor %}
-
+ protected:
Backend* m_backend;
};
-bool DispatcherImpl::canDispatch(const String& method) {
- return m_dispatchMap.find(method) != m_dispatchMap.end();
+namespace {
+// This helper function looks up a command handler (an instance method of
+// DomainDispatcherImpl declared just above) by command name; it is used
+// immediately below, in DomainDispatcherImpl::Dispatch.
+DomainDispatcherImpl::CallHandler CommandByName({{config.crdtp.namespace}}::span<uint8_t> command_name) {
+ static auto* commands = [](){
+ auto* commands = new std::vector<std::pair<{{config.crdtp.namespace}}::span<uint8_t>,
+ DomainDispatcherImpl::CallHandler>>{
+ {% for command in domain.commands|sort(attribute="name",case_sensitive=True) %}
+ {% if "redirect" in command %}{% continue %}{% endif %}
+ {% if not protocol.generate_command(domain.domain, command.name) %}{% continue %}{% endif %}
+ {
+ {{config.crdtp.namespace}}::SpanFrom("{{command.name}}"),
+ &DomainDispatcherImpl::{{command.name}}
+ },
+ {% endfor %}
+ };
+ return commands;
+ }();
+ return {{config.crdtp.namespace}}::FindByFirst<DomainDispatcherImpl::CallHandler>(*commands, command_name, nullptr);
}
-
-void DispatcherImpl::dispatch(int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message, std::unique_ptr<protocol::DictionaryValue> messageObject)
-{
- std::unordered_map<String, CallHandler>::iterator it = m_dispatchMap.find(method);
- DCHECK(it != m_dispatchMap.end());
- protocol::ErrorSupport errors;
- (this->*(it->second))(callId, method, message, std::move(messageObject), &errors);
+} // namespace
+
+std::function<void(const {{config.crdtp.namespace}}::Dispatchable&)> DomainDispatcherImpl::Dispatch({{config.crdtp.namespace}}::span<uint8_t> command_name) {
+ CallHandler handler = CommandByName(command_name);
+ if (!handler) return nullptr;
+ return [this, handler](const {{config.crdtp.namespace}}::Dispatchable& dispatchable){
+ std::unique_ptr<DictionaryValue> params =
+ DictionaryValue::cast(protocol::Value::parseBinary(dispatchable.Params().data(),
+ dispatchable.Params().size()));
+ ErrorSupport errors;
+ errors.Push();
+ (this->*handler)(dispatchable, params.get(), &errors);
+ };
}
{% for command in domain.commands %}
@@ -256,10 +268,11 @@ void DispatcherImpl::dispatch(int callId, const String& method, {{config.crdtp.n
{% if not protocol.generate_command(domain.domain, command.name) %}{% continue %}{% endif %}
{% if protocol.is_async_command(domain.domain, command.name) %}
-class {{command_name_title}}CallbackImpl : public Backend::{{command_name_title}}Callback, public DispatcherBase::Callback {
+class {{command_name_title}}CallbackImpl : public Backend::{{command_name_title}}Callback, public DomainDispatcher::Callback {
public:
- {{command_name_title}}CallbackImpl(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message)
- : DispatcherBase::Callback(std::move(backendImpl), callId, method, message) { }
+ {{command_name_title}}CallbackImpl(std::unique_ptr<DomainDispatcher::WeakPtr> backendImpl, int callId, {{config.crdtp.namespace}}::span<uint8_t> message)
+ : DomainDispatcher::Callback(std::move(backendImpl), callId,
+{{config.crdtp.namespace}}::SpanFrom("{{domain.domain}}.{{command.name}}"), message) { }
void sendSuccess(
{%- for parameter in command.returns -%}
@@ -271,16 +284,16 @@ public:
{%- if not loop.last -%}, {% endif -%}
{%- endfor -%}) override
{
- std::unique_ptr<protocol::DictionaryValue> resultObject = DictionaryValue::create();
- {% for parameter in command.returns %}
- {% if "optional" in parameter %}
- if ({{parameter.name}}.isJust())
- resultObject->setValue("{{parameter.name}}", ValueConversions<{{protocol.resolve_type(parameter).raw_type}}>::toValue({{parameter.name}}.fromJust()));
- {% else %}
- resultObject->setValue("{{parameter.name}}", ValueConversions<{{protocol.resolve_type(parameter).raw_type}}>::toValue({{protocol.resolve_type(parameter).to_raw_type % parameter.name}}));
- {% endif %}
- {% endfor %}
- sendIfActive(std::move(resultObject), DispatchResponse::OK());
+ std::vector<uint8_t> result_buffer;
+ {{config.crdtp.namespace}}::cbor::EnvelopeEncoder envelope_encoder;
+ envelope_encoder.EncodeStart(&result_buffer);
+ result_buffer.push_back({{config.crdtp.namespace}}::cbor::EncodeIndefiniteLengthMapStart());
+ {% for parameter in command.returns %}
+ {{config.crdtp.namespace}}::SerializeField({{config.crdtp.namespace}}::SpanFrom("{{parameter.name}}"), {{parameter.name}}, &result_buffer);
+ {% endfor %}
+ result_buffer.push_back({{config.crdtp.namespace}}::cbor::EncodeStop());
+ envelope_encoder.EncodeStop(&result_buffer);
+ sendIfActive({{config.crdtp.namespace}}::Serializable::From(std::move(result_buffer)), DispatchResponse::Success());
}
void fallThrough() override
@@ -290,37 +303,31 @@ public:
void sendFailure(const DispatchResponse& response) override
{
- DCHECK(response.status() == DispatchResponse::kError);
+ DCHECK(response.IsError());
sendIfActive(nullptr, response);
}
};
{% endif %}
-void DispatcherImpl::{{command.name}}(int callId, const String& method, {{config.crdtp.namespace}}::span<uint8_t> message, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport* errors)
+void DomainDispatcherImpl::{{command.name}}(const {{config.crdtp.namespace}}::Dispatchable& dispatchable, DictionaryValue* params, ErrorSupport* errors)
{
{% if "parameters" in command %}
// Prepare input parameters.
- protocol::DictionaryValue* object = DictionaryValue::cast(requestMessageObject->get("params"));
- errors->push();
{% for parameter in command.parameters %}
{% set parameter_type = protocol.resolve_type(parameter) %}
- protocol::Value* {{parameter.name}}Value = object ? object->get("{{parameter.name}}") : nullptr;
+ protocol::Value* {{parameter.name}}Value = params ? params->get("{{parameter.name}}") : nullptr;
{% if parameter.optional %}
Maybe<{{parameter_type.raw_type}}> in_{{parameter.name}};
if ({{parameter.name}}Value) {
- errors->setName("{{parameter.name}}");
+ errors->SetName("{{parameter.name}}");
in_{{parameter.name}} = ValueConversions<{{parameter_type.raw_type}}>::fromValue({{parameter.name}}Value, errors);
}
{% else %}
- errors->setName("{{parameter.name}}");
+ errors->SetName("{{parameter.name}}");
{{parameter_type.type}} in_{{parameter.name}} = ValueConversions<{{parameter_type.raw_type}}>::fromValue({{parameter.name}}Value, errors);
{% endif %}
{% endfor %}
- errors->pop();
- if (errors->hasErrors()) {
- reportProtocolError(callId, DispatchResponse::kInvalidParams, kInvalidParamsString, errors);
- return;
- }
+ if (MaybeReportInvalidParams(dispatchable, *errors)) return;
{% endif %}
{% if "returns" in command and not protocol.is_async_command(domain.domain, command.name) %}
// Declare output parameters.
@@ -334,7 +341,7 @@ void DispatcherImpl::{{command.name}}(int callId, const String& method, {{config
{% endif %}
{% if not protocol.is_async_command(domain.domain, command.name) %}
- std::unique_ptr<DispatcherBase::WeakPtr> weak = weakPtr();
+ std::unique_ptr<DomainDispatcher::WeakPtr> weak = weakPtr();
DispatchResponse response = m_backend->{{command.name | to_method_case}}(
{%- for parameter in command.parameters -%}
{%- if not loop.first -%}, {% endif -%}
@@ -350,31 +357,31 @@ void DispatcherImpl::{{command.name}}(int callId, const String& method, {{config
&out_{{parameter.name}}
{%- endfor %}
{% endif %});
- if (response.status() == DispatchResponse::kFallThrough) {
- channel()->fallThrough(callId, method, message);
+ if (response.IsFallThrough()) {
+ channel()->FallThrough(dispatchable.CallId(), {{config.crdtp.namespace}}::SpanFrom("{{domain.domain}}.{{command.name}}"), dispatchable.Serialized());
return;
}
{% if "returns" in command %}
- std::unique_ptr<protocol::DictionaryValue> result = DictionaryValue::create();
- if (response.status() == DispatchResponse::kSuccess) {
- {% for parameter in command.returns %}
- {% if "optional" in parameter %}
- if (out_{{parameter.name}}.isJust())
- result->setValue("{{parameter.name}}", ValueConversions<{{protocol.resolve_type(parameter).raw_type}}>::toValue(out_{{parameter.name}}.fromJust()));
- {% else %}
- result->setValue("{{parameter.name}}", ValueConversions<{{protocol.resolve_type(parameter).raw_type}}>::toValue({{protocol.resolve_type(parameter).to_raw_type % ("out_" + parameter.name)}}));
- {% endif %}
- {% endfor %}
- }
- if (weak->get())
- weak->get()->sendResponse(callId, response, std::move(result));
+ if (weak->get()) {
+ std::vector<uint8_t> result;
+ if (response.IsSuccess()) {
+ {{config.crdtp.namespace}}::cbor::EnvelopeEncoder envelope_encoder;
+ envelope_encoder.EncodeStart(&result);
+ result.push_back({{config.crdtp.namespace}}::cbor::EncodeIndefiniteLengthMapStart());
+ {% for parameter in command.returns %}
+ {{config.crdtp.namespace}}::SerializeField({{config.crdtp.namespace}}::SpanFrom("{{parameter.name}}"), out_{{parameter.name}}, &result);
+ {% endfor %}
+ result.push_back({{config.crdtp.namespace}}::cbor::EncodeStop());
+ envelope_encoder.EncodeStop(&result);
+ }
+ weak->get()->sendResponse(dispatchable.CallId(), response, {{config.crdtp.namespace}}::Serializable::From(std::move(result)));
+ }
{% else %}
if (weak->get())
- weak->get()->sendResponse(callId, response);
+ weak->get()->sendResponse(dispatchable.CallId(), response);
{% endif %}
return;
{% else %}
- std::unique_ptr<{{command_name_title}}CallbackImpl> callback(new {{command.name | to_title_case}}CallbackImpl(weakPtr(), callId, method, message));
m_backend->{{command.name | to_method_case}}(
{%- for property in command.parameters -%}
{%- if not loop.first -%}, {% endif -%}
@@ -385,18 +392,34 @@ void DispatcherImpl::{{command.name}}(int callId, const String& method, {{config
{%- endif -%}
{%- endfor -%}
{%- if command.parameters -%}, {% endif -%}
- std::move(callback));
- return;
+ std::make_unique<{{command_name_title}}CallbackImpl>(weakPtr(), dispatchable.CallId(), dispatchable.Serialized()));
{% endif %}
}
{% endfor %}
+namespace {
+// This helper function (returning a static, sorted list of redirects) is
+// used from Dispatcher::wire immediately below.
+const std::vector<std::pair<{{config.crdtp.namespace}}::span<uint8_t>, {{config.crdtp.namespace}}::span<uint8_t>>>& SortedRedirects() {
+ static auto* redirects = [](){
+ auto* redirects = new std::vector<std::pair<{{config.crdtp.namespace}}::span<uint8_t>, {{config.crdtp.namespace}}::span<uint8_t>>>{
+ {% for command in domain.commands|sort(attribute="name",case_sensitive=True) %}
+ {% if "redirect" in command %}
+ { {{config.crdtp.namespace}}::SpanFrom("{{domain.domain}}.{{command.name}}"), {{config.crdtp.namespace}}::SpanFrom("{{command.redirect}}.{{command.name}}") },
+ {% endif %}
+ {% endfor %}
+ };
+ return redirects;
+ }();
+ return *redirects;
+}
+} // namespace
+
// static
void Dispatcher::wire(UberDispatcher* uber, Backend* backend)
{
- std::unique_ptr<DispatcherImpl> dispatcher(new DispatcherImpl(uber->channel(), backend));
- uber->setupRedirects(dispatcher->redirects());
- uber->registerBackend("{{domain.domain}}", std::move(dispatcher));
+ auto dispatcher = std::make_unique<DomainDispatcherImpl>(uber->channel(), backend);
+ uber->WireBackend({{config.crdtp.namespace}}::SpanFrom("{{domain.domain}}"), SortedRedirects(), std::move(dispatcher));
}
} // {{domain.domain}}
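
CommandByName and SortedRedirects above both build a function-local static vector of pairs sorted by name (note the sort(attribute="name", case_sensitive=True) filter in the template loops) and hand it to crdtp::FindByFirst, which presumably binary-searches on the first element of each pair. A minimal equivalent using std::string_view keys instead of crdtp spans:

    #include <algorithm>
    #include <iostream>
    #include <string_view>
    #include <utility>
    #include <vector>

    // Minimal FindByFirst equivalent: binary-search a vector of pairs that is
    // sorted by .first; return |default_value| when the key is absent.
    template <typename T>
    T FindByFirst(const std::vector<std::pair<std::string_view, T>>& sorted,
                  std::string_view key, T default_value) {
      auto it = std::lower_bound(
          sorted.begin(), sorted.end(), key,
          [](const std::pair<std::string_view, T>& entry, std::string_view k) {
            return entry.first < k;
          });
      return (it != sorted.end() && it->first == key) ? it->second
                                                      : default_value;
    }

    int main() {
      using Handler = void (*)();
      // Must stay sorted by name, as the template's case-sensitive sort ensures.
      const std::vector<std::pair<std::string_view, Handler>> commands = {
          {"disable", [] { std::cout << "disable called\n"; }},
          {"enable", [] { std::cout << "enable called\n"; }},
      };
      if (Handler handler = FindByFirst<Handler>(commands, "enable", nullptr))
        handler();
      return 0;
    }

Sorting the table at code-generation time keeps every dispatch an O(log n) search with no per-process map construction beyond the one-time static vector.
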
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
index 08615f8828..bc3998e4a2 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
@@ -239,7 +239,7 @@ public:
{% if protocol.generate_disable(domain) %}
virtual DispatchResponse {{"disable" | to_method_case}}()
{
- return DispatchResponse::OK();
+ return DispatchResponse::Success();
}
{% endif %}
};
@@ -248,7 +248,7 @@ public:
class {{config.protocol.export_macro}} Frontend {
public:
- explicit Frontend(FrontendChannel* frontendChannel) : m_frontendChannel(frontendChannel) { }
+ explicit Frontend(FrontendChannel* frontend_channel) : frontend_channel_(frontend_channel) {}
{% for event in domain.events %}
{% if not protocol.generate_event(domain.domain, event.name) %}{% continue %}{% endif %}
void {{event.name | to_method_case}}(
@@ -262,10 +262,10 @@ public:
);
{% endfor %}
- void flush();
- void sendRawCBORNotification(std::vector<uint8_t>);
-private:
- FrontendChannel* m_frontendChannel;
+ void flush();
+ void sendRawNotification(std::unique_ptr<Serializable>);
+ private:
+ FrontendChannel* frontend_channel_;
};
// ------------- Dispatcher.
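
With this roll, responses and notifications are emitted as pre-encoded CBOR (note sendRawNotification now taking a Serializable above). In TypeBuilder_cpp.template, the callback writes an envelope via EnvelopeEncoder::EncodeStart, appends the result map, then calls EncodeStop; the underlying pattern is reserving a length placeholder and backfilling it once the payload size is known. A sketch of that pattern with illustrative header bytes, not crdtp's actual CBOR envelope framing:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Reserve space for a 32-bit big-endian payload length up front, append
    // the payload, then backfill the length. Bytes are illustrative, not
    // crdtp's actual CBOR envelope framing.
    class EnvelopeSketch {
     public:
      void EncodeStart(std::vector<uint8_t>* out) {
        size_pos_ = out->size();
        out->insert(out->end(), {0, 0, 0, 0});  // length placeholder
      }
      void EncodeStop(std::vector<uint8_t>* out) {
        const uint32_t payload =
            static_cast<uint32_t>(out->size() - size_pos_ - 4);
        for (int i = 0; i < 4; ++i)
          (*out)[size_pos_ + i] = static_cast<uint8_t>(payload >> (24 - 8 * i));
      }

     private:
      std::size_t size_pos_ = 0;
    };

    int main() {
      std::vector<uint8_t> buffer;
      EnvelopeSketch envelope;
      envelope.EncodeStart(&buffer);
      buffer.push_back(0xbf);  // stand-in for EncodeIndefiniteLengthMapStart()
      buffer.push_back(0xff);  // stand-in for the map-terminating EncodeStop()
      envelope.EncodeStop(&buffer);
      std::printf("%zu bytes, payload length %u\n", buffer.size(),
                  static_cast<unsigned>(buffer[3]));
      return 0;
    }
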
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index d920599e6e..2d9f33a312 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -14,7 +14,7 @@
// https://github.com/python/cpython/blob/master/Objects/listsort.txt
namespace array {
- class SortState extends Struct {
+ class SortState extends HeapObject {
macro Compare(implicit context: Context)(x: JSAny, y: JSAny): Number {
const sortCompare: CompareBuiltinFn = this.sortComparePtr;
return sortCompare(context, this.userCmpFn, x, y);
diff --git a/deps/v8/third_party/zlib/BUILD.gn b/deps/v8/third_party/zlib/BUILD.gn
index 2414c8826f..1f572378e0 100644
--- a/deps/v8/third_party/zlib/BUILD.gn
+++ b/deps/v8/third_party/zlib/BUILD.gn
@@ -17,7 +17,10 @@ config("zlib_internal_config") {
}
use_arm_neon_optimizations = false
-if (current_cpu == "arm" || current_cpu == "arm64") {
+if ((current_cpu == "arm" || current_cpu == "arm64") &&
+ !(is_win && !is_clang)) {
+ # TODO(richard.townsend@arm.com): Optimizations temporarily disabled for
+ # Windows on Arm MSVC builds, see http://crbug.com/v8/10012.
if (arm_use_neon) {
use_arm_neon_optimizations = true
}
diff --git a/deps/v8/third_party/zlib/contrib/minizip/iowin32.c b/deps/v8/third_party/zlib/contrib/minizip/iowin32.c
index 246ceb91a1..c6bc314b3c 100644
--- a/deps/v8/third_party/zlib/contrib/minizip/iowin32.c
+++ b/deps/v8/third_party/zlib/contrib/minizip/iowin32.c
@@ -31,14 +31,12 @@
#define _WIN32_WINNT 0x601
#endif
-#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
-// see Include/shared/winapifamily.h in the Windows Kit
-#if defined(WINAPI_FAMILY_PARTITION) && (!(defined(IOWIN32_USING_WINRT_API)))
-#if WINAPI_FAMILY_ONE_PARTITION(WINAPI_FAMILY, WINAPI_PARTITION_APP)
+#if !defined(IOWIN32_USING_WINRT_API)
+#if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP)
+// Windows Store or Universal Windows Platform
#define IOWIN32_USING_WINRT_API 1
#endif
#endif
-#endif
voidpf ZCALLBACK win32_open_file_func OF((voidpf opaque, const char* filename, int mode));
uLong ZCALLBACK win32_read_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size));
diff --git a/deps/v8/third_party/zlib/cpu_features.c b/deps/v8/third_party/zlib/cpu_features.c
index 8a25dd29c8..ceed98822a 100644
--- a/deps/v8/third_party/zlib/cpu_features.c
+++ b/deps/v8/third_party/zlib/cpu_features.c
@@ -22,6 +22,8 @@ int ZLIB_INTERNAL arm_cpu_enable_pmull = 0;
int ZLIB_INTERNAL x86_cpu_enable_ssse3 = 0;
int ZLIB_INTERNAL x86_cpu_enable_simd = 0;
+#ifndef CPU_NO_SIMD
+
#if defined(ARMV8_OS_ANDROID) || defined(ARMV8_OS_LINUX) || defined(ARMV8_OS_FUCHSIA)
#include <pthread.h>
#endif
@@ -143,3 +145,4 @@ static void _cpu_check_features(void)
}
#endif
#endif
+#endif
\ No newline at end of file
diff --git a/deps/v8/third_party/zlib/crc32_simd.h b/deps/v8/third_party/zlib/crc32_simd.h
index 315c2a45f4..68bc235cbe 100644
--- a/deps/v8/third_party/zlib/crc32_simd.h
+++ b/deps/v8/third_party/zlib/crc32_simd.h
@@ -33,3 +33,4 @@ uint32_t ZLIB_INTERNAL crc32_sse42_simd_(
uint32_t ZLIB_INTERNAL armv8_crc32_little(unsigned long crc,
const unsigned char* buf,
z_size_t len);
+
diff --git a/deps/v8/third_party/zlib/deflate.c b/deps/v8/third_party/zlib/deflate.c
index a20c42af7a..744d8558e2 100644
--- a/deps/v8/third_party/zlib/deflate.c
+++ b/deps/v8/third_party/zlib/deflate.c
@@ -1213,7 +1213,7 @@ ZLIB_INTERNAL unsigned deflate_read_buf(strm, buf, size)
#ifdef GZIP
if (strm->state->wrap == 2)
copy_with_crc(strm, buf, len);
- else
+ else
#endif
{
zmemcpy(buf, strm->next_in, len);
diff --git a/deps/v8/third_party/zlib/patches/0000-build.patch b/deps/v8/third_party/zlib/patches/0000-build.patch
index 1e005375f5..b7c5996482 100644
--- a/deps/v8/third_party/zlib/patches/0000-build.patch
+++ b/deps/v8/third_party/zlib/patches/0000-build.patch
@@ -3,12 +3,12 @@ index 8dcbdb06e35a..c1b7a54847f5 100644
--- a/contrib/minizip/ioapi.h
+++ b/contrib/minizip/ioapi.h
@@ -43,7 +43,7 @@
-
+
#include <stdio.h>
#include <stdlib.h>
-#include "zlib.h"
+#include "third_party/zlib/zlib.h"
-
+
#if defined(USE_FILE32API)
#define fopen64 fopen
diff --git a/contrib/minizip/iowin32.c b/contrib/minizip/iowin32.c
@@ -17,8 +17,8 @@ index 274f39eb1dd2..246ceb91a139 100644
+++ b/contrib/minizip/iowin32.c
@@ -26,12 +26,19 @@
#endif
-
-
+
+
+#ifdef _WIN32_WINNT
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x601
@@ -32,7 +32,7 @@ index 274f39eb1dd2..246ceb91a139 100644
#endif
#endif
+#endif
-
+
voidpf ZCALLBACK win32_open_file_func OF((voidpf opaque, const char* filename, int mode));
uLong ZCALLBACK win32_read_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size));
diff --git a/contrib/minizip/mztools.c b/contrib/minizip/mztools.c
@@ -46,7 +46,7 @@ index 96891c2e0b71..8bf9cca32633 100644
-#include "zlib.h"
+#include "third_party/zlib/zlib.h"
#include "unzip.h"
-
+
#define READ_8(adr) ((unsigned char)*(adr))
diff --git a/contrib/minizip/mztools.h b/contrib/minizip/mztools.h
index a49a426ec2fc..f295ffeda6af 100644
@@ -54,12 +54,12 @@ index a49a426ec2fc..f295ffeda6af 100644
+++ b/contrib/minizip/mztools.h
@@ -12,7 +12,7 @@ extern "C" {
#endif
-
+
#ifndef _ZLIB_H
-#include "zlib.h"
+#include "third_party/zlib/zlib.h"
#endif
-
+
#include "unzip.h"
diff --git a/contrib/minizip/unzip.c b/contrib/minizip/unzip.c
index bcfb9416ec35..199b4723fcfc 100644
@@ -68,16 +68,16 @@ index bcfb9416ec35..199b4723fcfc 100644
@@ -72,7 +72,7 @@
#define NOUNCRYPT
#endif
-
+
-#include "zlib.h"
+#include "third_party/zlib/zlib.h"
#include "unzip.h"
-
+
#ifdef STDC
@@ -1705,11 +1705,6 @@ extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len)
-
+
pfile_in_zip_read_info->stream.avail_out = (uInt)len;
-
+
- if ((len>pfile_in_zip_read_info->rest_read_uncompressed) &&
- (!(pfile_in_zip_read_info->raw)))
- pfile_in_zip_read_info->stream.avail_out =
@@ -92,12 +92,12 @@ index 2104e3915074..3c0143529b91 100644
+++ b/contrib/minizip/unzip.h
@@ -48,7 +48,7 @@ extern "C" {
#endif
-
+
#ifndef _ZLIB_H
-#include "zlib.h"
+#include "third_party/zlib/zlib.h"
#endif
-
+
#ifndef _ZLIBIOAPI_H
diff --git a/contrib/minizip/zip.c b/contrib/minizip/zip.c
index 44e88a9cb989..65c0c7251843 100644
@@ -110,7 +110,7 @@ index 44e88a9cb989..65c0c7251843 100644
-#include "zlib.h"
+#include "third_party/zlib/zlib.h"
#include "zip.h"
-
+
#ifdef STDC
diff --git a/contrib/minizip/zip.h b/contrib/minizip/zip.h
index 8aaebb623430..8c06c0aa7bb0 100644
@@ -118,12 +118,12 @@ index 8aaebb623430..8c06c0aa7bb0 100644
+++ b/contrib/minizip/zip.h
@@ -47,7 +47,7 @@ extern "C" {
//#define HAVE_BZIP2
-
+
#ifndef _ZLIB_H
-#include "zlib.h"
+#include "third_party/zlib/zlib.h"
#endif
-
+
#ifndef _ZLIBIOAPI_H
diff --git a/gzread.c b/gzread.c
index 956b91ea7d9e..832d3ef98c59 100644
@@ -148,7 +148,7 @@ index 5e1d68a004e9..a7a815f575a7 100644
@@ -8,6 +8,10 @@
#ifndef ZCONF_H
#define ZCONF_H
-
+
+/*
+ * This library is also built as a part of AOSP, which does not need to include
+ * chromeconf.h. This config does not want chromeconf.h, so it can set this
@@ -167,12 +167,12 @@ index 5e1d68a004e9..a7a815f575a7 100644
@@ -431,7 +434,7 @@ typedef uLong FAR uLongf;
typedef unsigned long z_crc_t;
#endif
-
+
-#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */
+#if !defined(_WIN32)
# define Z_HAVE_UNISTD_H
#endif
-
+
diff --git a/zlib.h b/zlib.h
index f09cdaf1e054..99fd467f6b1a 100644
--- a/zlib.h
@@ -242,6 +242,6 @@ index b079ea6a80f5..80375b8b6109 100644
+# include <errno.h>
+# endif
+#endif
-
+
#ifdef Z_SOLO
typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */
diff --git a/deps/v8/third_party/zlib/patches/0001-simd.patch b/deps/v8/third_party/zlib/patches/0001-simd.patch
index 5e88c46bb8..9434ca0cc4 100644
--- a/deps/v8/third_party/zlib/patches/0001-simd.patch
+++ b/deps/v8/third_party/zlib/patches/0001-simd.patch
@@ -5,11 +5,11 @@ index 9580440c0e6b..9162429cc7b4 100644
@@ -28,6 +28,8 @@
# endif /* !DYNAMIC_CRC_TABLE */
#endif /* MAKECRCH */
-
+
+#include "deflate.h"
+#include "x86.h"
#include "zutil.h" /* for STDC and FAR definitions */
-
+
/* Definitions for doing the crc four data bytes at a time. */
@@ -440,3 +442,28 @@ uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
{
@@ -545,13 +545,13 @@ index 1ec761448de9..aa0c9c67a6dc 100644
+++ b/deflate.c
@@ -48,8 +48,9 @@
*/
-
+
/* @(#) $Id$ */
-
+#include <assert.h>
#include "deflate.h"
+#include "x86.h"
-
+
const char deflate_copyright[] =
" deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler ";
@@ -86,7 +87,7 @@ local block_state deflate_huff OF((deflate_state *s, int flush));
@@ -566,7 +566,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
@@ -100,6 +101,20 @@ local void check_match OF((deflate_state *s, IPos start, IPos match,
int length));
#endif
-
+
+/* From crc32.c */
+extern void ZLIB_INTERNAL crc_reset(deflate_state *const s);
+extern void ZLIB_INTERNAL crc_finalize(deflate_state *const s);
@@ -587,7 +587,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
@@ -162,7 +177,6 @@ local const config configuration_table[10] = {
*/
#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
-
+
-
/* ===========================================================================
* Insert string str in the dictionary and set match_head to the previous head
@@ -626,7 +626,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
+ return insert_string_c(s, str);
+}
+
-
+
/* ===========================================================================
* Initialize the hash table (avoiding 64K overflow for 16 bit systems).
@@ -248,6 +273,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
@@ -640,7 +640,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
@@ -257,6 +283,8 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
* output size for (length,distance) codes is <= 24 bits.
*/
-
+
+ x86_check_features();
+
if (version == Z_NULL || version[0] != my_version[0] ||
@@ -649,7 +649,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
@@ -313,12 +341,19 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
s->w_size = 1 << s->w_bits;
s->w_mask = s->w_size - 1;
-
+
- s->hash_bits = (uInt)memLevel + 7;
+ if (x86_cpu_enable_simd) {
+ s->hash_bits = 15;
@@ -660,14 +660,14 @@ index 1ec761448de9..aa0c9c67a6dc 100644
s->hash_size = 1 << s->hash_bits;
s->hash_mask = s->hash_size - 1;
s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
-
+
- s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
+ s->window = (Bytef *) ZALLOC(strm,
+ s->w_size + window_padding,
+ 2*sizeof(Byte));
s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
-
+
@@ -418,11 +453,7 @@ int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
str = s->strstart;
n = s->lookahead - (MIN_MATCH-1);
@@ -708,9 +708,9 @@ index 1ec761448de9..aa0c9c67a6dc 100644
Bytef *buf;
unsigned size;
@@ -1173,15 +1205,16 @@ local unsigned read_buf(strm, buf, size)
-
+
strm->avail_in -= len;
-
+
- zmemcpy(buf, strm->next_in, len);
- if (strm->state->wrap == 1) {
- strm->adler = adler32(strm->adler, buf, len);
@@ -721,7 +721,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
- }
+ if (strm->state->wrap == 2)
+ copy_with_crc(strm, buf, len);
-+ else
++ else
#endif
+ {
+ zmemcpy(buf, strm->next_in, len);
@@ -730,7 +730,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
+ }
strm->next_in += len;
strm->total_in += len;
-
+
@@ -1479,7 +1512,19 @@ local void check_match(s, start, match, length)
* performed for at least two bytes (required for the zip translate_eol
* option -- not supported here).
@@ -759,7 +759,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
- INSERT_STRING(s, s->strstart, hash_head);
+ hash_head = insert_string(s, s->strstart);
}
-
+
/* Find the longest match, discarding those <= prev_length.
@@ -1878,7 +1923,7 @@ local block_state deflate_fast(s, flush)
s->match_length--; /* string at strstart already in table */
@@ -777,7 +777,7 @@ index 1ec761448de9..aa0c9c67a6dc 100644
- INSERT_STRING(s, s->strstart, hash_head);
+ hash_head = insert_string(s, s->strstart);
}
-
+
/* Find the longest match, discarding those <= prev_length.
@@ -2001,7 +2046,7 @@ local block_state deflate_slow(s, flush)
s->prev_length -= 2;
@@ -837,12 +837,12 @@ index 23ecdd312bc0..ab56df7663b6 100644
-
+ unsigned zalign(16) crc0[4 * 5];
/* used by deflate.c: */
-
+
uInt w_size; /* LZ77 window size (32K by default) */
@@ -346,4 +346,14 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf,
flush = _tr_tally(s, distance, length)
#endif
-
+
+/* Functions that are SIMD optimised on x86 */
+void ZLIB_INTERNAL crc_fold_init(deflate_state* const s);
+void ZLIB_INTERNAL crc_fold_copy(deflate_state* const s,
@@ -1090,7 +1090,7 @@ index 000000000000..e56fe8b85a39
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ * Author:
+ * Jim Kukunas
-+ *
++ *
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
@@ -1204,7 +1204,7 @@ index 80375b8b6109..4425bcf75eb3 100644
@@ -283,4 +283,10 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
#define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
(((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
-
+
+#ifdef _MSC_VER
+#define zalign(x) __declspec(align(x))
+#else
diff --git a/deps/v8/third_party/zlib/patches/0003-uninitializedjump.patch b/deps/v8/third_party/zlib/patches/0003-uninitializedjump.patch
index 5208319f70..7aae3238a5 100644
--- a/deps/v8/third_party/zlib/patches/0003-uninitializedjump.patch
+++ b/deps/v8/third_party/zlib/patches/0003-uninitializedjump.patch
@@ -11,5 +11,5 @@ index a39e62787862..c6053fd1c7ea 100644
+ */
+ zmemzero(s->prev, s->w_size * sizeof(Pos));
s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
-
+
s->high_water = 0; /* nothing written to s->window yet */
diff --git a/deps/v8/third_party/zlib/patches/0004-fix-uwp.patch b/deps/v8/third_party/zlib/patches/0004-fix-uwp.patch
new file mode 100644
index 0000000000..23145a7ae5
--- /dev/null
+++ b/deps/v8/third_party/zlib/patches/0004-fix-uwp.patch
@@ -0,0 +1,22 @@
+diff --git a/third_party/zlib/contrib/minizip/iowin32.c b/third_party/zlib/contrib/minizip/iowin32.c
+index 246ceb91a139..c6bc314b3c28 100644
+--- a/third_party/zlib/contrib/minizip/iowin32.c
++++ b/third_party/zlib/contrib/minizip/iowin32.c
+@@ -31,14 +31,12 @@
+ #define _WIN32_WINNT 0x601
+ #endif
+
+-#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
+-// see Include/shared/winapifamily.h in the Windows Kit
+-#if defined(WINAPI_FAMILY_PARTITION) && (!(defined(IOWIN32_USING_WINRT_API)))
+-#if WINAPI_FAMILY_ONE_PARTITION(WINAPI_FAMILY, WINAPI_PARTITION_APP)
++#if !defined(IOWIN32_USING_WINRT_API)
++#if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP)
++// Windows Store or Universal Windows Platform
+ #define IOWIN32_USING_WINRT_API 1
+ #endif
+ #endif
+-#endif
+
+ voidpf ZCALLBACK win32_open_file_func OF((voidpf opaque, const char* filename, int mode));
+ uLong ZCALLBACK win32_read_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size));
diff --git a/deps/v8/third_party/zlib/trees.h b/deps/v8/third_party/zlib/trees.h
index 45a749f0cc..d35639d82a 100644
--- a/deps/v8/third_party/zlib/trees.h
+++ b/deps/v8/third_party/zlib/trees.h
@@ -125,3 +125,4 @@ local const int base_dist[D_CODES] = {
32, 48, 64, 96, 128, 192, 256, 384, 512, 768,
1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576
};
+
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 8d18f4df75..2f8197dd36 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -14,25 +14,23 @@ group("gn_all") {
"gcmole:v8_run_gcmole",
"jsfunfuzz:v8_jsfunfuzz",
]
+
+ if (is_win) {
+ data_deps += [ "v8windbg" ]
+ }
}
group("v8_check_static_initializers") {
- data_deps = [
- "..:d8",
- ]
+ data_deps = [ "..:d8" ]
- data = [
- "check-static-initializers.sh",
- ]
+ data = [ "check-static-initializers.sh" ]
}
group("v8_android_test_runner_deps") {
testonly = true
if (is_android && !build_with_chromium) {
- data_deps = [
- "//build/android:test_runner_py",
- ]
+ data_deps = [ "//build/android:test_runner_py" ]
data = [
# This is used by android.py, but not included by test_runner_py above.
"//third_party/catapult/devil/devil/android/perf/",
@@ -44,9 +42,9 @@ group("v8_testrunner") {
testonly = true
data_deps = [
- "..:v8_python_base",
- "..:v8_dump_build_config",
":v8_android_test_runner_deps",
+ "..:v8_dump_build_config",
+ "..:v8_python_base",
]
data = [
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index a7580c9319..97a6638215 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1101,13 +1101,13 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
var reader = new FileReader();
reader.onload = function(evt) {
- handleLoadText(this.result, append);
+ handleLoadText(this.result, append, file.name);
}
reader.readAsText(file);
}
- function handleLoadText(text, append) {
- handleLoadJSON(JSON.parse(text), append);
+ function handleLoadText(text, append, fileName) {
+ handleLoadJSON(JSON.parse(text), append, fileName);
}
function getStateFromParams() {
@@ -1121,10 +1121,10 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return result;
}
- function handleLoadJSON(json, append) {
+ function handleLoadJSON(json, append, fileName) {
let isFirstLoad = pages === undefined;
json = fixClusterTelemetryResults(json);
- json = fixSinglePageJSON(json);
+ json = fixSinglePageJSON(json, fileName);
if (append && !isFirstLoad) {
json = createUniqueVersions(json)
}
@@ -1144,7 +1144,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function fixClusterTelemetryResults(json) {
// Convert CT results to callstats compatible JSON
// Input:
- // { PATH: { "pairs": { METRIC: { "count": XX, "time": XX }.. }}.. }
+ // { VERSION_NAME: { PAGE: { METRIC: { "count": {XX}, "duration": {XX} }.. }}.. }
let firstEntry;
for (let key in json) {
firstEntry = json[key];
@@ -1171,19 +1171,28 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return {__proto__:null, ClusterTelemetry: result};
}
- function fixSinglePageJSON(json) {
+ function fixSinglePageJSON(json, name) {
// Try to detect the single-version case, where we're missing the toplevel
// version object. The incoming JSON is of the form:
- // {"Page 1": [... data points ... ], "Page 2": [...], ...}
+ // { PAGE: ... , PAGE_2: }
// Instead of the default multi-page JSON:
// {"Version 1": { "Page 1": ..., ...}, "Version 2": {...}, ...}
// In this case insert a single "Default" version as top-level entry.
- var firstProperty = (object) => {
- for (var key in object) return key;
+ let firstProperty = (object) => {
+ for (let key in object) return object[key];
};
- var maybePage = json[firstProperty(json)];
- if (!Array.isArray(maybePage)) return json;
- return {"Default": json}
+ let maybePage = firstProperty(json);
+ let maybeMetrics = firstProperty(maybePage);
+ let tempName = name ? name : new Date().toISOString();
+ if ('count' in maybeMetrics && 'duration' in maybeMetrics) {
+ return {[tempName]: json}
+ }
+ // Legacy fallback where the metrics are encoded as arrays:
+ // { PAGE: [[metric_name, ...], [...], ]}
+ if (Array.isArray(maybeMetrics)) {
+ return {[tempName]: json}
+ }
+ return json
}
var appendIndex = 0;
@@ -1603,9 +1612,19 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
}
PageVersion.fromJSON = function(version, name, data) {
- var page = new PageVersion(version, pages.get(name));
- for (var i = 0; i < data.length; i++) {
- page.add(Entry.fromJSON(i, data[data.length - i - 1]));
+ let page = new PageVersion(version, pages.get(name));
+ // Distinguish between the legacy format which just uses Arrays,
+ // or the new object style.
+ if (Array.isArray(data)) {
+ for (let i = 0; i < data.length; i++) {
+ page.add(Entry.fromLegacyJSON(i, data[data.length - i - 1]));
+ }
+ } else {
+ let position = 0;
+ for (let metric_name in data) {
+ page.add(Entry.fromJSON(position, metric_name, data[metric_name]));
+ position++;
+ }
}
page.sort();
return page
@@ -1698,9 +1717,15 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return this._timeVariancePercent
}
}
- Entry.fromJSON = function(position, data) {
+ Entry.fromLegacyJSON = function(position, data) {
return new Entry(position, ...data);
}
+ Entry.fromJSON = function(position, name, data) {
+ let time = data.duration;
+ let count = data.count;
+ return new Entry(position, name, time.average, time.stddev,
+ count.average, count.stddev);
+ }
class Group {
constructor(name, regexp, color) {
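The callstats.html changes above make the loader tolerate three input shapes: the new per-metric object format ({"count": ..., "duration": ...}), the legacy array format, and already-versioned multi-page JSON. A minimal Python sketch of the same normalization, with hypothetical names (fix_single_page_json below is an illustration, not part of the patch):

    from datetime import datetime

    def first_value(obj):
        # Like firstProperty() above: first value of a dict or list.
        if isinstance(obj, dict):
            return next(iter(obj.values()))
        return obj[0]

    def fix_single_page_json(data, name=None):
        maybe_page = first_value(data)
        maybe_metrics = first_value(maybe_page)
        version = name or datetime.utcnow().isoformat()
        # New object format: { PAGE: { METRIC: {"count": .., "duration": ..} } }
        if (isinstance(maybe_metrics, dict) and
                'count' in maybe_metrics and 'duration' in maybe_metrics):
            return {version: data}
        # Legacy fallback: { PAGE: [[metric_name, ...], ...] }
        if isinstance(maybe_metrics, list):
            return {version: data}
        return data  # already has a top-level version object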
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 248970e81a..f756757a9a 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -32,8 +32,6 @@ import operator
from callstats_groups import RUNTIME_CALL_STATS_GROUPS
import numpy
-import scipy
-import scipy.stats
from math import sqrt
@@ -328,6 +326,10 @@ def do_run_replay_server(args):
# Calculate statistics.
def statistics(data):
+ # NOTE(V8:10269): imports moved here to mitigate the outage.
+ import scipy
+ import scipy.stats
+
N = len(data)
average = numpy.average(data)
median = numpy.median(data)
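Moving the scipy imports into statistics() is the standard deferred-import mitigation: a broken or missing scipy install then only fails when the statistics path actually runs, not at module load time. A sketch of the pattern; the confidence-interval math below is illustrative, not copied from callstats.py:

    import numpy

    def statistics(data):
        # Deferred import (see NOTE above): module import keeps working
        # even when scipy is unavailable; only this path fails.
        import scipy.stats

        n = len(data)
        average = numpy.average(data)
        stddev = numpy.std(data, ddof=1)
        # Half-width of a 95% confidence interval via Student's t.
        half_width = scipy.stats.t.ppf(0.975, n - 1) * stddev / numpy.sqrt(n)
        return average, half_width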
diff --git a/deps/v8/tools/clusterfuzz/BUILD.gn b/deps/v8/tools/clusterfuzz/BUILD.gn
index e0c4531555..d75e6f9687 100644
--- a/deps/v8/tools/clusterfuzz/BUILD.gn
+++ b/deps/v8/tools/clusterfuzz/BUILD.gn
@@ -11,8 +11,11 @@ if (v8_correctness_fuzzer) {
"v8_foozzie.py",
"v8_foozzie_harness_adjust.js",
"v8_fuzz_config.py",
+ "v8_fuzz_experiments.json",
+ "v8_fuzz_flags.json",
"v8_mock.js",
"v8_mock_archs.js",
+ "v8_mock_webassembly.js",
"v8_sanity_checks.js",
"v8_suppressions.js",
"v8_suppressions.py",
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index de3c15eab2..73db1cb0c3 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
index 1443c61f2b..3c105ef1b4 100644
--- a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/v8_commands.py b/deps/v8/tools/clusterfuzz/v8_commands.py
index e35ae1a871..e84cd915e3 100644
--- a/deps/v8/tools/clusterfuzz/v8_commands.py
+++ b/deps/v8/tools/clusterfuzz/v8_commands.py
@@ -12,11 +12,13 @@ from threading import Event, Timer
import v8_fuzz_config
+PYTHON3 = sys.version_info >= (3, 0)
+
# List of default flags passed to each d8 run.
DEFAULT_FLAGS = [
'--correctness-fuzzer-suppressions',
'--expose-gc',
- '--allow-natives-syntax',
+ '--allow-natives-for-differential-fuzzing',
'--invoke-weak-callbacks',
'--omit-quit',
'--es-staging',
@@ -28,23 +30,29 @@ DEFAULT_FLAGS = [
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
# List of files passed to each d8 run before the testcase.
-DEFAULT_FILES = [
- os.path.join(BASE_PATH, 'v8_mock.js'),
- os.path.join(BASE_PATH, 'v8_suppressions.js'),
-]
+DEFAULT_MOCK = os.path.join(BASE_PATH, 'v8_mock.js')
+
+# Suppressions on JavaScript level for known issues.
+JS_SUPPRESSIONS = os.path.join(BASE_PATH, 'v8_suppressions.js')
-# Architecture-specific mock file
+# Config-specific mock files.
ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
+WEBASSEMBLY_MOCKS = os.path.join(BASE_PATH, 'v8_mock_webassembly.js')
# Timeout in seconds for one d8 run.
TIMEOUT = 3
def _startup_files(options):
- """Default files and optional architecture-specific mock file."""
- files = DEFAULT_FILES[:]
+ """Default files and optional config-specific mock files."""
+ files = [DEFAULT_MOCK]
+ if not options.skip_suppressions:
+ files.append(JS_SUPPRESSIONS)
if options.first.arch != options.second.arch:
files.append(ARCH_MOCKS)
+ # Mock out WebAssembly when comparing with jitless mode.
+ if '--jitless' in options.first.flags + options.second.flags:
+ files.append(WEBASSEMBLY_MOCKS)
return files
@@ -101,12 +109,16 @@ class Output(object):
def Execute(args, cwd, timeout=None):
popen_args = [c for c in args if c != ""]
+ kwargs = {}
+ if PYTHON3:
+ kwargs['encoding'] = 'utf-8'
try:
process = subprocess.Popen(
args=popen_args,
stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- cwd=cwd
+ stderr=subprocess.PIPE,
+ cwd=cwd,
+ **kwargs
)
except Exception as e:
sys.stderr.write("Error executing: %s\n" % popen_args)
@@ -129,6 +141,6 @@ def Execute(args, cwd, timeout=None):
return Output(
process.returncode,
timeout_event.is_set(),
- stdout.decode('utf-8', 'replace').encode('utf-8'),
+ stdout,
process.pid,
)
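The Execute() change above is a common Python 2/3 bridge: passing encoding='utf-8' (accepted by Python 3's subprocess only) makes Popen yield str instead of bytes, removing the manual decode further down. A minimal standalone sketch of the same pattern:

    import subprocess
    import sys

    PYTHON3 = sys.version_info >= (3, 0)

    def run(argv, cwd=None):
        # Python 3's subprocess accepts encoding= and returns text;
        # Python 2's does not, so the kwarg is added conditionally.
        kwargs = {}
        if PYTHON3:
            kwargs['encoding'] = 'utf-8'
        process = subprocess.Popen(
            argv,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=cwd,
            **kwargs
        )
        stdout, stderr = process.communicate()
        return process.returncode, stdout, stderr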
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index bf3d568cd6..b6638cc377 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -25,6 +25,8 @@ from collections import namedtuple
import v8_commands
import v8_suppressions
+PYTHON3 = sys.version_info >= (3, 0)
+
CONFIGS = dict(
default=[],
ignition=[
@@ -60,13 +62,11 @@ CONFIGS = dict(
ignition_turbo_opt=[
'--always-opt',
'--no-liftoff',
- '--no-lazy-feedback-allocation'
],
ignition_turbo_opt_eager=[
'--always-opt',
'--no-lazy',
'--no-lazy-inner-functions',
- '--no-lazy-feedback-allocation',
],
jitless=[
'--jitless',
@@ -77,7 +77,6 @@ CONFIGS = dict(
slow_path_opt=[
'--always-opt',
'--force-slow-path',
- '--no-lazy-feedback-allocation',
],
trusted=[
'--no-untrusted-code-mitigations',
@@ -85,7 +84,6 @@ CONFIGS = dict(
trusted_opt=[
'--always-opt',
'--no-untrusted-code-mitigations',
- '--no-lazy-feedback-allocation',
],
)
@@ -212,6 +210,9 @@ def parse_args():
parser.add_argument(
'--skip-sanity-checks', default=False, action='store_true',
help='skip sanity checks for testing purposes')
+ parser.add_argument(
+ '--skip-suppressions', default=False, action='store_true',
+ help='skip suppressions to reproduce known issues')
# Add arguments for each run configuration.
first_config_arguments.add_arguments(parser, 'ignition')
@@ -256,16 +257,13 @@ def content_bailout(content, ignore_fun):
return False
-def pass_bailout(output, step_number):
- """Print info and return if in timeout or crash pass states."""
+def timeout_bailout(output, step_number):
+ """Print info and return if in timeout pass state."""
if output.HasTimedOut():
# Dashed output, so that no other clusterfuzz tools can match the
# words timeout or crash.
print('# V8 correctness - T-I-M-E-O-U-T %d' % step_number)
return True
- if output.HasCrashed():
- print('# V8 correctness - C-R-A-S-H %d' % step_number)
- return True
return False
@@ -287,7 +285,15 @@ def print_difference(
first_config_label = '%s,%s' % (options.first.arch, options.first.config)
second_config_label = '%s,%s' % (options.second.arch, options.second.config)
source_file_text = SOURCE_FILE_TEMPLATE % source if source else ''
- print((FAILURE_TEMPLATE % dict(
+
+ if PYTHON3:
+ first_stdout = first_config_output.stdout
+ second_stdout = second_config_output.stdout
+ else:
+ first_stdout = first_config_output.stdout.decode('utf-8', 'replace')
+ second_stdout = second_config_output.stdout.decode('utf-8', 'replace')
+
+ text = (FAILURE_TEMPLATE % dict(
configs='%s:%s' % (first_config_label, second_config_label),
source_file_text=source_file_text,
source_key=source_key,
@@ -296,13 +302,15 @@ def print_difference(
second_config_label=second_config_label,
first_config_flags=' '.join(first_command.flags),
second_config_flags=' '.join(second_command.flags),
- first_config_output=
- first_config_output.stdout.decode('utf-8', 'replace'),
- second_config_output=
- second_config_output.stdout.decode('utf-8', 'replace'),
+ first_config_output=first_stdout,
+ second_config_output=second_stdout,
source=source,
- difference=difference.decode('utf-8', 'replace'),
- )).encode('utf-8', 'replace'))
+ difference=difference,
+ ))
+ if PYTHON3:
+ print(text)
+ else:
+ print(text.encode('utf-8', 'replace'))
def main():
@@ -312,10 +320,14 @@ def main():
suppress = v8_suppressions.get_suppression(
options.first.arch, options.first.config,
options.second.arch, options.second.config,
+ options.skip_suppressions,
)
# Static bailout based on test case content or metadata.
- with open(options.testcase) as f:
+ kwargs = {}
+ if PYTHON3:
+ kwargs['encoding'] = 'utf-8'
+ with open(options.testcase, 'r', **kwargs) as f:
content = f.read()
if content_bailout(get_meta_data(content), suppress.ignore_by_metadata):
return RETURN_FAIL
@@ -332,8 +344,7 @@ def main():
if not options.skip_sanity_checks:
first_config_output = first_cmd.run(SANITY_CHECKS)
second_config_output = second_cmd.run(SANITY_CHECKS)
- difference, _ = suppress.diff(
- first_config_output.stdout, second_config_output.stdout)
+ difference, _ = suppress.diff(first_config_output, second_config_output)
if difference:
# Special source key for sanity checks so that clusterfuzz dedupes all
# cases on this in case it's hit.
@@ -345,21 +356,21 @@ def main():
first_config_output = first_cmd.run(options.testcase, verbose=True)
- # Early bailout based on first run's output.
- if pass_bailout(first_config_output, 1):
+ # Early bailout if first run was a timeout.
+ if timeout_bailout(first_config_output, 1):
return RETURN_PASS
second_config_output = second_cmd.run(options.testcase, verbose=True)
- # Bailout based on second run's output.
- if pass_bailout(second_config_output, 2):
+ # Bailout if second run was a timeout.
+ if timeout_bailout(second_config_output, 2):
return RETURN_PASS
- difference, source = suppress.diff(
- first_config_output.stdout, second_config_output.stdout)
+ difference, source = suppress.diff(first_config_output, second_config_output)
if source:
- source_key = hashlib.sha1(source).hexdigest()[:ORIGINAL_SOURCE_HASH_LENGTH]
+ long_key = hashlib.sha1(source.encode('utf-8')).hexdigest()
+ source_key = long_key[:ORIGINAL_SOURCE_HASH_LENGTH]
else:
source_key = ORIGINAL_SOURCE_DEFAULT
@@ -377,11 +388,18 @@ def main():
first_config_output, second_config_output, difference, source)
return RETURN_FAIL
- # TODO(machenbach): Figure out if we could also return a bug in case there's
- # no difference, but one of the line suppressions has matched - and without
- # the match there would be a difference.
+ # Show if a crash has happened in one of the runs and no difference was
+ # detected.
+ if first_config_output.HasCrashed():
+ print('# V8 correctness - C-R-A-S-H 1')
+ elif second_config_output.HasCrashed():
+ print('# V8 correctness - C-R-A-S-H 2')
+ else:
+ # TODO(machenbach): Figure out if we could also return a bug in case
+ # there's no difference, but one of the line suppressions has matched -
+ # and without the match there would be a difference.
+ print('# V8 correctness - pass')
- print('# V8 correctness - pass')
return RETURN_PASS
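The source-key change above is driven by Python 3's hashlib, which only accepts bytes. A short sketch of the dedupe-key computation; the hash length constant is assumed for illustration, the real value lives elsewhere in v8_foozzie.py:

    import hashlib

    ORIGINAL_SOURCE_HASH_LENGTH = 3  # assumed for illustration

    def source_key(source):
        # hashlib.sha1 requires bytes; encode the source text first.
        long_key = hashlib.sha1(source.encode('utf-8')).hexdigest()
        return long_key[:ORIGINAL_SOURCE_HASH_LENGTH]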
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index 6fb3345bc3..f82afc9e20 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -9,60 +9,113 @@ import subprocess
import sys
import unittest
+import v8_commands
import v8_foozzie
import v8_fuzz_config
import v8_suppressions
+try:
+ basestring
+except NameError:
+ basestring = str
+
+PYTHON3 = sys.version_info >= (3, 0)
+
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FOOZZIE = os.path.join(BASE_DIR, 'v8_foozzie.py')
TEST_DATA = os.path.join(BASE_DIR, 'testdata')
+KNOWN_BUILDS = [
+ 'd8',
+ 'clang_x86/d8',
+ 'clang_x86_v8_arm/d8',
+ 'clang_x64_v8_arm64/d8',
+ 'clang_x64_pointer_compression/d8',
+]
+
class ConfigTest(unittest.TestCase):
def testExperiments(self):
- """Test that probabilities add up to 100 and that all config names exist.
- """
- EXPERIMENTS = v8_fuzz_config.FOOZZIE_EXPERIMENTS
+ """Test integrity of probabilities and configs."""
CONFIGS = v8_foozzie.CONFIGS
+ EXPERIMENTS = v8_fuzz_config.FOOZZIE_EXPERIMENTS
+ FLAGS = v8_fuzz_config.ADDITIONAL_FLAGS
+ # Probabilities add up to 100%.
+ first_is_int = lambda x: type(x[0]) == int
+ assert all(map(first_is_int, EXPERIMENTS))
assert sum(x[0] for x in EXPERIMENTS) == 100
+ # Configs used in experiments are defined.
assert all(map(lambda x: x[1] in CONFIGS, EXPERIMENTS))
assert all(map(lambda x: x[2] in CONFIGS, EXPERIMENTS))
- assert all(map(lambda x: x[3].endswith('d8'), EXPERIMENTS))
+ # The last config item points to a known build configuration.
+ assert all(map(lambda x: x[3] in KNOWN_BUILDS, EXPERIMENTS))
+ # Ensure we compare different configs and same d8, or same config
+ # to different d8.
+ is_sane_comparison = lambda x: (x[1] == x[2]) == ('d8' != x[3])
+ assert all(map(is_sane_comparison, EXPERIMENTS))
+ # All flags have a probability.
+ first_is_float = lambda x: type(x[0]) == float
+ assert all(map(first_is_float, FLAGS))
+ first_between_0_and_1 = lambda x: x[0] > 0 and x[0] < 1
+ assert all(map(first_between_0_and_1, FLAGS))
+ # Test consistent flags.
+ second_is_string = lambda x: isinstance(x[1], basestring)
+ assert all(map(second_is_string, FLAGS))
+ # We allow spaces to separate more flags. We don't allow spaces in the flag
+ # value.
+ is_flag = lambda x: x.startswith('--')
+ all_parts_are_flags = lambda x: all(map(is_flag, x[1].split()))
+ assert all(map(all_parts_are_flags, FLAGS))
def testConfig(self):
- """Smoke test how to choose experiments.
-
- When experiment distribution changes this test might change, too.
- """
+ """Smoke test how to choose experiments."""
+ config = v8_fuzz_config.Config('foo', random.Random(42))
+ experiments = [
+ [25, 'ignition', 'jitless', 'd8'],
+ [75, 'ignition', 'ignition', 'clang_x86/d8'],
+ ]
+ flags = [
+ [0.1, '--flag'],
+ [0.3, '--baz'],
+ [0.3, '--foo --bar'],
+ ]
self.assertEqual(
[
'--first-config=ignition',
- '--second-config=ignition_turbo_opt',
+ '--second-config=jitless',
'--second-d8=d8',
- '--second-config-extra-flags=--stress-scavenge=100',
- '--second-config-extra-flags=--no-regexp-tier-up',
- '--second-config-extra-flags=--no-enable-ssse3',
- '--second-config-extra-flags=--no-enable-bmi2',
- '--second-config-extra-flags=--no-enable-lzcnt',
+ '--second-config-extra-flags=--baz',
+ '--second-config-extra-flags=--foo',
+ '--second-config-extra-flags=--bar',
],
- v8_fuzz_config.Config('foo', random.Random(42)).choose_foozzie_flags(),
+ config.choose_foozzie_flags(experiments, flags),
+ )
+ self.assertEqual(
+ [
+ '--first-config=ignition',
+ '--second-config=jitless',
+ '--second-d8=d8',
+ ],
+ config.choose_foozzie_flags(experiments, flags),
)
class UnitTest(unittest.TestCase):
def testDiff(self):
- # TODO(machenbach): Mock out suppression configuration.
- suppress = v8_suppressions.get_suppression(
- 'x64', 'ignition', 'x64', 'ignition_turbo')
+ def diff_fun(one, two, skip=False):
+ suppress = v8_suppressions.get_suppression(
+ 'x64', 'ignition', 'x64', 'ignition_turbo', skip)
+ return suppress.diff_lines(one.splitlines(), two.splitlines())
+
one = ''
two = ''
diff = None, None
- self.assertEquals(diff, suppress.diff(one, two))
+ self.assertEqual(diff, diff_fun(one, two))
one = 'a \n b\nc();'
two = 'a \n b\nc();'
diff = None, None
- self.assertEquals(diff, suppress.diff(one, two))
+ self.assertEqual(diff, diff_fun(one, two))
# Ignore line before caret, caret position and error message.
one = """
@@ -80,7 +133,7 @@ somefile.js: TypeError: baz is not a function
undefined
"""
diff = None, None
- self.assertEquals(diff, suppress.diff(one, two))
+ self.assertEqual(diff, diff_fun(one, two))
one = """
Still equal
@@ -90,7 +143,7 @@ Extra line
Still equal
"""
diff = '- Extra line', None
- self.assertEquals(diff, suppress.diff(one, two))
+ self.assertEqual(diff, diff_fun(one, two))
one = """
Still equal
@@ -100,7 +153,7 @@ Still equal
Extra line
"""
diff = '+ Extra line', None
- self.assertEquals(diff, suppress.diff(one, two))
+ self.assertEqual(diff, diff_fun(one, two))
one = """
undefined
@@ -112,7 +165,61 @@ otherfile.js: TypeError: undefined is not a constructor
"""
diff = """- somefile.js: TypeError: undefined is not a constructor
+ otherfile.js: TypeError: undefined is not a constructor""", None
- self.assertEquals(diff, suppress.diff(one, two))
+ self.assertEqual(diff, diff_fun(one, two))
+
+ # Test that skipping suppressions works.
+ one = """
+v8-foozzie source: foo
+23:TypeError: bar is not a function
+"""
+ two = """
+v8-foozzie source: foo
+42:TypeError: baz is not a function
+"""
+ self.assertEqual((None, 'foo'), diff_fun(one, two))
+ diff = """- 23:TypeError: bar is not a function
++ 42:TypeError: baz is not a function""", 'foo'
+ self.assertEqual(diff, diff_fun(one, two, skip=True))
+
+ def testOutputCapping(self):
+ def output(stdout, is_crash):
+ exit_code = -1 if is_crash else 0
+ return v8_commands.Output(
+ exit_code=exit_code, timed_out=False, stdout=stdout, pid=0)
+
+ def check(stdout1, stdout2, is_crash1, is_crash2, capped_lines1,
+ capped_lines2):
+ output1 = output(stdout1, is_crash1)
+ output2 = output(stdout2, is_crash2)
+ self.assertEqual(
+ (capped_lines1, capped_lines2),
+ v8_suppressions.get_output_capped(output1, output2))
+
+ # No capping, already equal.
+ check('1\n2', '1\n2', True, True, '1\n2', '1\n2')
+ # No crash, no capping.
+ check('1\n2', '1\n2\n3', False, False, '1\n2', '1\n2\n3')
+ check('1\n2\n3', '1\n2', False, False, '1\n2\n3', '1\n2')
+ # Cap smallest if all runs crash.
+ check('1\n2', '1\n2\n3', True, True, '1\n2', '1\n2')
+ check('1\n2\n3', '1\n2', True, True, '1\n2', '1\n2')
+ check('1\n2', '1\n23', True, True, '1\n2', '1\n2')
+ check('1\n23', '1\n2', True, True, '1\n2', '1\n2')
+ # Cap the non-crashy run.
+ check('1\n2\n3', '1\n2', False, True, '1\n2', '1\n2')
+ check('1\n2', '1\n2\n3', True, False, '1\n2', '1\n2')
+ check('1\n23', '1\n2', False, True, '1\n2', '1\n2')
+ check('1\n2', '1\n23', True, False, '1\n2', '1\n2')
+ # The crashy run has more output.
+ check('1\n2\n3', '1\n2', True, False, '1\n2\n3', '1\n2')
+ check('1\n2', '1\n2\n3', False, True, '1\n2', '1\n2\n3')
+ check('1\n23', '1\n2', True, False, '1\n23', '1\n2')
+ check('1\n2', '1\n23', False, True, '1\n2', '1\n23')
+ # Keep output difference when capping.
+ check('1\n2', '3\n4\n5', True, True, '1\n2', '3\n4')
+ check('1\n2\n3', '4\n5', True, True, '1\n2', '4\n5')
+ check('12', '345', True, True, '12', '34')
+ check('123', '45', True, True, '12', '45')
def cut_verbose_output(stdout):
@@ -120,17 +227,22 @@ def cut_verbose_output(stdout):
return '\n'.join(stdout.split('\n')[4:])
-def run_foozzie(second_d8_dir, *extra_flags):
+def run_foozzie(second_d8_dir, *extra_flags, **kwargs):
+ second_config = 'ignition_turbo'
+ if 'second_config' in kwargs:
+ second_config = 'jitless'
+ kwargs = {}
+ if PYTHON3:
+ kwargs['text'] = True
return subprocess.check_output([
sys.executable, FOOZZIE,
'--random-seed', '12345',
'--first-d8', os.path.join(TEST_DATA, 'baseline', 'd8.py'),
'--second-d8', os.path.join(TEST_DATA, second_d8_dir, 'd8.py'),
'--first-config', 'ignition',
- '--second-config', 'ignition_turbo',
+ '--second-config', second_config,
os.path.join(TEST_DATA, 'fuzz-123.js'),
- ] + list(extra_flags))
-
+ ] + list(extra_flags), **kwargs)
class SystemTest(unittest.TestCase):
"""This tests the whole correctness-fuzzing harness with fake build
@@ -146,7 +258,12 @@ class SystemTest(unittest.TestCase):
"""
def testSyntaxErrorDiffPass(self):
stdout = run_foozzie('build1', '--skip-sanity-checks')
- self.assertEquals('# V8 correctness - pass\n', cut_verbose_output(stdout))
+ self.assertEqual('# V8 correctness - pass\n', cut_verbose_output(stdout))
+ # Default comparison includes suppressions.
+ self.assertIn('v8_suppressions.js', stdout)
+ # Default comparison doesn't include any specific mock files.
+ self.assertNotIn('v8_mock_archs.js', stdout)
+ self.assertNotIn('v8_mock_webassembly.js', stdout)
def testDifferentOutputFail(self):
with open(os.path.join(TEST_DATA, 'failure_output.txt')) as f:
@@ -157,8 +274,8 @@ class SystemTest(unittest.TestCase):
'--first-config-extra-flags=--flag2=0',
'--second-config-extra-flags=--flag3')
e = ctx.exception
- self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
- self.assertEquals(expected_output, cut_verbose_output(e.output))
+ self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
+ self.assertEqual(expected_output, cut_verbose_output(e.output))
def testSanityCheck(self):
with open(os.path.join(TEST_DATA, 'sanity_check_output.txt')) as f:
@@ -166,8 +283,8 @@ class SystemTest(unittest.TestCase):
with self.assertRaises(subprocess.CalledProcessError) as ctx:
run_foozzie('build2')
e = ctx.exception
- self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
- self.assertEquals(expected_output, e.output)
+ self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
+ self.assertEqual(expected_output, e.output)
def testDifferentArch(self):
"""Test that the architecture-specific mocks are passed to both runs when
@@ -181,6 +298,34 @@ class SystemTest(unittest.TestCase):
self.assertIn('v8_mock_archs.js', lines[1])
self.assertIn('v8_mock_archs.js', lines[3])
+ def testJitless(self):
+ """Test that webassembly is mocked out when comparing with jitless."""
+ stdout = run_foozzie(
+ 'build1', '--skip-sanity-checks', second_config='jitless')
+ lines = stdout.split('\n')
+ # TODO(machenbach): Don't depend on the command-lines being printed in
+ # particular lines.
+ self.assertIn('v8_mock_webassembly.js', lines[1])
+ self.assertIn('v8_mock_webassembly.js', lines[3])
+
+ def testSkipSuppressions(self):
+ """Test that the suppressions file is not passed when skipping
+ suppressions.
+ """
+ # Compare baseline with baseline. This passes as there is no difference.
+ stdout = run_foozzie(
+ 'baseline', '--skip-sanity-checks', '--skip-suppressions')
+ self.assertNotIn('v8_suppressions.js', stdout)
+
+ # Compare with a build that usually suppresses a difference. Now we fail
+ # since we skip suppressions.
+ with self.assertRaises(subprocess.CalledProcessError) as ctx:
+ run_foozzie(
+ 'build1', '--skip-sanity-checks', '--skip-suppressions')
+ e = ctx.exception
+ self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
+ self.assertNotIn('v8_suppressions.js', e.output)
+
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
index cc68a7dd4c..99439a9d66 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -2,62 +2,23 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import json
+import os
import random
+THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+
# List of configuration experiments for correctness fuzzing.
# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
# Probabilities must add up to 100.
-FOOZZIE_EXPERIMENTS = [
- [10, 'ignition', 'jitless', 'd8'],
- [10, 'ignition', 'slow_path', 'd8'],
- [5, 'ignition', 'slow_path_opt', 'd8'],
- [25, 'ignition', 'ignition_turbo', 'd8'],
- [2, 'ignition_no_ic', 'ignition_turbo', 'd8'],
- [2, 'ignition', 'ignition_turbo_no_ic', 'd8'],
- [15, 'ignition', 'ignition_turbo_opt', 'd8'],
- [2, 'ignition_no_ic', 'ignition_turbo_opt', 'd8'],
- [4, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x64_pointer_compression/d8'],
- [5, 'ignition_turbo', 'ignition_turbo', 'clang_x64_pointer_compression/d8'],
- [4, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
- [4, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
- [4, 'ignition', 'ignition', 'clang_x86/d8'],
- [4, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
- [4, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
-]
+with open(os.path.join(THIS_DIR, 'v8_fuzz_experiments.json')) as f:
+ FOOZZIE_EXPERIMENTS = json.load(f)
# Additional flag experiments. List of tuples like
# (<likelihood to use flags in [0,1)>, <flag>).
-ADDITIONAL_FLAGS = [
- (0.1, '--stress-marking=100'),
- (0.1, '--stress-scavenge=100'),
- (0.1, '--stress-compaction-random'),
- (0.1, '--random-gc-interval=2000'),
- (0.2, '--noanalyze-environment-liveness'),
- (0.1, '--stress-delay-tasks'),
- (0.01, '--thread-pool-size=1'),
- (0.01, '--thread-pool-size=2'),
- (0.01, '--thread-pool-size=4'),
- (0.01, '--thread-pool-size=8'),
- (0.1, '--interrupt-budget=1000'),
- (0.25, '--future'),
- (0.2, '--no-regexp-tier-up'),
- (0.1, '--regexp-interpret-all'),
- (0.1, '--regexp-tier-up-ticks=10'),
- (0.1, '--regexp-tier-up-ticks=100'),
- (0.1, '--turbo-instruction-scheduling'),
- (0.1, '--turbo-stress-instruction-scheduling'),
- (0.1, '--no-enable-sse3'),
- (0.1, '--no-enable-ssse3'),
- (0.1, '--no-enable-sse4_1'),
- (0.1, '--no-enable-sse4_2'),
- (0.1, '--no-enable-sahf'),
- (0.1, '--no-enable-avx'),
- (0.1, '--no-enable-fma3'),
- (0.1, '--no-enable-bmi1'),
- (0.1, '--no-enable-bmi2'),
- (0.1, '--no-enable-lzcnt'),
- (0.1, '--no-enable-popcnt'),
-]
+with open(os.path.join(THIS_DIR, 'v8_fuzz_flags.json')) as f:
+ ADDITIONAL_FLAGS = json.load(f)
+
class Config(object):
def __init__(self, name, rng=None):
@@ -70,22 +31,29 @@ class Config(object):
self.name = name
self.rng = rng or random.Random()
- def choose_foozzie_flags(self):
+ def choose_foozzie_flags(self, foozzie_experiments=None, additional_flags=None):
"""Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.
+ Args:
+ foozzie_experiments: Override experiment config for testing.
+ additional_flags: Override additional flags for testing.
+
Returns: List of flags to pass to v8_foozzie.py fuzz harness.
"""
+ foozzie_experiments = foozzie_experiments or FOOZZIE_EXPERIMENTS
+ additional_flags = additional_flags or ADDITIONAL_FLAGS
# Add additional flags to second config based on experiment percentages.
extra_flags = []
- for p, flag in ADDITIONAL_FLAGS:
+ for p, flags in additional_flags:
if self.rng.random() < p:
- extra_flags.append('--second-config-extra-flags=%s' % flag)
+ for flag in flags.split():
+ extra_flags.append('--second-config-extra-flags=%s' % flag)
# Calculate flags determining the experiment.
acc = 0
threshold = self.rng.random() * 100
- for prob, first_config, second_config, second_d8 in FOOZZIE_EXPERIMENTS:
+ for prob, first_config, second_config, second_d8 in foozzie_experiments:
acc += prob
if acc > threshold:
return [
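choose_foozzie_flags draws one experiment by cumulative probability (the weights sum to 100) and, independently, flips a biased coin per extra flag. A simplified sketch of that selection logic:

    def choose(rng, experiments, additional_flags):
        # experiments: [probability, first_config, second_config, second_d8]
        # additional_flags: [p, flags] with p in (0, 1)
        extra = []
        # Each extra flag is an independent coin flip with probability p;
        # a single entry may expand to several space-separated flags.
        for p, flags in additional_flags:
            if rng.random() < p:
                extra.extend('--second-config-extra-flags=%s' % flag
                             for flag in flags.split())
        # Experiments are drawn by cumulative probability over [0, 100).
        acc = 0
        threshold = rng.random() * 100
        for prob, first, second, second_d8 in experiments:
            acc += prob
            if acc > threshold:
                return [
                    '--first-config=%s' % first,
                    '--second-config=%s' % second,
                    '--second-d8=%s' % second_d8,
                ] + extra

With a seeded rng the choice is reproducible, which is what the updated testConfig above relies on.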
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_experiments.json b/deps/v8/tools/clusterfuzz/v8_fuzz_experiments.json
new file mode 100644
index 0000000000..e8a6f7a8da
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_experiments.json
@@ -0,0 +1,17 @@
+[
+ [10, "ignition", "jitless", "d8"],
+ [10, "ignition", "slow_path", "d8"],
+ [5, "ignition", "slow_path_opt", "d8"],
+ [25, "ignition", "ignition_turbo", "d8"],
+ [2, "ignition_no_ic", "ignition_turbo", "d8"],
+ [2, "ignition", "ignition_turbo_no_ic", "d8"],
+ [15, "ignition", "ignition_turbo_opt", "d8"],
+ [5, "ignition_no_ic", "ignition_turbo_opt", "d8"],
+ [3, "ignition_turbo_opt", "ignition_turbo_opt", "clang_x64_pointer_compression/d8"],
+ [3, "ignition_turbo", "ignition_turbo", "clang_x64_pointer_compression/d8"],
+ [4, "ignition", "ignition", "clang_x86/d8"],
+ [4, "ignition_turbo", "ignition_turbo", "clang_x86/d8"],
+ [4, "ignition_turbo_opt", "ignition_turbo_opt", "clang_x86/d8"],
+ [4, "ignition_turbo", "ignition_turbo", "clang_x64_v8_arm64/d8"],
+ [4, "ignition_turbo", "ignition_turbo", "clang_x86_v8_arm/d8"]
+] \ No newline at end of file
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json b/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
new file mode 100644
index 0000000000..832a7c35b9
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
@@ -0,0 +1,34 @@
+[
+ [0.1, "--stress-marking=100"],
+ [0.1, "--stress-scavenge=100"],
+ [0.1, "--stress-compaction-random"],
+ [0.1, "--random-gc-interval=2000"],
+ [0.2, "--noanalyze-environment-liveness"],
+ [0.1, "--stress-delay-tasks"],
+ [0.01, "--thread-pool-size=1"],
+ [0.01, "--thread-pool-size=2"],
+ [0.01, "--thread-pool-size=4"],
+ [0.01, "--thread-pool-size=8"],
+ [0.1, "--interrupt-budget=1000"],
+ [0.25, "--future"],
+ [0.2, "--no-regexp-tier-up"],
+ [0.1, "--regexp-interpret-all"],
+ [0.1, "--regexp-tier-up-ticks=10"],
+ [0.1, "--regexp-tier-up-ticks=100"],
+ [0.1, "--turbo-instruction-scheduling"],
+ [0.1, "--turbo-stress-instruction-scheduling"],
+ [0.1, "--no-enable-sse3"],
+ [0.1, "--no-enable-ssse3"],
+ [0.1, "--no-enable-sse4_1"],
+ [0.1, "--no-enable-sse4_2"],
+ [0.1, "--no-enable-sahf"],
+ [0.1, "--no-enable-avx"],
+ [0.1, "--no-enable-fma3"],
+ [0.1, "--no-enable-bmi1"],
+ [0.1, "--no-enable-bmi2"],
+ [0.1, "--no-enable-lzcnt"],
+ [0.1, "--no-enable-popcnt"],
+ [0.25, "--no-lazy-feedback-allocation"],
+ [0.1, "--no-lazy-feedback-allocation --interrupt-budget=100"],
+ [0.05, "--budget-for-feedback-vector-allocation=0"]
+] \ No newline at end of file
diff --git a/deps/v8/tools/clusterfuzz/v8_mock.js b/deps/v8/tools/clusterfuzz/v8_mock.js
index b805ccf772..618e14c784 100644
--- a/deps/v8/tools/clusterfuzz/v8_mock.js
+++ b/deps/v8/tools/clusterfuzz/v8_mock.js
@@ -13,8 +13,8 @@
var prettyPrinted = function prettyPrinted(msg) { return msg; };
// Mock Math.random.
-(function () {
- var index = 0
+(function() {
+ let index = 0
Math.random = function() {
index = (index + 1) % 10;
return index / 10.0;
@@ -22,55 +22,47 @@ var prettyPrinted = function prettyPrinted(msg) { return msg; };
})();
// Mock Date.
-(function () {
- var index = 0
- var mockDate = 1477662728696
- var mockDateNow = function() {
- index = (index + 1) % 10
- mockDate = mockDate + index + 1
- return mockDate
+(function() {
+ let index = 0;
+ let mockDate = 1477662728696;
+ const mockDateNow = function() {
+ index = (index + 1) % 10;
+ mockDate = mockDate + index + 1;
+ return mockDate;
}
- var origDate = Date;
- var constructDate = function(args) {
- if (args.length == 1) {
- var result = new origDate(args[0]);
- } else if (args.length == 2) {
- var result = new origDate(args[0], args[1]);
- } else if (args.length == 3) {
- var result = new origDate(args[0], args[1], args[2]);
- } else if (args.length == 4) {
- var result = new origDate(args[0], args[1], args[2], args[3]);
- } else if (args.length == 5) {
- var result = new origDate(args[0], args[1], args[2], args[3], args[4]);
- } else if (args.length == 6) {
- var result = new origDate(
- args[0], args[1], args[2], args[3], args[4], args[5]);
- } else if (args.length >= 7) {
- var result = new origDate(
- args[0], args[1], args[2], args[3], args[4], args[5], args[6]);
+ const origDate = Date;
+ const construct = Reflect.construct;
+ const constructDate = function(args) {
+ let result;
+ if (args.length) {
+ result = construct(origDate, args);
} else {
- var result = new origDate(mockDateNow());
+ result = new origDate(mockDateNow());
}
result.constructor = function(...args) { return constructDate(args); }
Object.defineProperty(
result, "constructor", { configurable: false, writable: false });
- return result
+ return result;
}
+ origDate.prototype.constructor = function(...args) {
+ return constructDate(args);
+ };
+
var handler = {
- apply: function (target, thisArg, args) {
- return constructDate(args)
+ apply: function(target, thisArg, args) {
+ return constructDate(args);
},
- construct: function (target, args, newTarget) {
- return constructDate(args)
+ construct: function(target, args, newTarget) {
+ return constructDate(args);
},
get: function(target, property, receiver) {
if (property == "now") {
return mockDateNow;
}
if (property == "prototype") {
- return origDate.prototype
+ return origDate.prototype;
}
},
}
@@ -79,37 +71,65 @@ var prettyPrinted = function prettyPrinted(msg) { return msg; };
})();
// Mock performance methods.
-performance.now = function () { return 1.2; }
-performance.measureMemory = function () { return []; }
+performance.now = function() { return 1.2; };
+performance.measureMemory = function() { return []; };
// Mock readline so that test cases don't hang.
-readline = function () { return "foo"; }
+readline = function() { return "foo"; };
// Mock stack traces.
-Error.prepareStackTrace = function (error, structuredStackTrace) {
+Error.prepareStackTrace = function(error, structuredStackTrace) {
return "";
};
Object.defineProperty(
Error, 'prepareStackTrace', { configurable: false, writable: false });
// Mock buffer access in float typed arrays because of varying NaN patterns.
-// Note, for now we just use noop forwarding proxies, because they already
-// turn off optimizations.
-(function () {
- var mock = function(arrayType) {
- var handler = {
+(function() {
+ const origIsNaN = isNaN;
+ const deNaNify = function(value) { return origIsNaN(value) ? 1 : value; };
+ const mock = function(type) {
+
+ // Remove NaN values from parameters to "set" function.
+ const set = type.prototype.set;
+ type.prototype.set = function(array, offset) {
+ if (Array.isArray(array)) {
+ array = array.map(deNaNify);
+ }
+ set.apply(this, [array, offset]);
+ };
+
+ const handler = {
+ // Remove NaN values from parameters to constructor.
construct: function(target, args) {
- var obj = new (Function.prototype.bind.apply(arrayType, [null].concat(args)));
+ for (let i = 0; i < args.length; i++) {
+ if (args[i] != null &&
+ typeof args[i][Symbol.iterator] === 'function') {
+ // Consume iterators.
+ args[i] = Array.from(args[i]);
+ }
+ if (Array.isArray(args[i])) {
+ args[i] = args[i].map(deNaNify);
+ }
+ }
+
+ const obj = new (
+ Function.prototype.bind.call(type, null, ...args));
return new Proxy(obj, {
get: function(x, prop) {
if (typeof x[prop] == "function")
- return x[prop].bind(obj)
+ return x[prop].bind(obj);
return x[prop];
},
+ // Remove NaN values that get assigned.
+ set: function(target, prop, value, receiver) {
+ target[prop] = deNaNify(value);
+ return value;
+ }
});
},
};
- return new Proxy(arrayType, handler);
+ return new Proxy(type, handler);
}
Float32Array = mock(Float32Array);
@@ -117,11 +137,11 @@ Object.defineProperty(
})();
// Mock Worker.
-(function () {
- var index = 0;
+(function() {
+ let index = 0;
// TODO(machenbach): Randomize this for each test case, but keep stable
// during comparison. Also data and random above.
- var workerMessages = [
+ const workerMessages = [
undefined, 0, -1, "", "foo", 42, [], {}, [0], {"x": 0}
];
Worker = function(code){
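All of these mocks share one goal: both d8 runs in a comparison must observe identical values, so nondeterminism is replaced by small cycling counters. The Math.random mock above, restated as a Python sketch:

    class DeterministicRandom(object):
        # Analog of the Math.random mock: a cycling index instead of
        # real randomness, so two runs see the same sequence.
        def __init__(self):
            self.index = 0

        def random(self):
            self.index = (self.index + 1) % 10
            return self.index / 10.0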
diff --git a/deps/v8/tools/clusterfuzz/v8_mock_archs.js b/deps/v8/tools/clusterfuzz/v8_mock_archs.js
index 33883775f5..3482e8c4c6 100644
--- a/deps/v8/tools/clusterfuzz/v8_mock_archs.js
+++ b/deps/v8/tools/clusterfuzz/v8_mock_archs.js
@@ -10,23 +10,40 @@
// This file is loaded before each correctness test case and won't get
// minimized.
-// Mock maximum typed-array length and limit to 1MiB.
-(function () {
+// Mock maximum typed-array buffer and limit to 1MiB. Otherwise we might
+// get range errors. We ignore those by crashing, but that reduces coverage,
+// hence, let's reduce the range-error rate.
+(function() {
// Math.min might be manipulated in test cases.
- let min = Math.min;
- let mock = function(arrayType) {
- let handler = {
+ const min = Math.min;
+ const maxBytes = 1048576;
+ const mock = function(type) {
+ const maxLength = maxBytes / (type.BYTES_PER_ELEMENT || 1);
+ const handler = {
construct: function(target, args) {
- for (let i = 0; i < args.length; i++) {
- if (typeof args[i] != "object") {
- args[i] = min(1048576, args[i]);
+ if (args[0] && typeof args[0] != "object") {
+ // Length used as first argument.
+ args[0] = min(maxLength, Number(args[0]));
+ } else if (args[0] instanceof ArrayBuffer && args.length > 1) {
+ // Buffer used as first argument.
+ const buffer = args[0];
+ args[1] = Number(args[1]);
+ // Ensure offset is multiple of bytes per element.
+ args[1] = args[1] - (args[1] % type.BYTES_PER_ELEMENT);
+ // Limit offset to length of buffer.
+ args[1] = min(args[1], buffer.byteLength || 0);
+ if (args.length > 2) {
+ // If also length is given, limit it to the maximum that's possible
+ // given buffer and offset.
+ const maxBytesLeft = buffer.byteLength - args[1];
+ const maxLengthLeft = maxBytesLeft / type.BYTES_PER_ELEMENT;
+ args[2] = min(Number(args[2]), maxLengthLeft);
}
}
- return new (
- Function.prototype.bind.apply(arrayType, [null].concat(args)));
+ return new (Function.prototype.bind.apply(type, [null].concat(args)));
},
};
- return new Proxy(arrayType, handler);
+ return new Proxy(type, handler);
}
ArrayBuffer = mock(ArrayBuffer);
@@ -44,9 +61,11 @@
Float64Array = mock(Float64Array);
})();
-// Mock typed array set function and limit maximum offset to 1MiB.
-(function () {
- let typedArrayTypes = [
+// Mock typed array set function and cap offset to not throw a range error.
+(function() {
+ // Math.min might be manipulated in test cases.
+ const min = Math.min;
+ const types = [
Int8Array,
Uint8Array,
Uint8ClampedArray,
@@ -59,10 +78,14 @@
Float32Array,
Float64Array,
];
- for (let typedArrayType of typedArrayTypes) {
- let set = typedArrayType.prototype.set
- typedArrayType.prototype.set = function(array, offset) {
- set.apply(this, [array, offset > 1048576 ? 1048576 : offset])
+ for (const type of types) {
+ const set = type.prototype.set;
+ type.prototype.set = function(array, offset) {
+ if (Array.isArray(array)) {
+ offset = Number(offset);
+ offset = min(offset, this.length - array.length);
+ }
+ set.call(this, array, offset);
};
}
})();
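The typed-array mock above clamps constructor arguments so fuzz inputs stop tripping range errors (which the harness treats as crashes, costing coverage). The clamping rules, sketched in Python with hypothetical names:

    def clamp_view_args(byte_length, bytes_per_element, offset, length=None):
        # Mirror of the construct() trap above: align the offset, keep it
        # inside the buffer, and cap the element count to what remains.
        offset = int(offset)
        offset -= offset % bytes_per_element
        offset = min(offset, byte_length)
        if length is not None:
            max_length = (byte_length - offset) // bytes_per_element
            length = min(int(length), max_length)
        return offset, length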
diff --git a/deps/v8/tools/clusterfuzz/v8_mock_webassembly.js b/deps/v8/tools/clusterfuzz/v8_mock_webassembly.js
new file mode 100644
index 0000000000..594e6e7004
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/v8_mock_webassembly.js
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This mocks out the WebAssembly object with a permissive dummy.
+
+(function() {
+ const handler = {
+ get: function(x, prop) {
+ if (prop == Symbol.toPrimitive) {
+ return function() { return undefined; };
+ }
+ return dummy;
+ },
+ };
+ const dummy = new Proxy(function() { return dummy; }, handler);
+ WebAssembly = dummy;
+})();
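v8_mock_webassembly.js replaces the whole WebAssembly namespace with a self-returning Proxy, so any property access or call succeeds silently. A Python analog of the same permissive-dummy idea (Python has no Proxy, so __getattr__/__call__ stand in):

    class PermissiveDummy(object):
        # Every attribute lookup and every call yields the dummy again,
        # so arbitrary chained usage runs without throwing.
        def __getattr__(self, name):
            return self

        def __call__(self, *args, **kwargs):
            return self

    WebAssembly = PermissiveDummy()
    WebAssembly.instantiate(WebAssembly.Module())  # no-op, no exception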
diff --git a/deps/v8/tools/clusterfuzz/v8_suppressions.js b/deps/v8/tools/clusterfuzz/v8_suppressions.js
index 011e7272ba..d73ce04d45 100644
--- a/deps/v8/tools/clusterfuzz/v8_suppressions.js
+++ b/deps/v8/tools/clusterfuzz/v8_suppressions.js
@@ -6,28 +6,3 @@
// You can temporarily change JS behavior here to silence known problems.
// Please refer to a bug in a comment and remove the suppression once the
// problem is fixed.
-
-// Suppress http://crbug.com/662429
-(function () {
- var oldMathPow = Math.pow
- Math.pow = function(a, b){
- if (b < 0) {
- return 0.000017;
- } else {
- return oldMathPow(a, b);
- }
- }
-})();
-
-// Suppress http://crbug.com/693426
-(function () {
- var oldMathPow = Math.pow
- Math.pow = function(a, b){
- var s = "" + oldMathPow(a, b)
- // Low tech precision mock. Limit digits in string representation.
- // The phrases Infinity and NaN don't match the split("e").
- s = s.split("e");
- s[0] = s[0].substr(0, 17);
- return parseFloat(s.join("e"));
- }
-})();
diff --git a/deps/v8/tools/clusterfuzz/v8_suppressions.py b/deps/v8/tools/clusterfuzz/v8_suppressions.py
index f1aaa6448a..a3fa351f68 100644
--- a/deps/v8/tools/clusterfuzz/v8_suppressions.py
+++ b/deps/v8/tools/clusterfuzz/v8_suppressions.py
@@ -27,6 +27,13 @@ to silence a particular class of problems.
import itertools
import re
+try:
+ # Python 3
+ from itertools import zip_longest
+except ImportError:
+ # Python 2
+ from itertools import izip_longest as zip_longest
+
# Max line length for regular expressions checking for lines to ignore.
MAX_LINE_LENGTH = 512
@@ -37,32 +44,6 @@ CARET_RE = re.compile(r'^\s*\^\s*$')
# V8, e.g. '/v8/test/mjsunit/d8-performance-now.js' including /v8/. A test will
# be suppressed if one of the files below was used to mutate the test.
IGNORE_SOURCES = {
- # This contains a usage of f.arguments that often fires.
- 'crbug.com/662424': [
- '/v8/test/mjsunit/bugs/bug-222.js',
- '/v8/test/mjsunit/bugs/bug-941049.js',
- '/v8/test/mjsunit/regress/regress-crbug-668795.js',
- '/v8/test/mjsunit/regress/regress-1079.js',
- '/v8/test/mjsunit/regress/regress-2989.js',
- ],
-
- 'crbug.com/688159': [
- '/v8/test/mjsunit/es7/exponentiation-operator.js',
- ],
-
- # TODO(machenbach): Implement blacklisting files for particular configs only,
- # here ignition_eager.
- 'crbug.com/691589': [
- '/v8/test/mjsunit/regress/regress-1200351.js',
- ],
-
- 'crbug.com/691587': [
- '/v8/test/mjsunit/asm/regress-674089.js',
- ],
-
- 'crbug.com/774805': [
- '/v8/test/mjsunit/console.js',
- ],
}
# Ignore by test case pattern. Map from config->bug->regexp. Config '' is used
@@ -81,10 +62,6 @@ IGNORE_TEST_CASES = {
# Regular expressions are assumed to be compiled. We use regexp.search.
IGNORE_OUTPUT = {
'': {
- 'crbug.com/664068':
- re.compile(r'RangeError(?!: byte length)', re.S),
- 'crbug.com/667678':
- re.compile(r'\[native code\]', re.S),
'crbug.com/689877':
re.compile(r'^.*SyntaxError: .*Stack overflow$', re.M),
},
@@ -116,9 +93,6 @@ ALLOWED_LINE_DIFFS = [
# crbug.com/680064. This subsumes one of the above expressions.
r'^(.*)TypeError: .* function$',
-
- # crbug.com/664068
- r'^(.*)(?:Array buffer allocation failed|Invalid array buffer length)(.*)$',
]
# Lines matching any of the following regular expressions will be ignored.
@@ -142,8 +116,33 @@ IGNORE_LINES = [re.compile(exp) for exp in IGNORE_LINES]
ORIGINAL_SOURCE_PREFIX = 'v8-foozzie source: '
+
+def get_output_capped(output1, output2):
+ """Returns a pair of stdout strings.
+
+ The strings are safely capped if at least one run has crashed.
+ """
+
+ # No length difference or no crash -> no capping.
+ if (len(output1.stdout) == len(output2.stdout) or
+ (not output1.HasCrashed() and not output2.HasCrashed())):
+ return output1.stdout, output2.stdout
+
+ # Both runs have crashed, cap by the shorter output.
+ if output1.HasCrashed() and output2.HasCrashed():
+ cap = min(len(output1.stdout), len(output2.stdout))
+ # Only the first run has crashed, cap by its output length.
+ elif output1.HasCrashed():
+ cap = len(output1.stdout)
+ # Similar if only the second run has crashed.
+ else:
+ cap = len(output2.stdout)
+
+ return output1.stdout[0:cap], output2.stdout[0:cap]
+
+
def line_pairs(lines):
- return itertools.izip_longest(
+ return zip_longest(
lines, itertools.islice(lines, 1, None), fillvalue=None)
@@ -191,14 +190,14 @@ def diff_output(output1, output2, allowed, ignore1, ignore2):
return all(not e.match(line) for e in ignore)
return fun
- lines1 = filter(useful_line(ignore1), output1)
- lines2 = filter(useful_line(ignore2), output2)
+ lines1 = list(filter(useful_line(ignore1), output1))
+ lines2 = list(filter(useful_line(ignore2), output2))
# This keeps track where we are in the original source file of the fuzz
# test case.
source = None
- for ((line1, lookahead1), (line2, lookahead2)) in itertools.izip_longest(
+ for ((line1, lookahead1), (line2, lookahead2)) in zip_longest(
line_pairs(lines1), line_pairs(lines2), fillvalue=(None, None)):
# Only one of the two iterators should run out.
@@ -237,8 +236,8 @@ def diff_output(output1, output2, allowed, ignore1, ignore2):
return None, source
-def get_suppression(arch1, config1, arch2, config2):
- return V8Suppression(arch1, config1, arch2, config2)
+def get_suppression(arch1, config1, arch2, config2, skip=False):
+ return V8Suppression(arch1, config1, arch2, config2, skip)
class Suppression(object):
@@ -259,17 +258,30 @@ class Suppression(object):
class V8Suppression(Suppression):
- def __init__(self, arch1, config1, arch2, config2):
+ def __init__(self, arch1, config1, arch2, config2, skip):
self.arch1 = arch1
self.config1 = config1
self.arch2 = arch2
self.config2 = config2
+ if skip:
+ self.allowed_line_diffs = []
+ self.ignore_output = {}
+ self.ignore_sources = {}
+ else:
+ self.allowed_line_diffs = ALLOWED_LINE_DIFFS
+ self.ignore_output = IGNORE_OUTPUT
+ self.ignore_sources = IGNORE_SOURCES
def diff(self, output1, output2):
+ # Diff capped lines in the presence of crashes.
+ return self.diff_lines(
+ *map(str.splitlines, get_output_capped(output1, output2)))
+
+ def diff_lines(self, output1_lines, output2_lines):
return diff_output(
- output1.splitlines(),
- output2.splitlines(),
- ALLOWED_LINE_DIFFS,
+ output1_lines,
+ output2_lines,
+ self.allowed_line_diffs,
IGNORE_LINES,
IGNORE_LINES,
)
@@ -286,13 +298,13 @@ class V8Suppression(Suppression):
# already minimized test cases might have dropped the delimiter line.
content = testcase
for key in ['', self.arch1, self.arch2, self.config1, self.config2]:
- for bug, exp in IGNORE_TEST_CASES.get(key, {}).iteritems():
+ for bug, exp in IGNORE_TEST_CASES.get(key, {}).items():
if exp.search(content):
return bug
return None
def ignore_by_metadata(self, metadata):
- for bug, sources in IGNORE_SOURCES.iteritems():
+ for bug, sources in self.ignore_sources.items():
for source in sources:
if source in metadata['sources']:
return bug
@@ -306,12 +318,12 @@ class V8Suppression(Suppression):
def ignore_by_output(self, output, arch, config):
def check(mapping):
- for bug, exp in mapping.iteritems():
+ for bug, exp in mapping.items():
if exp.search(output):
return bug
return None
for key in ['', arch, config]:
- bug = check(IGNORE_OUTPUT.get(key, {}))
+ bug = check(self.ignore_output.get(key, {}))
if bug:
return bug
return None
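get_output_capped trims the longer stdout when a run has crashed, since a crashed run's truncated output would otherwise register as a spurious diff. An illustrative use, assuming an Output object with .stdout and .HasCrashed() as in v8_commands.py:

    class FakeOutput(object):
        def __init__(self, stdout, crashed):
            self.stdout = stdout
            self._crashed = crashed

        def HasCrashed(self):
            return self._crashed

    crashy = FakeOutput('1\n2', crashed=True)
    healthy = FakeOutput('1\n2\n3', crashed=False)
    # The healthy run is capped to the crashy run's length: its extra
    # trailing output only exists because the other run died early.
    assert get_output_capped(crashy, healthy) == ('1\n2', '1\n2')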
diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js
index df6770f9a8..71e3e6a5d8 100644
--- a/deps/v8/tools/codemap.js
+++ b/deps/v8/tools/codemap.js
@@ -140,7 +140,7 @@ CodeMap.prototype.addStaticCode = function(
CodeMap.prototype.markPages_ = function(start, end) {
for (var addr = start; addr <= end;
addr += CodeMap.PAGE_SIZE) {
- this.pages_[addr >>> CodeMap.PAGE_ALIGNMENT] = 1;
+ this.pages_[(addr / CodeMap.PAGE_SIZE)|0] = 1;
}
};
@@ -187,7 +187,7 @@ CodeMap.prototype.findInTree_ = function(tree, addr) {
* @param {number} addr Address.
*/
CodeMap.prototype.findAddress = function(addr) {
- var pageAddr = addr >>> CodeMap.PAGE_ALIGNMENT;
+ var pageAddr = (addr / CodeMap.PAGE_SIZE)|0;
if (pageAddr in this.pages_) {
// Static code entries can contain "holes" of unnamed code.
// In this case, the whole library is assigned to this address.
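The codemap.js change replaces '>>>' with division because JavaScript's shift operators coerce their operand to uint32, silently dropping the high bits of 64-bit code addresses. A Python demonstration of the difference (the PAGE_SIZE and PAGE_ALIGNMENT values are assumed; the real constants live in codemap.js):

    PAGE_SIZE = 4096       # assumed
    PAGE_ALIGNMENT = 12    # log2(PAGE_SIZE), assumed

    def old_page_index(addr):
        # Emulates 'addr >>> PAGE_ALIGNMENT': ToUint32 first, then shift.
        return (addr & 0xFFFFFFFF) >> PAGE_ALIGNMENT

    def new_page_index(addr):
        # Emulates '(addr / PAGE_SIZE) | 0': divide first, then truncate,
        # which stays correct for addresses well beyond 2**32.
        return int(addr / PAGE_SIZE)

    addr = 0x1234567890  # an address above 2**32
    assert new_page_index(addr) == addr // PAGE_SIZE
    assert old_page_index(addr) != addr // PAGE_SIZE  # high bits lost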
diff --git a/deps/v8/tools/debug_helper/BUILD.gn b/deps/v8/tools/debug_helper/BUILD.gn
index 10b94a5295..b151e12918 100644
--- a/deps/v8/tools/debug_helper/BUILD.gn
+++ b/deps/v8/tools/debug_helper/BUILD.gn
@@ -32,15 +32,11 @@ action("run_mkgrokdump") {
testonly = true
visibility = [ ":*" ]
- deps = [
- "../../test/mkgrokdump:mkgrokdump($v8_generator_toolchain)",
- ]
+ deps = [ "../../test/mkgrokdump:mkgrokdump($v8_generator_toolchain)" ]
script = "../run.py"
- outputs = [
- "$target_gen_dir/v8heapconst.py",
- ]
+ outputs = [ "$target_gen_dir/v8heapconst.py" ]
args = [
"./" + rebase_path(
@@ -56,13 +52,9 @@ action("run_mkgrokdump") {
action("gen_heap_constants") {
testonly = true
visibility = [ ":*" ]
- deps = [
- ":run_mkgrokdump",
- ]
+ deps = [ ":run_mkgrokdump" ]
script = "gen-heap-constants.py"
- outputs = [
- "$target_gen_dir/heap-constants-gen.cc",
- ]
+ outputs = [ "$target_gen_dir/heap-constants-gen.cc" ]
args = [
rebase_path(target_gen_dir, root_build_dir),
rebase_path("$target_gen_dir/heap-constants-gen.cc", root_build_dir),
@@ -72,13 +64,12 @@ action("gen_heap_constants") {
v8_component("v8_debug_helper") {
testonly = true
- public = [
- "debug-helper.h",
- ]
+ public = [ "debug-helper.h" ]
sources = [
"$target_gen_dir/../../torque-generated/class-debug-readers-tq.cc",
"$target_gen_dir/../../torque-generated/class-debug-readers-tq.h",
+ "$target_gen_dir/../../torque-generated/instance-types-tq.h",
"$target_gen_dir/heap-constants-gen.cc",
"debug-helper-internal.cc",
"debug-helper-internal.h",
@@ -91,6 +82,7 @@ v8_component("v8_debug_helper") {
deps = [
":gen_heap_constants",
+ "../..:generate_bytecode_builtins_list",
"../..:run_torque",
"../..:v8_headers",
"../..:v8_libbase",
diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.cc b/deps/v8/tools/debug_helper/debug-helper-internal.cc
index 597ea7a639..c64d0ad58c 100644
--- a/deps/v8/tools/debug_helper/debug-helper-internal.cc
+++ b/deps/v8/tools/debug_helper/debug-helper-internal.cc
@@ -8,7 +8,9 @@
namespace i = v8::internal;
-namespace v8_debug_helper_internal {
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
bool IsPointerCompressed(uintptr_t address) {
#if COMPRESS_POINTERS_BOOL
@@ -58,4 +60,6 @@ bool TqObject::IsSuperclassOf(const TqObject* other) const {
return GetName() != other->GetName();
}
-} // namespace v8_debug_helper_internal
+} // namespace debug_helper_internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/tools/debug_helper/debug-helper-internal.h b/deps/v8/tools/debug_helper/debug-helper-internal.h
index 9c498cd5d6..c9812b7635 100644
--- a/deps/v8/tools/debug_helper/debug-helper-internal.h
+++ b/deps/v8/tools/debug_helper/debug-helper-internal.h
@@ -19,7 +19,9 @@
namespace d = v8::debug_helper;
-namespace v8_debug_helper_internal {
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
// A value that was read from the debuggee's memory.
template <typename TValue>
@@ -119,7 +121,7 @@ class ObjectProperty : public PropertyBase {
class ObjectPropertiesResult;
struct ObjectPropertiesResultExtended : public d::ObjectPropertiesResult {
// Back reference for cleanup.
- v8_debug_helper_internal::ObjectPropertiesResult* base;
+ debug_helper_internal::ObjectPropertiesResult* base;
};
// Internal version of API class v8::debug_helper::ObjectPropertiesResult.
@@ -189,6 +191,14 @@ class TqObject {
uintptr_t address_;
};
+// A helpful template so that generated code can be sure that a string type name
+// actually resolves to a type, by repeating the name as the template parameter
+// and the value.
+template <typename T>
+const char* CheckTypeName(const char* name) {
+ return name;
+}
+
// In ptr-compr builds, returns whether the address looks like a compressed
// pointer (zero-extended from 32 bits). Otherwise returns false because no
// pointers can be compressed.
@@ -207,6 +217,8 @@ d::PropertyKind GetArrayKind(d::MemoryAccessResult mem_result);
// Torque class definitions.
extern const d::ClassList kObjectClassList;
-} // namespace v8_debug_helper_internal
+} // namespace debug_helper_internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/deps/v8/tools/debug_helper/debug-helper.h b/deps/v8/tools/debug_helper/debug-helper.h
index cbdd6ff881..df850e81ed 100644
--- a/deps/v8/tools/debug_helper/debug-helper.h
+++ b/deps/v8/tools/debug_helper/debug-helper.h
@@ -71,7 +71,12 @@ struct PropertyBase {
// Statically-determined type, such as from .tq definition. Can be an empty
// string if this property is itself a Torque-defined struct; in that case use
- // |struct_fields| instead.
+ // |struct_fields| instead. This type should be treated as if it were used in
+ // the v8::internal namespace; that is, type "X::Y" can mean any of the
+ // following, in order of decreasing preference:
+ // - v8::internal::X::Y
+ // - v8::X::Y
+ // - X::Y
const char* type;
// In some cases, |type| may be a simple type representing a compressed
diff --git a/deps/v8/tools/debug_helper/gen-heap-constants.py b/deps/v8/tools/debug_helper/gen-heap-constants.py
index 1d81f2e510..6eb7f3743c 100644
--- a/deps/v8/tools/debug_helper/gen-heap-constants.py
+++ b/deps/v8/tools/debug_helper/gen-heap-constants.py
@@ -19,7 +19,9 @@ out = """
#include "src/common/ptr-compr-inl.h"
#include "tools/debug_helper/debug-helper-internal.h"
-namespace v8_debug_helper_internal {
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
"""
def iterate_objects(target_space, camel_space_name):
@@ -70,7 +72,7 @@ if (hasattr(v8heapconst, 'HEAP_FIRST_PAGES')): # Only exists in ptr-compr build
out = out + ' }\n'
out = out + '}\n'
-out = out + '\n}\n'
+out = out + '\n}\n}\n}\n'
try:
with open(sys.argv[2], "r") as out_file:
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
index f3bd4811f2..9146dd4633 100644
--- a/deps/v8/tools/debug_helper/get-object-properties.cc
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -14,7 +14,9 @@
namespace i = v8::internal;
-namespace v8_debug_helper_internal {
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
constexpr char kObject[] = "v8::internal::Object";
constexpr char kTaggedValue[] = "v8::internal::TaggedValue";
@@ -104,6 +106,7 @@ TypedObject GetTypedObjectByInstanceType(uintptr_t address,
case i::INSTANCE_TYPE: \
return {type_source, std::make_unique<Tq##ClassName>(address)};
TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED(INSTANCE_TYPE_CASE)
+ TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(INSTANCE_TYPE_CASE)
#undef INSTANCE_TYPE_CASE
default:
@@ -590,9 +593,11 @@ std::unique_ptr<ObjectPropertiesResult> GetObjectProperties(
stream.str(), kSmi);
}
-} // namespace v8_debug_helper_internal
+} // namespace debug_helper_internal
+} // namespace internal
+} // namespace v8
-namespace di = v8_debug_helper_internal;
+namespace di = v8::internal::debug_helper_internal;
extern "C" {
V8_DEBUG_HELPER_EXPORT d::ObjectPropertiesResult*
diff --git a/deps/v8/tools/debug_helper/heap-constants.cc b/deps/v8/tools/debug_helper/heap-constants.cc
index edb47c80b1..f62dd9b697 100644
--- a/deps/v8/tools/debug_helper/heap-constants.cc
+++ b/deps/v8/tools/debug_helper/heap-constants.cc
@@ -7,7 +7,9 @@
namespace d = v8::debug_helper;
-namespace v8_debug_helper_internal {
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
std::string FindKnownObject(uintptr_t address,
const d::HeapAddresses& heap_addresses) {
@@ -82,4 +84,6 @@ KnownInstanceType FindKnownMapInstanceTypes(
return result;
}
-} // namespace v8_debug_helper_internal
+} // namespace debug_helper_internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/tools/debug_helper/heap-constants.h b/deps/v8/tools/debug_helper/heap-constants.h
index fb2d20df12..89620479ec 100644
--- a/deps/v8/tools/debug_helper/heap-constants.h
+++ b/deps/v8/tools/debug_helper/heap-constants.h
@@ -14,7 +14,9 @@
namespace d = v8::debug_helper;
-namespace v8_debug_helper_internal {
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
// ===== Functions generated by gen-heap-constants.py: =========================
@@ -62,6 +64,8 @@ struct KnownInstanceType {
KnownInstanceType FindKnownMapInstanceTypes(
uintptr_t address, const d::HeapAddresses& heap_addresses);
-} // namespace v8_debug_helper_internal
+} // namespace debug_helper_internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/deps/v8/tools/debug_helper/list-object-classes.cc b/deps/v8/tools/debug_helper/list-object-classes.cc
index 6285aeb2f5..2d723e9218 100644
--- a/deps/v8/tools/debug_helper/list-object-classes.cc
+++ b/deps/v8/tools/debug_helper/list-object-classes.cc
@@ -5,7 +5,7 @@
#include "debug-helper-internal.h"
#include "torque-generated/class-debug-readers-tq.h"
-namespace di = v8_debug_helper_internal;
+namespace di = v8::internal::debug_helper_internal;
extern "C" {
V8_DEBUG_HELPER_EXPORT const d::ClassList*
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 67118368b4..9d5cbf056a 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python2
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -18,6 +18,9 @@ All arguments are optional. Most combinations should work, e.g.:
gm.py x64 mjsunit/foo cctest/test-bar/*
"""
# See HELP below for additional documentation.
+# Note on Python3 compatibility: gm.py itself is Python3 compatible, but
+# run-tests.py, which will be executed by the same binary, is not; hence
+# the hashbang line at the top of this file explicitly requires Python2.
from __future__ import print_function
import errno
@@ -174,7 +177,7 @@ def _CallWithOutput(cmd):
try:
while True:
try:
- data = os.read(master, 512)
+ data = os.read(master, 512).decode('utf-8')
except OSError as e:
if e.errno != errno.EIO: raise
break # EIO means EOF on some systems
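
The .decode('utf-8') added above is another Python 3 accommodation: os.read() returns bytes there, which cannot be mixed into the str output gm.py accumulates. A self-contained sketch using an ordinary pipe in place of gm.py's pty:

import os

r, w = os.pipe()
os.write(w, 'ninja: no work to do.\n'.encode('utf-8'))
os.close(w)

data = os.read(r, 512).decode('utf-8')  # bytes -> str, as in _CallWithOutput
os.close(r)
print(data, end='')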
diff --git a/deps/v8/tools/gdb-v8-support.py b/deps/v8/tools/gdb-v8-support.py
index f8442bf462..23c6c17cc1 100644
--- a/deps/v8/tools/gdb-v8-support.py
+++ b/deps/v8/tools/gdb-v8-support.py
@@ -209,14 +209,15 @@ GDB_EXTERNAL_EDITOR environment variable.
super(Redirect, self).__init__("redirect", gdb.COMMAND_USER)
def invoke(self, subcommand, from_tty):
- old_stdout = gdb.execute("p dup(1)", to_string=True).split("=")[-1].strip()
+ old_stdout = gdb.execute(
+ "p (int)dup(1)", to_string=True).split("=")[-1].strip()
try:
time_suffix = time.strftime("%Y%m%d-%H%M%S")
fd, file = tempfile.mkstemp(suffix="-%s.gdbout" % time_suffix)
try:
# Temporarily redirect stdout to the created tmp file for the
# duration of the subcommand.
- gdb.execute('p dup2(open("%s", 1), 1)' % file, to_string=True)
+ gdb.execute('p (int)dup2(open("%s", 1), 1)' % file, to_string=True)
# Execute subcommand non-interactively.
result = gdb.execute(subcommand, from_tty=False, to_string=True)
# Write returned string results to the temporary file as well.
@@ -231,11 +232,11 @@ GDB_EXTERNAL_EDITOR environment variable.
print("Open output:\n %s '%s'" % (os.environ['EDITOR'], file))
finally:
# Restore original stdout.
- gdb.execute("p dup2(%s, 1)" % old_stdout, to_string=True)
+ gdb.execute("p (int)dup2(%s, 1)" % old_stdout, to_string=True)
# Close the temporary file.
os.close(fd)
finally:
# Close the originally duplicated stdout descriptor.
- gdb.execute("p close(%s)" % old_stdout, to_string=True)
+ gdb.execute("p (int)close(%s)" % old_stdout, to_string=True)
Redirect()
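
The gdb hunk above only adds (int) casts: gdb typically refuses to call a function such as dup() whose return type it cannot determine from debug info, so the expressions now spell the type out. The underlying save/redirect/restore pattern is ordinary POSIX fd juggling; in plain Python it looks like this (temp-file naming aside, each step mirrors one of the gdb commands):

import os, tempfile

old_stdout = os.dup(1)                       # gdb: p (int)dup(1)
fd, path = tempfile.mkstemp(suffix='.gdbout')
try:
    os.dup2(fd, 1)                           # gdb: p (int)dup2(open(...), 1)
    os.write(1, b'captured output\n')        # lands in the temp file
finally:
    os.dup2(old_stdout, 1)                   # restore the original stdout
    os.close(old_stdout)                     # gdb: p (int)close(old_stdout)
    os.close(fd)
print('stdout restored; captured text is in', path)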
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 1fddfcf83d..b36cd20221 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -263,7 +263,6 @@ extras_accessors = [
'ExternalString, resource, Object, kResourceOffset',
'SeqOneByteString, chars, char, kHeaderSize',
'SeqTwoByteString, chars, char, kHeaderSize',
- 'UncompiledData, inferred_name, String, kInferredNameOffset',
'UncompiledData, start_position, int32_t, kStartPositionOffset',
'UncompiledData, end_position, int32_t, kEndPositionOffset',
'SharedFunctionInfo, raw_function_token_offset, int16_t, kFunctionTokenOffsetOffset',
diff --git a/deps/v8/tools/generate-runtime-call-stats.py b/deps/v8/tools/generate-runtime-call-stats.py
index 2f9ba17871..ba10b799a1 100755
--- a/deps/v8/tools/generate-runtime-call-stats.py
+++ b/deps/v8/tools/generate-runtime-call-stats.py
@@ -9,6 +9,7 @@
import argparse
import csv
import json
+import glob
import os
import pathlib
import re
@@ -18,11 +19,12 @@ import statistics
import subprocess
import sys
import tempfile
-import gzip
from callstats_groups import RUNTIME_CALL_STATS_GROUPS
+JSON_FILE_EXTENSION=".pb_converted.json"
+
def parse_args():
parser = argparse.ArgumentParser(
description="Run story and collect runtime call stats.")
@@ -375,23 +377,19 @@ def collect_buckets(story, group=True, repeats=1, output_dir="."):
story_dir = f"{story.replace(':', '_')}_{i + 1}"
trace_dir = os.path.join(output_dir, "artifacts", story_dir, "trace",
"traceEvents")
- trace_file = os.path.join(trace_dir, "results.json")
-
- # this script always unzips the json file and stores the output in
- # results.json so just re-use that if it already exists, otherwise unzip the
- # one file found in the traceEvents directory.
- if not os.path.isfile(trace_file):
- trace_files = os.listdir(trace_dir)
- if len(trace_files) != 1:
- print("Expecting just one file but got: %s" % trace_files)
- sys.exit(1)
- gz_trace_file = os.path.join(trace_dir, trace_files[0])
- trace_file = os.path.join(trace_dir, "results.json")
+ # run_benchmark now dumps two files: a .pb.gz file and a .pb_converted.json
+ # file. We only need the latter.
+ trace_file_glob = os.path.join(trace_dir, "*" + JSON_FILE_EXTENSION)
+ trace_files = glob.glob(trace_file_glob)
+ if not trace_files:
+ print("Could not find *%s file in %s" % (JSON_FILE_EXTENSION, trace_dir))
+ sys.exit(1)
+ if len(trace_files) > 1:
+ print("Expecting one file but got: %s" % trace_files)
+ sys.exit(1)
- with gzip.open(gz_trace_file, "rb") as f_in:
- with open(trace_file, "wb") as f_out:
- shutil.copyfileobj(f_in, f_out)
+ trace_file = trace_files[0]
output = process_trace(trace_file)
for name in output:
@@ -474,12 +472,12 @@ def main():
if retain == "none":
shutil.rmtree(output_dir)
elif retain == "json":
- # Delete all files bottom up except .json.gz files and attempt to delete
- # subdirectories (ignoring errors).
+ # Delete all files bottom up except ones ending in JSON_FILE_EXTENSION and
+ # attempt to delete subdirectories (ignoring errors).
for dir_name, subdir_list, file_list in os.walk(
output_dir, topdown=False):
for file_name in file_list:
- if not file_name.endswith(".json.gz"):
+ if not file_name.endswith(JSON_FILE_EXTENSION):
os.remove(os.path.join(dir_name, file_name))
for subdir in subdir_list:
try:
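
The new selection logic above reads cleanly in isolation: run_benchmark is expected to leave exactly one *.pb_converted.json per traceEvents directory, and zero or multiple matches are both errors. A standalone sketch (the example directory name is illustrative):

import glob, os, sys

JSON_FILE_EXTENSION = ".pb_converted.json"

def find_trace_file(trace_dir):
    trace_files = glob.glob(os.path.join(trace_dir, "*" + JSON_FILE_EXTENSION))
    if not trace_files:
        sys.exit("Could not find *%s file in %s" % (JSON_FILE_EXTENSION, trace_dir))
    if len(trace_files) > 1:
        sys.exit("Expecting one file but got: %s" % trace_files)
    return trace_files[0]

# e.g. find_trace_file("artifacts/my_story_1/trace/traceEvents")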
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index 765d8f3a31..9837acce3d 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Categories for instance types.
-const CATEGORIES = new Map([
+export const CATEGORIES = new Map([
[
'user', new Set([
'CONS_ONE_BYTE_STRING_TYPE',
@@ -204,7 +204,7 @@ const CATEGORIES = new Map([
]);
// Maps category to description text that is shown in html.
-const CATEGORY_NAMES = new Map([
+export const CATEGORY_NAMES = new Map([
['user', 'JS'],
['system', 'Metadata'],
['code', 'Code'],
diff --git a/deps/v8/tools/heap-stats/details-selection.html b/deps/v8/tools/heap-stats/details-selection-template.html
index 04b274a8d1..cd429bf1a5 100644
--- a/deps/v8/tools/heap-stats/details-selection.html
+++ b/deps/v8/tools/heap-stats/details-selection-template.html
@@ -1,7 +1,6 @@
<!-- Copyright 2018 the V8 project authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
-<template id="details-selection-template">
<style>
#dataSelectionSection {
display: none;
@@ -139,7 +138,3 @@ found in the LICENSE file. -->
<div id="categories"></div>
</section>
-</template>
-
-<script type="text/javascript" src="categories.js"></script>
-<script type="text/javascript" src="details-selection.js"></script>
diff --git a/deps/v8/tools/heap-stats/details-selection.js b/deps/v8/tools/heap-stats/details-selection.js
index 1e9cc83cff..f7e32733d9 100644
--- a/deps/v8/tools/heap-stats/details-selection.js
+++ b/deps/v8/tools/heap-stats/details-selection.js
@@ -4,19 +4,18 @@
'use strict';
-const details_selection_template =
- document.currentScript.ownerDocument.querySelector(
- '#details-selection-template');
+import {CATEGORIES, CATEGORY_NAMES} from './categories.js';
-const VIEW_BY_INSTANCE_TYPE = 'by-instance-type';
-const VIEW_BY_INSTANCE_CATEGORY = 'by-instance-category';
-const VIEW_BY_FIELD_TYPE = 'by-field-type';
+export const VIEW_BY_INSTANCE_TYPE = 'by-instance-type';
+export const VIEW_BY_INSTANCE_CATEGORY = 'by-instance-category';
+export const VIEW_BY_FIELD_TYPE = 'by-field-type';
-class DetailsSelection extends HTMLElement {
+defineCustomElement('details-selection', (templateText) =>
+ class DetailsSelection extends HTMLElement {
constructor() {
super();
const shadowRoot = this.attachShadow({mode: 'open'});
- shadowRoot.appendChild(details_selection_template.content.cloneNode(true));
+ shadowRoot.innerHTML = templateText;
this.isolateSelect.addEventListener(
'change', e => this.handleIsolateChange(e));
this.dataViewSelect.addEventListener(
@@ -403,6 +402,4 @@ class DetailsSelection extends HTMLElement {
link.click();
this.shadowRoot.removeChild(link);
}
-}
-
-customElements.define('details-selection', DetailsSelection);
+});
diff --git a/deps/v8/tools/heap-stats/global-timeline.html b/deps/v8/tools/heap-stats/global-timeline-template.html
index 788f966735..bb11b44961 100644
--- a/deps/v8/tools/heap-stats/global-timeline.html
+++ b/deps/v8/tools/heap-stats/global-timeline-template.html
@@ -1,7 +1,6 @@
<!-- Copyright 2018 the V8 project authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
-<template id="global-timeline-template">
<style>
#chart {
width: 100%;
@@ -12,5 +11,3 @@ found in the LICENSE file. -->
<h2>Timeline</h2>
<div id="chart"></div>
</div>
-</template>
-<script type="text/javascript" src="global-timeline.js"></script> \ No newline at end of file
diff --git a/deps/v8/tools/heap-stats/global-timeline.js b/deps/v8/tools/heap-stats/global-timeline.js
index c34ba2b913..2f16b1bdfb 100644
--- a/deps/v8/tools/heap-stats/global-timeline.js
+++ b/deps/v8/tools/heap-stats/global-timeline.js
@@ -4,15 +4,18 @@
'use strict';
-const global_timeline_template =
- document.currentScript.ownerDocument.querySelector(
- '#global-timeline-template');
-
-class GlobalTimeline extends HTMLElement {
+import {
+ VIEW_BY_INSTANCE_TYPE,
+ VIEW_BY_INSTANCE_CATEGORY,
+ VIEW_BY_FIELD_TYPE
+} from './details-selection.js';
+
+defineCustomElement('global-timeline', (templateText) =>
+ class GlobalTimeline extends HTMLElement {
constructor() {
super();
const shadowRoot = this.attachShadow({mode: 'open'});
- shadowRoot.appendChild(global_timeline_template.content.cloneNode(true));
+ shadowRoot.innerHTML = templateText;
}
$(id) {
@@ -217,6 +220,4 @@ class GlobalTimeline extends HTMLElement {
this.show();
chart.draw(data, google.charts.Line.convertOptions(options));
}
-}
-
-customElements.define('global-timeline', GlobalTimeline);
+});
diff --git a/deps/v8/tools/heap-stats/helper.js b/deps/v8/tools/heap-stats/helper.js
index 00f68f9d62..8416407ecc 100644
--- a/deps/v8/tools/heap-stats/helper.js
+++ b/deps/v8/tools/heap-stats/helper.js
@@ -21,3 +21,10 @@ function formatBytes(bytes) {
function formatSeconds(millis) {
return (millis * kMillis2Seconds).toFixed(2) + 's';
}
+
+function defineCustomElement(name, generator) {
+ let htmlTemplatePath = name + '-template.html';
+ fetch(htmlTemplatePath)
+ .then(stream => stream.text())
+ .then(templateText => customElements.define(name, generator(templateText)));
+}
diff --git a/deps/v8/tools/heap-stats/histogram-viewer.html b/deps/v8/tools/heap-stats/histogram-viewer-template.html
index 93fe980978..42c8e706af 100644
--- a/deps/v8/tools/heap-stats/histogram-viewer.html
+++ b/deps/v8/tools/heap-stats/histogram-viewer-template.html
@@ -1,7 +1,6 @@
<!-- Copyright 2018 the V8 project authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
-<template id="histogram-viewer-template">
<style>
#chart {
width: 100%;
@@ -15,5 +14,3 @@ found in the LICENSE file. -->
</ul>
<div id="chart"></div>
</div>
-</template>
-<script type="text/javascript" src="histogram-viewer.js"></script> \ No newline at end of file
diff --git a/deps/v8/tools/heap-stats/histogram-viewer.js b/deps/v8/tools/heap-stats/histogram-viewer.js
index 240c6cbc7e..4f1e02f8c6 100644
--- a/deps/v8/tools/heap-stats/histogram-viewer.js
+++ b/deps/v8/tools/heap-stats/histogram-viewer.js
@@ -4,15 +4,18 @@
'use strict';
-const histogram_viewer_template =
- document.currentScript.ownerDocument.querySelector(
- '#histogram-viewer-template');
-
-class HistogramViewer extends HTMLElement {
+import {
+ VIEW_BY_INSTANCE_TYPE,
+ VIEW_BY_INSTANCE_CATEGORY,
+ VIEW_BY_FIELD_TYPE
+} from './details-selection.js';
+
+defineCustomElement('histogram-viewer', (templateText) =>
+ class HistogramViewer extends HTMLElement {
constructor() {
super();
const shadowRoot = this.attachShadow({mode: 'open'});
- shadowRoot.appendChild(histogram_viewer_template.content.cloneNode(true));
+ shadowRoot.innerHTML = templateText;
}
$(id) {
@@ -185,6 +188,4 @@ class HistogramViewer extends HTMLElement {
this.show();
chart.draw(data, options);
}
-}
-
-customElements.define('histogram-viewer', HistogramViewer);
+});
diff --git a/deps/v8/tools/heap-stats/index.html b/deps/v8/tools/heap-stats/index.html
index 11fac21a3d..efb74af011 100644
--- a/deps/v8/tools/heap-stats/index.html
+++ b/deps/v8/tools/heap-stats/index.html
@@ -16,12 +16,14 @@ found in the LICENSE file. -->
integrity="sha256-N1z6ddQzX83fjw8v7uSNe7/MgOmMKdwFUv1+AJMDqNM="
crossorigin="anonymous"></script>
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/oboe.js/2.1.5/oboe-browser.min.js"
+ crossorigin="anonymous"></script>
<script src="helper.js"></script>
- <link rel="import" href="details-selection.html">
- <link rel="import" href="global-timeline.html">
- <link rel="import" href="histogram-viewer.html">
- <link rel="import" href="trace-file-reader.html">
+ <script type="module" src="details-selection.js"></script>
+ <script type="module" src="global-timeline.js"></script>
+ <script type="module" src="histogram-viewer.js"></script>
+ <script type="module" src="trace-file-reader.js"></script>
<style>
body {
diff --git a/deps/v8/tools/heap-stats/model.js b/deps/v8/tools/heap-stats/model.js
index cd6e1e514e..d284d9b213 100644
--- a/deps/v8/tools/heap-stats/model.js
+++ b/deps/v8/tools/heap-stats/model.js
@@ -4,7 +4,7 @@
'use strict';
-class Isolate {
+export class Isolate {
constructor(address) {
this.address = address;
this.start = null;
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.html b/deps/v8/tools/heap-stats/trace-file-reader-template.html
index c5e5c6f04a..c0fadbf1a1 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.html
+++ b/deps/v8/tools/heap-stats/trace-file-reader-template.html
@@ -1,7 +1,6 @@
<!-- Copyright 2018 the V8 project authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
-<template id="trace-file-reader-template">
<style>
#fileReader {
width: 100%;
@@ -80,7 +79,3 @@ found in the LICENSE file. -->
<div id="spinner"></div>
</div>
</section>
-</template>
-<script type="text/javascript" src="model.js"></script>
-
-<script src="trace-file-reader.js"></script>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
index 86d9d7d551..e297723e6f 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.js
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -4,15 +4,14 @@
'use strict';
-const trace_file_reader_template =
- document.currentScript.ownerDocument.querySelector(
- '#trace-file-reader-template');
+import {Isolate} from './model.js';
-class TraceFileReader extends HTMLElement {
+defineCustomElement('trace-file-reader', (templateText) =>
+ class TraceFileReader extends HTMLElement {
constructor() {
super();
const shadowRoot = this.attachShadow({mode: 'open'});
- shadowRoot.appendChild(trace_file_reader_template.content.cloneNode(true));
+ shadowRoot.innerHTML = templateText;
this.addEventListener('click', e => this.handleClick(e));
this.addEventListener('dragover', e => this.handleDragOver(e));
this.addEventListener('drop', e => this.handleChange(e));
@@ -96,10 +95,13 @@ class TraceFileReader extends HTMLElement {
}
processRawText(file, result) {
- let contents = result.split('\n');
- const return_data = (result.includes('V8.GC_Objects_Stats')) ?
- this.createModelFromChromeTraceFile(contents) :
- this.createModelFromV8TraceFile(contents);
+ let return_data;
+ if (result.includes('V8.GC_Objects_Stats')) {
+ return_data = this.createModelFromChromeTraceFile(result);
+ } else {
+ let contents = result.split('\n');
+ return_data = this.createModelFromV8TraceFile(contents);
+ }
this.extendAndSanitizeModel(return_data);
this.updateLabel('Finished loading \'' + file.name + '\'.');
this.dispatchEvent(new CustomEvent(
@@ -176,74 +178,62 @@ class TraceFileReader extends HTMLElement {
}
createModelFromChromeTraceFile(contents) {
- // Trace files support two formats.
- // {traceEvents: [ data ]}
- const kObjectTraceFile = {
- name: 'object',
- endToken: ']}',
- getDataArray: o => o.traceEvents
- };
- // [ data ]
- const kArrayTraceFile = {
- name: 'array',
- endToken: ']',
- getDataArray: o => o
- };
- const handler =
- (contents[0][0] === '{') ? kObjectTraceFile : kArrayTraceFile;
- console.log(`Processing log as chrome trace file (${handler.name}).`);
+ const data = Object.create(null); // Final data container.
+ const parseOneGCEvent = (actual_data) => {
+ Object.keys(actual_data).forEach(data_set => {
+ const string_entry = actual_data[data_set];
+ try {
+ const entry = JSON.parse(string_entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
+ this.createDatasetIfNeeded(data, entry, data_set);
+ const isolate = entry.isolate;
+ const time = entry.time;
+ const gc_id = entry.id;
+ data[isolate].gcs[gc_id].time = time;
- // Pop last line in log as it might be broken.
- contents.pop();
- // Remove trailing comma.
- contents[contents.length - 1] = contents[contents.length - 1].slice(0, -1);
- // Terminate JSON.
- const sanitized_contents = [...contents, handler.endToken].join('');
+ const field_data = entry.field_data;
+ this.addFieldTypeData(data, isolate, gc_id, data_set,
+ field_data.tagged_fields,
+ field_data.inobject_smi_fields,
+ field_data.embedder_fields,
+ field_data.unboxed_double_fields,
+ field_data.boxed_double_fields,
+ field_data.string_data,
+ field_data.other_raw_fields);
- const data = Object.create(null); // Final data container.
+ data[isolate].gcs[gc_id][data_set].bucket_sizes =
+ entry.bucket_sizes;
+ for (let [instance_type, value] of Object.entries(
+ entry.type_data)) {
+ // Trace file format uses markers that do not have actual
+ // properties.
+ if (!('overall' in value)) continue;
+ this.addInstanceTypeData(
+ data, isolate, gc_id, data_set, instance_type, value);
+ }
+ } catch (e) {
+ console.error('Unable to parse data set entry', e);
+ }
+ });
+ };
+ console.log(`Processing log as chrome trace file.`);
try {
- const raw_data = JSON.parse(sanitized_contents);
- const raw_array_data = handler.getDataArray(raw_data);
- raw_array_data.filter(e => e.name === 'V8.GC_Objects_Stats')
- .forEach(trace_data => {
- const actual_data = trace_data.args;
- const data_sets = new Set(Object.keys(actual_data));
- Object.keys(actual_data).forEach(data_set => {
- const string_entry = actual_data[data_set];
- try {
- const entry = JSON.parse(string_entry);
- this.createOrUpdateEntryIfNeeded(data, entry);
- this.createDatasetIfNeeded(data, entry, data_set);
- const isolate = entry.isolate;
- const time = entry.time;
- const gc_id = entry.id;
- data[isolate].gcs[gc_id].time = time;
-
- const field_data = entry.field_data;
- this.addFieldTypeData(data, isolate, gc_id, data_set,
- field_data.tagged_fields,
- field_data.inobject_smi_fields,
- field_data.embedder_fields,
- field_data.unboxed_double_fields,
- field_data.boxed_double_fields,
- field_data.string_data,
- field_data.other_raw_fields);
+ let gc_events_filter = (event) => {
+ if (event.name == 'V8.GC_Objects_Stats') {
+ parseOneGCEvent(event.args);
+ }
+ return oboe.drop;
+ };
- data[isolate].gcs[gc_id][data_set].bucket_sizes =
- entry.bucket_sizes;
- for (let [instance_type, value] of Object.entries(
- entry.type_data)) {
- // Trace file format uses markers that do not have actual
- // properties.
- if (!('overall' in value)) continue;
- this.addInstanceTypeData(
- data, isolate, gc_id, data_set, instance_type, value);
- }
- } catch (e) {
- console.log('Unable to parse data set entry', e);
- }
- });
- });
+ let oboe_stream = oboe();
+ // Trace files support two formats.
+ oboe_stream
+ // 1) {traceEvents: [ data ]}
+ .node('traceEvents.*', gc_events_filter)
+ // 2) [ data ]
+ .node('!.*', gc_events_filter)
+ .fail(() => { throw new Error("Trace data parse failed!"); });
+ oboe_stream.emit('data', contents);
} catch (e) {
console.error('Unable to parse chrome trace file.', e);
}
@@ -314,6 +304,4 @@ class TraceFileReader extends HTMLElement {
}
return data;
}
-}
-
-customElements.define('trace-file-reader', TraceFileReader);
+});
diff --git a/deps/v8/tools/map-processor.html b/deps/v8/tools/map-processor.html
index 70c205c771..a453c9a189 100644
--- a/deps/v8/tools/map-processor.html
+++ b/deps/v8/tools/map-processor.html
@@ -113,6 +113,17 @@ h1, h2, h3, section {
max-height: 200px;
overflow-y: scroll;
}
+
+#stats .transitionType {
+ text-align: right;
+}
+#stats .transitionType tr td:nth-child(2) {
+ text-align: left;
+}
+#stats .transitionType tr:nth-child(1) td {
+ border-bottom: 1px black dotted;
+}
+
#timeline {
position: relative;
height: 300px;
@@ -436,9 +447,9 @@ function td(textOrNode) {
return node;
}
+
function tr() {
- let node = document.createElement("tr");
- return node;
+ return document.createElement("tr");
}
function define(prototype, name, fn) {
@@ -1103,7 +1114,7 @@ class StatsView {
];
let text = "";
- let tableNode = table();
+ let tableNode = table("transitionType");
let name, filter;
let total = this.timeline.size();
pairs.forEach(([name, color, filter]) => {
@@ -1120,7 +1131,7 @@ class StatsView {
let count = this.timeline.count(filter);
row.appendChild(td(count));
let percent = Math.round(count / total * 1000) / 10;
- row.appendChild(td(percent + "%"));
+ row.appendChild(td(percent.toFixed(1) + "%"));
tableNode.appendChild(row);
});
this.node.appendChild(tableNode);
@@ -1204,7 +1215,7 @@ function transitionTypeToColor(type) {
<h2>Instructions</h2>
<section>
- <p>Visualize Map trees that have been gathere using <code>--trace-maps</code>.</p>
+ <p>Visualize Map trees that have been gathered using <code>--trace-maps</code>.</p>
</section>
<div id="tooltip">
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index cd638bda27..f3e46158b9 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -64,6 +64,8 @@ class MetaBuildWrapper(object):
self.luci_tryservers = {}
self.masters = {}
self.mixins = {}
+ self.isolate_exe = 'isolate.exe' if self.platform.startswith(
+ 'win') else 'isolate'
def Main(self, args):
self.ParseArgs(args)
@@ -360,19 +362,39 @@ class MetaBuildWrapper(object):
for k, v in self._DefaultDimensions() + self.args.dimensions:
dimensions += ['-d', k, v]
+ archive_json_path = self.ToSrcRelPath(
+ '%s/%s.archive.json' % (build_dir, target))
cmd = [
- self.executable,
- self.PathJoin('tools', 'swarming_client', 'isolate.py'),
+ self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
+ self.isolate_exe),
'archive',
+ '-i',
+ self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
'-I', 'isolateserver.appspot.com',
+ '-dump-json',
+ archive_json_path,
]
- ret, out, _ = self.Run(cmd, force_verbose=False)
+ ret, _, _ = self.Run(cmd, force_verbose=False)
if ret:
return ret
- isolated_hash = out.splitlines()[0].split()[0]
+ try:
+ archive_hashes = json.loads(self.ReadFile(archive_json_path))
+ except Exception:
+ self.Print(
+ 'Failed to read JSON file "%s"' % archive_json_path, file=sys.stderr)
+ return 1
+ try:
+ isolated_hash = archive_hashes[target]
+ except Exception:
+ self.Print(
+ 'Cannot find hash for "%s" in "%s", file content: %s' %
+ (target, archive_json_path, archive_hashes),
+ file=sys.stderr)
+ return 1
+
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'swarming.py'),
@@ -388,11 +410,11 @@ class MetaBuildWrapper(object):
def _RunLocallyIsolated(self, build_dir, target):
cmd = [
- self.executable,
- self.PathJoin('tools', 'swarming_client', 'isolate.py'),
+ self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
+ self.isolate_exe),
'run',
- '-s',
- self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
+ '-i',
+ self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
]
if self.args.extra_args:
cmd += ['--'] + self.args.extra_args
@@ -789,13 +811,11 @@ class MetaBuildWrapper(object):
self.WriteIsolateFiles(build_dir, target, runtime_deps)
ret, _, _ = self.Run([
- self.executable,
- self.PathJoin('tools', 'swarming_client', 'isolate.py'),
+ self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
+ self.isolate_exe),
'check',
'-i',
- self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
- '-s',
- self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target))],
+ self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target))],
buffer_output=False)
return ret
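
mb.py now drives the Go-based luci-go isolate binary instead of the Python swarming client, and rather than scraping the hash from stdout it passes -dump-json and reads the target's hash from the resulting file (the unit-test fixture below feeds it {"base_unittests": "fake_hash"}). The lookup, as a standalone sketch:

import json, sys

def isolated_hash_for(archive_json_path, target):
    try:
        with open(archive_json_path) as f:
            archive_hashes = json.load(f)
    except Exception:
        sys.exit('Failed to read JSON file "%s"' % archive_json_path)
    try:
        return archive_hashes[target]
    except KeyError:
        sys.exit('Cannot find hash for "%s" in "%s", file content: %s'
                 % (target, archive_json_path, archive_hashes))

# isolated_hash_for('out/Default/base_unittests.archive.json',
#                   'base_unittests') -> 'fake_hash' with the fixture above.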
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index a22686a5ee..3a0b89b29d 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -533,16 +533,11 @@ class UnitTest(unittest.TestCase):
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
+ 'out/Default/base_unittests.archive.json':
+ ("{\"base_unittests\":\"fake_hash\"}"),
}
- def run_stub(cmd, **_kwargs):
- if 'isolate.py' in cmd[1]:
- return 0, 'fake_hash base_unittests', ''
- else:
- return 0, '', ''
-
mbw = self.fake_mbw(files=files)
- mbw.Run = run_stub
self.check(['run', '-s', '-c', 'debug_goma', '//out/Default',
'base_unittests'], mbw=mbw, ret=0)
self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7',
diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js
index ef362712fe..f8829679db 100644
--- a/deps/v8/tools/profile.js
+++ b/deps/v8/tools/profile.js
@@ -169,8 +169,12 @@ Profile.prototype.addFuncCode = function(
if (entry.size === size && entry.func === func) {
// Entry state has changed.
entry.state = state;
+ } else {
+ this.codeMap_.deleteCode(start);
+ entry = null;
}
- } else {
+ }
+ if (!entry) {
entry = new Profile.DynamicFuncCodeEntry(size, type, func, state);
this.codeMap_.addCode(start, entry);
}
@@ -935,14 +939,16 @@ JsonProfile.prototype.addFuncCode = function(
// TODO(jarin): Insert the code object into the SFI's code list.
var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
if (entry) {
- // TODO(jarin) This does not look correct, we should really
- // update the code object (remove the old one and insert this one).
if (entry.size === size && entry.func === func) {
// Entry state has changed.
entry.state = state;
+ } else {
+ this.codeMap_.deleteCode(start);
+ entry = null;
}
- } else {
- var entry = new CodeMap.CodeEntry(size, name, 'JS');
+ }
+ if (!entry) {
+ entry = new CodeMap.CodeEntry(size, name, 'JS');
this.codeMap_.addCode(start, entry);
entry.codeId = this.codeEntries_.length;
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index 0997e0bb89..716d146136 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -227,7 +227,6 @@ class GitRecipesMixin(object):
args += ["--cc", Quoted(cc)]
if message_file:
args += ["--message-file", Quoted(message_file)]
- args += ["--gerrit"]
# TODO(machenbach): Check output in forced mode. Verify that all required
# base files were uploaded, if not retry.
self.Git(MakeArgs(args), pipe=False, **kwargs)
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 36ee6c1d44..44376b1dc3 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -470,7 +470,7 @@ TBR=reviewer@chromium.org"""
cb=CheckVersionCommit),
Cmd("git cl upload --send-mail "
"-f --bypass-hooks --no-autocc --message-file "
- "\"%s\" --gerrit" % TEST_CONFIG["COMMITMSG_FILE"], ""),
+ "\"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
Cmd("git cl land --bypass-hooks -f", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
@@ -621,7 +621,7 @@ deps = {
self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
Cmd("git cl upload --send-mail -f "
- "--cq-dry-run --bypass-hooks --gerrit", "",
+ "--cq-dry-run --bypass-hooks", "",
cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch -D work-branch", "", cwd=chrome_dir),
@@ -773,7 +773,7 @@ BUG=123,234,345,456,567,v8:123
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
RL("reviewer@chromium.org"), # V8 reviewer.
Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
- "--bypass-hooks --cc \"ulan@chromium.org\" --gerrit", ""),
+ "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
RL("LGTM"), # Enter LGTM for V8 CL.
Cmd("git cl presubmit", "Presubmit successfull\n"),
@@ -909,7 +909,7 @@ NOTREECHECKS=true
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
RL("reviewer@chromium.org"), # V8 reviewer.
Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
- "--bypass-hooks --cc \"ulan@chromium.org\" --gerrit", ""),
+ "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
RL("LGTM"), # Enter LGTM for V8 CL.
Cmd("git cl presubmit", "Presubmit successfull\n"),
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index c09b7cef39..43cb639c83 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -123,9 +123,8 @@ class ModeConfig(object):
self.execution_mode = execution_mode
-DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap",
- "--testing-d8-test-runner"]
-RELEASE_FLAGS = ["--nohard-abort", "--testing-d8-test-runner"]
+DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
+RELEASE_FLAGS = ["--nohard-abort"]
MODES = {
"debug": ModeConfig(
flags=DEBUG_FLAGS,
@@ -345,9 +344,8 @@ class BaseTestRunner(object):
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
+ parser.add_option('--slow-tests-cutoff', type="int", default=100,
+ help='Collect N slowest tests')
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -381,6 +379,8 @@ class BaseTestRunner(object):
help="Timeout for single test in seconds")
parser.add_option("-v", "--verbose", default=False, action="store_true",
help="Verbose output")
+ parser.add_option('--regenerate-expected-files', default=False, action='store_true',
+ help='Regenerate expected files')
# TODO(machenbach): Temporary options for rolling out new test runner
# features.
@@ -705,13 +705,17 @@ class BaseTestRunner(object):
"pointer_compression": self.build_config.pointer_compression,
}
+ def _runner_flags(self):
+ """Extra default flags specific to the test runner implementation."""
+ return []
+
def _create_test_config(self, options):
timeout = options.timeout * self._timeout_scalefactor(options)
return TestConfig(
command_prefix=options.command_prefix,
extra_flags=options.extra_flags,
isolates=options.isolates,
- mode_flags=self.mode_options.flags,
+ mode_flags=self.mode_options.flags + self._runner_flags(),
no_harness=options.no_harness,
noi18n=self.build_config.no_i18n,
random_seed=options.random_seed,
@@ -719,6 +723,7 @@ class BaseTestRunner(object):
shell_dir=self.outdir,
timeout=timeout,
verbose=options.verbose,
+ regenerate_expected_files=options.regenerate_expected_files,
)
def _timeout_scalefactor(self, options):
@@ -792,13 +797,9 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
self.framework_name,
- options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode))
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index a6fb91f912..f3f2e9dadc 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -196,10 +196,10 @@ class Pool():
# gracefully, e.g. missing test files.
internal_error = True
continue
-
- if self.abort_now:
- # SIGINT, SIGTERM or internal hard timeout.
- return
+ finally:
+ if self.abort_now:
+ # SIGINT, SIGTERM or internal hard timeout.
+ return
yield result
break
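
The pool.py change is subtle: the abort check used to sit after the except handlers, where a `continue` executed inside an except branch would skip it entirely. Moving the check into `finally` guarantees it runs on every exit from the try block. A tiny demonstration that `finally` fires even when the handler continues the loop:

def demo():
    for attempt in range(2):
        try:
            if attempt == 0:
                raise ValueError('worker died')
            print('attempt', attempt, 'succeeded')
        except ValueError:
            continue              # jumps to the next iteration...
        finally:
            print('finally ran for attempt', attempt)  # ...but runs first

demo()
# finally ran for attempt 0
# attempt 1 succeeded
# finally ran for attempt 1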
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index c363c2a5ec..7464978eb2 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -14,6 +14,7 @@ ALL_VARIANT_FLAGS = {
"interpreted_regexp": [["--regexp-interpret-all"]],
"jitless": [["--jitless"]],
"minor_mc": [["--minor-mc"]],
+ "no_lfa": [["--no-lazy-feedback-allocation"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
index 39efb60fe9..9b65ca564a 100644
--- a/deps/v8/tools/testrunner/outproc/base.py
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -19,8 +19,13 @@ OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
class BaseOutProc(object):
def process(self, output, reduction=None):
has_unexpected_output = self.has_unexpected_output(output)
+ if has_unexpected_output:
+ self.regenerate_expected_files(output)
return self._create_result(has_unexpected_output, output, reduction)
+ def regenerate_expected_files(self, output):
+ return
+
def has_unexpected_output(self, output):
return self.get_outcome(output) not in self.expected_outcomes
@@ -125,9 +130,11 @@ class ExpectedOutProc(OutProc):
"""Output processor that has is_failure_output depending on comparing the
output with the expected output.
"""
- def __init__(self, expected_outcomes, expected_filename):
+ def __init__(self, expected_outcomes, expected_filename,
+ regenerate_expected_files=False):
super(ExpectedOutProc, self).__init__(expected_outcomes)
self._expected_filename = expected_filename
+ self._regenerate_expected_files = regenerate_expected_files
def _is_failure_output(self, output):
with open(self._expected_filename, 'r') as f:
@@ -143,6 +150,14 @@ class ExpectedOutProc(OutProc):
return True
return False
+ def regenerate_expected_files(self, output):
+ if not self._regenerate_expected_files:
+ return
+ lines = output.stdout.splitlines()
+ with open(self._expected_filename, 'w') as f:
+ for line in lines:
+ f.write(line + '\n')
+
def _act_block_iterator(self, output):
"""Iterates over blocks of actual output lines."""
lines = output.stdout.splitlines()
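
Putting the new pieces together: BaseOutProc.process() calls a regenerate_expected_files() hook that is a no-op by default, and ExpectedOutProc rewrites its .expected file only when the run produced unexpected output and --regenerate-expected-files was passed. A simplified, self-contained miniature (the real classes take an output object with stdout and outcome lists rather than a plain string):

class BaseOutProc(object):
    def process(self, output):
        if self.has_unexpected_output(output):
            self.regenerate_expected_files(output)

    def regenerate_expected_files(self, output):
        return  # no-op by default, as in outproc/base.py

class ExpectedOutProc(BaseOutProc):
    def __init__(self, expected_filename, regenerate_expected_files=False):
        self._expected_filename = expected_filename
        self._regenerate_expected_files = regenerate_expected_files

    def has_unexpected_output(self, output):
        with open(self._expected_filename) as f:
            return f.read() != output

    def regenerate_expected_files(self, output):
        if not self._regenerate_expected_files:
            return
        with open(self._expected_filename, 'w') as f:
            for line in output.splitlines():
                f.write(line + '\n')

with open('demo.expected', 'w') as f:
    f.write('old\n')
proc = ExpectedOutProc('demo.expected', regenerate_expected_files=True)
proc.process('new\n')                    # mismatch -> file rewritten
print(open('demo.expected').read())      # -> new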
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 0ca387000c..1b7acc41b7 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -50,6 +50,9 @@ VARIANT_ALIASES = {
'instruction_scheduling'],
}
+# Extra flags passed to all tests using the standard test runner.
+EXTRA_DEFAULT_FLAGS = ['--testing-d8-test-runner']
+
GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
'--concurrent-recompilation-queue-length=64',
'--concurrent-recompilation-delay=500',
@@ -239,6 +242,9 @@ class StandardTestRunner(base_runner.BaseTestRunner):
prefix="v8-test-runner-")
options.json_test_results = self._temporary_json_output_file.name
+ def _runner_flags(self):
+ return EXTRA_DEFAULT_FLAGS
+
def _parse_variants(self, aliases_str):
# Use developer defaults if no variant was specified.
aliases_str = aliases_str or 'dev'
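
Taken together with the base_runner.py change above, this is how --testing-d8-test-runner moves from the shared mode flags to a standard-runner-only default, presumably so that other runners no longer inherit it: the base class mixes _runner_flags() (empty by default) into every test config, and only the standard runner overrides it. A stripped-down sketch of the composition:

EXTRA_DEFAULT_FLAGS = ['--testing-d8-test-runner']
RELEASE_FLAGS = ['--nohard-abort']

class BaseTestRunner(object):
    def _runner_flags(self):
        """Extra default flags specific to the test runner implementation."""
        return []

    def mode_flags(self):
        # Stand-in for the mode_flags=... line in _create_test_config().
        return RELEASE_FLAGS + self._runner_flags()

class StandardTestRunner(BaseTestRunner):
    def _runner_flags(self):
        return EXTRA_DEFAULT_FLAGS

print(StandardTestRunner().mode_flags())
# ['--nohard-abort', '--testing-d8-test-runner']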
diff --git a/deps/v8/tools/testrunner/test_config.py b/deps/v8/tools/testrunner/test_config.py
index 27ac72bf6c..fed6006069 100644
--- a/deps/v8/tools/testrunner/test_config.py
+++ b/deps/v8/tools/testrunner/test_config.py
@@ -19,7 +19,8 @@ class TestConfig(object):
run_skipped,
shell_dir,
timeout,
- verbose):
+ verbose,
+ regenerate_expected_files=False):
self.command_prefix = command_prefix
self.extra_flags = extra_flags
self.isolates = isolates
@@ -32,3 +33,4 @@ class TestConfig(object):
self.shell_dir = shell_dir
self.timeout = timeout
self.verbose = verbose
+ self.regenerate_expected_files = regenerate_expected_files
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
index 271737897a..b802368183 100644
--- a/deps/v8/tools/testrunner/testproc/fuzzer.py
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -13,9 +13,12 @@ from . import base
EXTRA_FLAGS = [
(0.1, '--always-opt'),
(0.1, '--assert-types'),
+ # TODO(mythria): https://crbug.com/v8/10243
+ # (0.1, '--budget-for-feedback-vector-allocation=0'),
(0.1, '--cache=code'),
(0.1, '--force-slow-path'),
(0.2, '--future'),
+ (0.1, '--interrupt-budget=100'),
(0.1, '--liftoff'),
(0.2, '--no-analyze-environment-liveness'),
(0.1, '--no-enable-sse3'),
@@ -29,6 +32,7 @@ EXTRA_FLAGS = [
(0.1, '--no-enable-bmi2'),
(0.1, '--no-enable-lzcnt'),
(0.1, '--no-enable-popcnt'),
+ (0.3, '--no-lazy-feedback-allocation'),
(0.1, '--no-liftoff'),
(0.1, '--no-opt'),
(0.2, '--no-regexp-tier-up'),
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 9ae589dee0..3357eef76c 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -14,7 +14,6 @@ import sys
import time
from . import base
-from ..local import junit_output
# Base dir of the build products for Release and Debug.
@@ -317,47 +316,8 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
- def __init__(self, framework_name, json_test_results, arch, mode):
+ def __init__(self, framework_name, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
# We want to drop stdout/err for all passed tests on the first try, but we
# need to get outputs for all runs after the first one. To accommodate that,
@@ -366,7 +326,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
self._requirement = base.DROP_PASS_STDOUT
self.framework_name = framework_name
- self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
self.results = []
@@ -412,8 +371,8 @@ class JsonTestProgressIndicator(ProgressIndicator):
def finished(self):
complete_results = []
- if os.path.exists(self.json_test_results):
- with open(self.json_test_results, "r") as f:
+ if os.path.exists(self.options.json_test_results):
+ with open(self.options.json_test_results, "r") as f:
# On bots we might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
@@ -426,15 +385,8 @@ class JsonTestProgressIndicator(ProgressIndicator):
# Sort tests by duration.
self.tests.sort(key=lambda __duration_cmd: __duration_cmd[1], reverse=True)
- slowest_tests = [
- {
- "name": str(test),
- "flags": cmd.args,
- "command": cmd.to_string(relative=True),
- "duration": duration,
- "marked_slow": test.is_slow,
- } for (test, duration, cmd) in self.tests[:20]
- ]
+ cutoff = self.options.slow_tests_cutoff
+ slowest_tests = self._test_records(self.tests[:cutoff])
complete_results.append({
"arch": self.arch,
@@ -445,5 +397,16 @@ class JsonTestProgressIndicator(ProgressIndicator):
"test_total": len(self.tests),
})
- with open(self.json_test_results, "w") as f:
+ with open(self.options.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
+
+ def _test_records(self, tests):
+ return [
+ {
+ "name": str(test),
+ "flags": cmd.args,
+ "command": cmd.to_string(relative=True),
+ "duration": duration,
+ "marked_slow": test.is_slow,
+ } for (test, duration, cmd) in tests
+ ]
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index 93331cfa2d..d0e21785ce 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -62,7 +62,7 @@ if (params.sourceMap) {
sourceMap = SourceMap.load(params.sourceMap);
}
var tickProcessor = new TickProcessor(
- new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
+ new (entriesProviders[params.platform])(params.nm, params.objdump, params.targetRootFS,
params.apkEmbeddedLibrary),
params.separateIc,
params.separateBytecodes,
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 34c2249fcc..2a5b9af83c 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -706,10 +706,14 @@ CppEntriesProvider.prototype.parseNextLine = function() {
};
-function UnixCppEntriesProvider(nmExec, targetRootFS, apkEmbeddedLibrary) {
+function UnixCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
this.symbols = [];
+ // File offset of a symbol minus the virtual address of a symbol found in
+ // the symbol table.
+ this.fileOffsetMinusVma = 0;
this.parsePos = 0;
this.nmExec = nmExec;
+ this.objdumpExec = objdumpExec;
this.targetRootFS = targetRootFS;
this.apkEmbeddedLibrary = apkEmbeddedLibrary;
this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
@@ -731,6 +735,14 @@ UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1)
];
+
+ const objdumpOutput = os.system(this.objdumpExec, ['-h', libName], -1, -1);
+ for (const line of objdumpOutput.split('\n')) {
+ const [,sectionName,,vma,,fileOffset] = line.trim().split(/\s+/);
+ if (sectionName === ".text") {
+ this.fileOffsetMinusVma = parseInt(fileOffset, 16) - parseInt(vma, 16);
+ }
+ }
} catch (e) {
// If the library cannot be found on this system let's not panic.
this.symbols = ['', ''];
@@ -754,7 +766,7 @@ UnixCppEntriesProvider.prototype.parseNextLine = function() {
var fields = line.match(this.FUNC_RE);
var funcInfo = null;
if (fields) {
- funcInfo = { name: fields[3], start: parseInt(fields[1], 16) };
+ funcInfo = { name: fields[3], start: parseInt(fields[1], 16) + this.fileOffsetMinusVma };
if (fields[2]) {
funcInfo.size = parseInt(fields[2], 16);
}
@@ -763,8 +775,8 @@ UnixCppEntriesProvider.prototype.parseNextLine = function() {
};
-function MacCppEntriesProvider(nmExec, targetRootFS, apkEmbeddedLibrary) {
- UnixCppEntriesProvider.call(this, nmExec, targetRootFS, apkEmbeddedLibrary);
+function MacCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
+ UnixCppEntriesProvider.call(this, nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary);
// Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
};
@@ -786,7 +798,7 @@ MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
};
-function WindowsCppEntriesProvider(_ignored_nmExec, targetRootFS,
+function WindowsCppEntriesProvider(_ignored_nmExec, _ignored_objdumpExec, targetRootFS,
_ignored_apkEmbeddedLibrary) {
this.targetRootFS = targetRootFS;
this.symbols = '';
@@ -909,6 +921,8 @@ class ArgumentsProcessor extends BaseArgumentsProcessor {
'Specify that we are running on Mac OS X platform'],
'--nm': ['nm', 'nm',
'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
+ '--objdump': ['objdump', 'objdump',
+ 'Specify the \'objdump\' executable to use (e.g. --objdump=/my_dir/objdump)'],
'--target': ['targetRootFS', '',
'Specify the target root directory for cross environment'],
'--apk-embedded-library': ['apkEmbeddedLibrary', '',
@@ -951,6 +965,7 @@ class ArgumentsProcessor extends BaseArgumentsProcessor {
preprocessJson: null,
targetRootFS: '',
nm: 'nm',
+ objdump: 'objdump',
range: 'auto,auto',
distortion: 0,
timedRange: false,
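
The objdump additions deserve a standalone reading: nm reports each symbol's virtual address (VMA), and per the new comment every symbol start is shifted by the .text section's (file offset - VMA), evidently to line nm's addresses up with the file-relative addresses the log records for mmapped libraries. Sketched in Python, with an illustrative objdump -h section line:

def file_offset_minus_vma(objdump_output):
    for line in objdump_output.split('\n'):
        fields = line.strip().split()
        # objdump -h columns: Idx Name Size VMA LMA File-off Algn
        if len(fields) >= 6 and fields[1] == '.text':
            return int(fields[5], 16) - int(fields[3], 16)
    return 0

sample = ' 11 .text 00123456 0000000000045000 0000000000045000 00044000 2**4'
print(hex(file_offset_minus_vma(sample)))   # -0x1000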
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 116266ca42..14d999b5f4 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -665,6 +665,24 @@ class SystemTest(unittest.TestCase):
self.assertEqual(0, result.returncode, result)
+ def testRunnerFlags(self):
+ """Test that runner-specific flags are passed to tests."""
+ with temp_base() as basedir:
+ result = run_tests(
+ basedir,
+ '--mode=Release',
+ '--progress=verbose',
+ '--variants=default',
+ '--random-seed=42',
+ 'sweet/bananas',
+ '-v',
+ )
+
+ self.assertIn(
+ '--test bananas --random-seed=42 --nohard-abort --testing-d8-test-runner',
+ result.stdout, result)
+ self.assertEqual(0, result.returncode, result)
+
if __name__ == '__main__':
unittest.main()
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index f2f9546bdb..40677b3a0a 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -325,7 +325,13 @@ class CppLintProcessor(CacheableSourceFileProcessor):
return (super(CppLintProcessor, self).IgnoreDir(name)
or (name == 'third_party'))
- IGNORE_LINT = ['export-template.h', 'flag-definitions.h']
+ IGNORE_LINT = [
+ 'export-template.h',
+ 'flag-definitions.h',
+ 'gay-fixed.cc',
+ 'gay-precision.cc',
+ 'gay-shortest.cc',
+ ]
def IgnoreFile(self, name):
return (super(CppLintProcessor, self).IgnoreFile(name)
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index ccd02e67bf..07c4ebf8cb 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -29,8 +29,8 @@ INSTANCE_TYPES = {
65: "BIG_INT_BASE_TYPE",
66: "HEAP_NUMBER_TYPE",
67: "ODDBALL_TYPE",
- 68: "SOURCE_TEXT_MODULE_TYPE",
- 69: "SYNTHETIC_MODULE_TYPE",
+ 68: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 69: "EXPORTED_SUB_CLASS_TYPE",
70: "FOREIGN_TYPE",
71: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
72: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
@@ -60,83 +60,88 @@ INSTANCE_TYPES = {
96: "FEEDBACK_CELL_TYPE",
97: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
98: "INTERCEPTOR_INFO_TYPE",
- 99: "INTERNAL_CLASS_TYPE",
- 100: "INTERPRETER_DATA_TYPE",
- 101: "PROMISE_CAPABILITY_TYPE",
- 102: "PROMISE_REACTION_TYPE",
- 103: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
- 104: "PROTOTYPE_INFO_TYPE",
- 105: "SCRIPT_TYPE",
- 106: "SMI_BOX_TYPE",
- 107: "SMI_PAIR_TYPE",
- 108: "SORT_STATE_TYPE",
- 109: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 110: "STACK_FRAME_INFO_TYPE",
- 111: "STACK_TRACE_FRAME_TYPE",
- 112: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 113: "TUPLE2_TYPE",
- 114: "WASM_CAPI_FUNCTION_DATA_TYPE",
- 115: "WASM_DEBUG_INFO_TYPE",
- 116: "WASM_EXCEPTION_TAG_TYPE",
- 117: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 118: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
- 119: "WASM_JS_FUNCTION_DATA_TYPE",
- 120: "FIXED_ARRAY_TYPE",
- 121: "HASH_TABLE_TYPE",
- 122: "EPHEMERON_HASH_TABLE_TYPE",
- 123: "GLOBAL_DICTIONARY_TYPE",
- 124: "NAME_DICTIONARY_TYPE",
- 125: "NUMBER_DICTIONARY_TYPE",
- 126: "ORDERED_HASH_MAP_TYPE",
- 127: "ORDERED_HASH_SET_TYPE",
- 128: "ORDERED_NAME_DICTIONARY_TYPE",
- 129: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 130: "STRING_TABLE_TYPE",
- 131: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 132: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 133: "SCOPE_INFO_TYPE",
- 134: "SCRIPT_CONTEXT_TABLE_TYPE",
- 135: "BYTE_ARRAY_TYPE",
- 136: "BYTECODE_ARRAY_TYPE",
- 137: "FIXED_DOUBLE_ARRAY_TYPE",
- 138: "AWAIT_CONTEXT_TYPE",
- 139: "BLOCK_CONTEXT_TYPE",
- 140: "CATCH_CONTEXT_TYPE",
- 141: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 142: "EVAL_CONTEXT_TYPE",
- 143: "FUNCTION_CONTEXT_TYPE",
- 144: "MODULE_CONTEXT_TYPE",
- 145: "NATIVE_CONTEXT_TYPE",
- 146: "SCRIPT_CONTEXT_TYPE",
- 147: "WITH_CONTEXT_TYPE",
- 148: "SMALL_ORDERED_HASH_MAP_TYPE",
- 149: "SMALL_ORDERED_HASH_SET_TYPE",
- 150: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 151: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 152: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 153: "WEAK_FIXED_ARRAY_TYPE",
- 154: "TRANSITION_ARRAY_TYPE",
- 155: "CELL_TYPE",
- 156: "CODE_TYPE",
- 157: "CODE_DATA_CONTAINER_TYPE",
+ 99: "INTERPRETER_DATA_TYPE",
+ 100: "PROMISE_CAPABILITY_TYPE",
+ 101: "PROMISE_REACTION_TYPE",
+ 102: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+ 103: "PROTOTYPE_INFO_TYPE",
+ 104: "SCRIPT_TYPE",
+ 105: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 106: "STACK_FRAME_INFO_TYPE",
+ 107: "STACK_TRACE_FRAME_TYPE",
+ 108: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 109: "TUPLE2_TYPE",
+ 110: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 111: "WASM_DEBUG_INFO_TYPE",
+ 112: "WASM_EXCEPTION_TAG_TYPE",
+ 113: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 114: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+ 115: "WASM_JS_FUNCTION_DATA_TYPE",
+ 116: "FIXED_ARRAY_TYPE",
+ 117: "HASH_TABLE_TYPE",
+ 118: "EPHEMERON_HASH_TABLE_TYPE",
+ 119: "GLOBAL_DICTIONARY_TYPE",
+ 120: "NAME_DICTIONARY_TYPE",
+ 121: "NUMBER_DICTIONARY_TYPE",
+ 122: "ORDERED_HASH_MAP_TYPE",
+ 123: "ORDERED_HASH_SET_TYPE",
+ 124: "ORDERED_NAME_DICTIONARY_TYPE",
+ 125: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 126: "STRING_TABLE_TYPE",
+ 127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 128: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 129: "SCOPE_INFO_TYPE",
+ 130: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 131: "BYTE_ARRAY_TYPE",
+ 132: "BYTECODE_ARRAY_TYPE",
+ 133: "FIXED_DOUBLE_ARRAY_TYPE",
+ 134: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
+ 135: "AWAIT_CONTEXT_TYPE",
+ 136: "BLOCK_CONTEXT_TYPE",
+ 137: "CATCH_CONTEXT_TYPE",
+ 138: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 139: "EVAL_CONTEXT_TYPE",
+ 140: "FUNCTION_CONTEXT_TYPE",
+ 141: "MODULE_CONTEXT_TYPE",
+ 142: "NATIVE_CONTEXT_TYPE",
+ 143: "SCRIPT_CONTEXT_TYPE",
+ 144: "WITH_CONTEXT_TYPE",
+ 145: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 146: "SMALL_ORDERED_HASH_SET_TYPE",
+ 147: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 148: "SOURCE_TEXT_MODULE_TYPE",
+ 149: "SYNTHETIC_MODULE_TYPE",
+ 150: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 151: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 152: "WEAK_FIXED_ARRAY_TYPE",
+ 153: "TRANSITION_ARRAY_TYPE",
+ 154: "CELL_TYPE",
+ 155: "CODE_TYPE",
+ 156: "CODE_DATA_CONTAINER_TYPE",
+ 157: "COVERAGE_INFO_TYPE",
158: "DESCRIPTOR_ARRAY_TYPE",
159: "EMBEDDER_DATA_ARRAY_TYPE",
160: "FEEDBACK_METADATA_TYPE",
161: "FEEDBACK_VECTOR_TYPE",
162: "FILLER_TYPE",
163: "FREE_SPACE_TYPE",
- 164: "MAP_TYPE",
- 165: "PREPARSE_DATA_TYPE",
- 166: "PROPERTY_ARRAY_TYPE",
- 167: "PROPERTY_CELL_TYPE",
- 168: "SHARED_FUNCTION_INFO_TYPE",
- 169: "WEAK_ARRAY_LIST_TYPE",
- 170: "WEAK_CELL_TYPE",
- 171: "JS_PROXY_TYPE",
+ 164: "INTERNAL_CLASS_TYPE",
+ 165: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 166: "MAP_TYPE",
+ 167: "PREPARSE_DATA_TYPE",
+ 168: "PROPERTY_ARRAY_TYPE",
+ 169: "PROPERTY_CELL_TYPE",
+ 170: "SHARED_FUNCTION_INFO_TYPE",
+ 171: "SMI_BOX_TYPE",
+ 172: "SMI_PAIR_TYPE",
+ 173: "SORT_STATE_TYPE",
+ 174: "WEAK_ARRAY_LIST_TYPE",
+ 175: "WEAK_CELL_TYPE",
+ 176: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 172: "JS_GLOBAL_OBJECT_TYPE",
- 173: "JS_GLOBAL_PROXY_TYPE",
- 174: "JS_MODULE_NAMESPACE_TYPE",
+ 177: "JS_GLOBAL_OBJECT_TYPE",
+ 178: "JS_GLOBAL_PROXY_TYPE",
+ 179: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1042: "JS_MAP_KEY_ITERATOR_TYPE",
@@ -165,8 +170,8 @@ INSTANCE_TYPES = {
1066: "JS_DATE_TIME_FORMAT_TYPE",
1067: "JS_DISPLAY_NAMES_TYPE",
1068: "JS_ERROR_TYPE",
- 1069: "JS_FINALIZATION_GROUP_TYPE",
- 1070: "JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE",
+ 1069: "JS_FINALIZATION_REGISTRY_TYPE",
+ 1070: "JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE",
1071: "JS_LIST_FORMAT_TYPE",
1072: "JS_LOCALE_TYPE",
1073: "JS_MESSAGE_OBJECT_TYPE",
@@ -194,10 +199,10 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
("read_only_space", 0x00121): (163, "FreeSpaceMap"),
- ("read_only_space", 0x00149): (164, "MetaMap"),
+ ("read_only_space", 0x00149): (166, "MetaMap"),
("read_only_space", 0x0018d): (67, "NullMap"),
("read_only_space", 0x001c5): (158, "DescriptorArrayMap"),
- ("read_only_space", 0x001f5): (153, "WeakFixedArrayMap"),
+ ("read_only_space", 0x001f5): (152, "WeakFixedArrayMap"),
("read_only_space", 0x0021d): (162, "OnePointerFillerMap"),
("read_only_space", 0x00245): (162, "TwoPointerFillerMap"),
("read_only_space", 0x00289): (67, "UninitializedMap"),
@@ -206,19 +211,19 @@ KNOWN_MAPS = {
("read_only_space", 0x0035d): (66, "HeapNumberMap"),
("read_only_space", 0x003a1): (67, "TheHoleMap"),
("read_only_space", 0x00401): (67, "BooleanMap"),
- ("read_only_space", 0x00489): (135, "ByteArrayMap"),
- ("read_only_space", 0x004b1): (120, "FixedArrayMap"),
- ("read_only_space", 0x004d9): (120, "FixedCOWArrayMap"),
- ("read_only_space", 0x00501): (121, "HashTableMap"),
+ ("read_only_space", 0x00489): (131, "ByteArrayMap"),
+ ("read_only_space", 0x004b1): (116, "FixedArrayMap"),
+ ("read_only_space", 0x004d9): (116, "FixedCOWArrayMap"),
+ ("read_only_space", 0x00501): (117, "HashTableMap"),
("read_only_space", 0x00529): (64, "SymbolMap"),
("read_only_space", 0x00551): (40, "OneByteStringMap"),
- ("read_only_space", 0x00579): (133, "ScopeInfoMap"),
- ("read_only_space", 0x005a1): (168, "SharedFunctionInfoMap"),
- ("read_only_space", 0x005c9): (156, "CodeMap"),
- ("read_only_space", 0x005f1): (155, "CellMap"),
- ("read_only_space", 0x00619): (167, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x00579): (129, "ScopeInfoMap"),
+ ("read_only_space", 0x005a1): (170, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x005c9): (155, "CodeMap"),
+ ("read_only_space", 0x005f1): (154, "CellMap"),
+ ("read_only_space", 0x00619): (169, "GlobalPropertyCellMap"),
("read_only_space", 0x00641): (70, "ForeignMap"),
- ("read_only_space", 0x00669): (154, "TransitionArrayMap"),
+ ("read_only_space", 0x00669): (153, "TransitionArrayMap"),
("read_only_space", 0x00691): (45, "ThinOneByteStringMap"),
("read_only_space", 0x006b9): (161, "FeedbackVectorMap"),
("read_only_space", 0x0070d): (67, "ArgumentsMarkerMap"),
@@ -226,113 +231,118 @@ KNOWN_MAPS = {
("read_only_space", 0x007c9): (67, "TerminationExceptionMap"),
("read_only_space", 0x00831): (67, "OptimizedOutMap"),
("read_only_space", 0x00891): (67, "StaleRegisterMap"),
- ("read_only_space", 0x008d5): (134, "ScriptContextTableMap"),
- ("read_only_space", 0x008fd): (131, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x008d5): (130, "ScriptContextTableMap"),
+ ("read_only_space", 0x008fd): (127, "ClosureFeedbackCellArrayMap"),
("read_only_space", 0x00925): (160, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x0094d): (120, "ArrayListMap"),
+ ("read_only_space", 0x0094d): (116, "ArrayListMap"),
("read_only_space", 0x00975): (65, "BigIntMap"),
- ("read_only_space", 0x0099d): (132, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x009c5): (136, "BytecodeArrayMap"),
- ("read_only_space", 0x009ed): (157, "CodeDataContainerMap"),
- ("read_only_space", 0x00a15): (137, "FixedDoubleArrayMap"),
- ("read_only_space", 0x00a3d): (123, "GlobalDictionaryMap"),
- ("read_only_space", 0x00a65): (96, "ManyClosuresCellMap"),
- ("read_only_space", 0x00a8d): (120, "ModuleInfoMap"),
- ("read_only_space", 0x00ab5): (124, "NameDictionaryMap"),
- ("read_only_space", 0x00add): (96, "NoClosuresCellMap"),
- ("read_only_space", 0x00b05): (125, "NumberDictionaryMap"),
- ("read_only_space", 0x00b2d): (96, "OneClosureCellMap"),
- ("read_only_space", 0x00b55): (126, "OrderedHashMapMap"),
- ("read_only_space", 0x00b7d): (127, "OrderedHashSetMap"),
- ("read_only_space", 0x00ba5): (128, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x00bcd): (165, "PreparseDataMap"),
- ("read_only_space", 0x00bf5): (166, "PropertyArrayMap"),
- ("read_only_space", 0x00c1d): (92, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x00c45): (92, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x00c6d): (92, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x00c95): (129, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x00cbd): (120, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x00ce5): (148, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x00d0d): (149, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x00d35): (150, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x00d5d): (68, "SourceTextModuleMap"),
- ("read_only_space", 0x00d85): (130, "StringTableMap"),
- ("read_only_space", 0x00dad): (69, "SyntheticModuleMap"),
- ("read_only_space", 0x00dd5): (152, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x00dfd): (151, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x00e25): (169, "WeakArrayListMap"),
- ("read_only_space", 0x00e4d): (122, "EphemeronHashTableMap"),
- ("read_only_space", 0x00e75): (159, "EmbedderDataArrayMap"),
- ("read_only_space", 0x00e9d): (170, "WeakCellMap"),
- ("read_only_space", 0x00ec5): (32, "StringMap"),
- ("read_only_space", 0x00eed): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x00f15): (33, "ConsStringMap"),
- ("read_only_space", 0x00f3d): (37, "ThinStringMap"),
- ("read_only_space", 0x00f65): (35, "SlicedStringMap"),
- ("read_only_space", 0x00f8d): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x00fb5): (34, "ExternalStringMap"),
- ("read_only_space", 0x00fdd): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x01005): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x0102d): (0, "InternalizedStringMap"),
- ("read_only_space", 0x01055): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x0107d): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x010a5): (18, "UncachedExternalInternalizedStringMap"),
- ("read_only_space", 0x010cd): (26, "UncachedExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x010f5): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x0111d): (67, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x01151): (95, "EnumCacheMap"),
- ("read_only_space", 0x011a1): (86, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x0129d): (98, "InterceptorInfoMap"),
- ("read_only_space", 0x032ad): (71, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x032d5): (72, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x032fd): (73, "CallableTaskMap"),
- ("read_only_space", 0x03325): (74, "CallbackTaskMap"),
- ("read_only_space", 0x0334d): (75, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x03375): (78, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x0339d): (79, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x033c5): (80, "AccessCheckInfoMap"),
- ("read_only_space", 0x033ed): (81, "AccessorInfoMap"),
- ("read_only_space", 0x03415): (82, "AccessorPairMap"),
- ("read_only_space", 0x0343d): (83, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x03465): (84, "AllocationMementoMap"),
- ("read_only_space", 0x0348d): (87, "AsmWasmDataMap"),
- ("read_only_space", 0x034b5): (88, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x034dd): (89, "BreakPointMap"),
- ("read_only_space", 0x03505): (90, "BreakPointInfoMap"),
- ("read_only_space", 0x0352d): (91, "CachedTemplateObjectMap"),
- ("read_only_space", 0x03555): (93, "ClassPositionsMap"),
- ("read_only_space", 0x0357d): (94, "DebugInfoMap"),
- ("read_only_space", 0x035a5): (97, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x035cd): (100, "InterpreterDataMap"),
- ("read_only_space", 0x035f5): (101, "PromiseCapabilityMap"),
- ("read_only_space", 0x0361d): (102, "PromiseReactionMap"),
- ("read_only_space", 0x03645): (103, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x0366d): (104, "PrototypeInfoMap"),
- ("read_only_space", 0x03695): (105, "ScriptMap"),
- ("read_only_space", 0x036bd): (109, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x036e5): (110, "StackFrameInfoMap"),
- ("read_only_space", 0x0370d): (111, "StackTraceFrameMap"),
- ("read_only_space", 0x03735): (112, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x0375d): (113, "Tuple2Map"),
- ("read_only_space", 0x03785): (114, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x037ad): (115, "WasmDebugInfoMap"),
- ("read_only_space", 0x037d5): (116, "WasmExceptionTagMap"),
- ("read_only_space", 0x037fd): (117, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x03825): (118, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x0384d): (119, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x03875): (99, "InternalClassMap"),
- ("read_only_space", 0x0389d): (107, "SmiPairMap"),
- ("read_only_space", 0x038c5): (106, "SmiBoxMap"),
- ("read_only_space", 0x038ed): (108, "SortStateMap"),
- ("read_only_space", 0x03915): (85, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x0393d): (85, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x03965): (76, "LoadHandler1Map"),
- ("read_only_space", 0x0398d): (76, "LoadHandler2Map"),
- ("read_only_space", 0x039b5): (76, "LoadHandler3Map"),
- ("read_only_space", 0x039dd): (77, "StoreHandler0Map"),
- ("read_only_space", 0x03a05): (77, "StoreHandler1Map"),
- ("read_only_space", 0x03a2d): (77, "StoreHandler2Map"),
- ("read_only_space", 0x03a55): (77, "StoreHandler3Map"),
+ ("read_only_space", 0x0099d): (128, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x009c5): (132, "BytecodeArrayMap"),
+ ("read_only_space", 0x009ed): (156, "CodeDataContainerMap"),
+ ("read_only_space", 0x00a15): (157, "CoverageInfoMap"),
+ ("read_only_space", 0x00a3d): (133, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x00a65): (119, "GlobalDictionaryMap"),
+ ("read_only_space", 0x00a8d): (96, "ManyClosuresCellMap"),
+ ("read_only_space", 0x00ab5): (116, "ModuleInfoMap"),
+ ("read_only_space", 0x00add): (120, "NameDictionaryMap"),
+ ("read_only_space", 0x00b05): (96, "NoClosuresCellMap"),
+ ("read_only_space", 0x00b2d): (121, "NumberDictionaryMap"),
+ ("read_only_space", 0x00b55): (96, "OneClosureCellMap"),
+ ("read_only_space", 0x00b7d): (122, "OrderedHashMapMap"),
+ ("read_only_space", 0x00ba5): (123, "OrderedHashSetMap"),
+ ("read_only_space", 0x00bcd): (124, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x00bf5): (167, "PreparseDataMap"),
+ ("read_only_space", 0x00c1d): (168, "PropertyArrayMap"),
+ ("read_only_space", 0x00c45): (92, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x00c6d): (92, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x00c95): (92, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x00cbd): (125, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x00ce5): (116, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x00d0d): (145, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x00d35): (146, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x00d5d): (147, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x00d85): (148, "SourceTextModuleMap"),
+ ("read_only_space", 0x00dad): (126, "StringTableMap"),
+ ("read_only_space", 0x00dd5): (149, "SyntheticModuleMap"),
+ ("read_only_space", 0x00dfd): (151, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x00e25): (150, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x00e4d): (174, "WeakArrayListMap"),
+ ("read_only_space", 0x00e75): (118, "EphemeronHashTableMap"),
+ ("read_only_space", 0x00e9d): (159, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x00ec5): (175, "WeakCellMap"),
+ ("read_only_space", 0x00eed): (32, "StringMap"),
+ ("read_only_space", 0x00f15): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x00f3d): (33, "ConsStringMap"),
+ ("read_only_space", 0x00f65): (37, "ThinStringMap"),
+ ("read_only_space", 0x00f8d): (35, "SlicedStringMap"),
+ ("read_only_space", 0x00fb5): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x00fdd): (34, "ExternalStringMap"),
+ ("read_only_space", 0x01005): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x0102d): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x01055): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x0107d): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x010a5): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x010cd): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x010f5): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x0111d): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x01145): (67, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x01179): (95, "EnumCacheMap"),
+ ("read_only_space", 0x011c9): (86, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x012c5): (98, "InterceptorInfoMap"),
+ ("read_only_space", 0x032e5): (71, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x0330d): (72, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x03335): (73, "CallableTaskMap"),
+ ("read_only_space", 0x0335d): (74, "CallbackTaskMap"),
+ ("read_only_space", 0x03385): (75, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x033ad): (78, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x033d5): (79, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x033fd): (80, "AccessCheckInfoMap"),
+ ("read_only_space", 0x03425): (81, "AccessorInfoMap"),
+ ("read_only_space", 0x0344d): (82, "AccessorPairMap"),
+ ("read_only_space", 0x03475): (83, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x0349d): (84, "AllocationMementoMap"),
+ ("read_only_space", 0x034c5): (87, "AsmWasmDataMap"),
+ ("read_only_space", 0x034ed): (88, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x03515): (89, "BreakPointMap"),
+ ("read_only_space", 0x0353d): (90, "BreakPointInfoMap"),
+ ("read_only_space", 0x03565): (91, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x0358d): (93, "ClassPositionsMap"),
+ ("read_only_space", 0x035b5): (94, "DebugInfoMap"),
+ ("read_only_space", 0x035dd): (97, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x03605): (99, "InterpreterDataMap"),
+ ("read_only_space", 0x0362d): (100, "PromiseCapabilityMap"),
+ ("read_only_space", 0x03655): (101, "PromiseReactionMap"),
+ ("read_only_space", 0x0367d): (102, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x036a5): (103, "PrototypeInfoMap"),
+ ("read_only_space", 0x036cd): (104, "ScriptMap"),
+ ("read_only_space", 0x036f5): (105, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x0371d): (106, "StackFrameInfoMap"),
+ ("read_only_space", 0x03745): (107, "StackTraceFrameMap"),
+ ("read_only_space", 0x0376d): (108, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x03795): (109, "Tuple2Map"),
+ ("read_only_space", 0x037bd): (110, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x037e5): (111, "WasmDebugInfoMap"),
+ ("read_only_space", 0x0380d): (112, "WasmExceptionTagMap"),
+ ("read_only_space", 0x03835): (113, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x0385d): (114, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x03885): (115, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x038ad): (134, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x038d5): (165, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x038fd): (164, "InternalClassMap"),
+ ("read_only_space", 0x03925): (172, "SmiPairMap"),
+ ("read_only_space", 0x0394d): (171, "SmiBoxMap"),
+ ("read_only_space", 0x03975): (68, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x0399d): (69, "ExportedSubClassMap"),
+ ("read_only_space", 0x039c5): (173, "SortStateMap"),
+ ("read_only_space", 0x039ed): (85, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x03a15): (85, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x03a3d): (76, "LoadHandler1Map"),
+ ("read_only_space", 0x03a65): (76, "LoadHandler2Map"),
+ ("read_only_space", 0x03a8d): (76, "LoadHandler3Map"),
+ ("read_only_space", 0x03ab5): (77, "StoreHandler0Map"),
+ ("read_only_space", 0x03add): (77, "StoreHandler1Map"),
+ ("read_only_space", 0x03b05): (77, "StoreHandler2Map"),
+ ("read_only_space", 0x03b2d): (77, "StoreHandler3Map"),
("map_space", 0x00121): (1057, "ExternalMap"),
("map_space", 0x00149): (1073, "JSMessageObjectMap"),
}
@@ -357,32 +367,32 @@ KNOWN_OBJECTS = {
("read_only_space", 0x007ad): "TerminationException",
("read_only_space", 0x00815): "OptimizedOut",
("read_only_space", 0x00875): "StaleRegister",
- ("read_only_space", 0x01145): "EmptyEnumCache",
- ("read_only_space", 0x01179): "EmptyPropertyArray",
- ("read_only_space", 0x01181): "EmptyByteArray",
- ("read_only_space", 0x01189): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x01195): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x011c9): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x011d1): "EmptySloppyArgumentsElements",
- ("read_only_space", 0x011e1): "EmptySlowElementDictionary",
- ("read_only_space", 0x01205): "EmptyOrderedHashMap",
- ("read_only_space", 0x01219): "EmptyOrderedHashSet",
- ("read_only_space", 0x0122d): "EmptyFeedbackMetadata",
- ("read_only_space", 0x01239): "EmptyPropertyCell",
- ("read_only_space", 0x0124d): "EmptyPropertyDictionary",
- ("read_only_space", 0x01275): "NoOpInterceptorInfo",
- ("read_only_space", 0x012c5): "EmptyWeakArrayList",
- ("read_only_space", 0x012d1): "InfinityValue",
- ("read_only_space", 0x012dd): "MinusZeroValue",
- ("read_only_space", 0x012e9): "MinusInfinityValue",
- ("read_only_space", 0x012f5): "SelfReferenceMarker",
- ("read_only_space", 0x01335): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x01341): "TrampolineTrivialCodeDataContainer",
- ("read_only_space", 0x0134d): "TrampolinePromiseRejectionCodeDataContainer",
- ("read_only_space", 0x01359): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x01391): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x013b9): "NativeScopeInfo",
- ("read_only_space", 0x013d5): "HashSeed",
+ ("read_only_space", 0x0116d): "EmptyEnumCache",
+ ("read_only_space", 0x011a1): "EmptyPropertyArray",
+ ("read_only_space", 0x011a9): "EmptyByteArray",
+ ("read_only_space", 0x011b1): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x011bd): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x011f1): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x011f9): "EmptySloppyArgumentsElements",
+ ("read_only_space", 0x01209): "EmptySlowElementDictionary",
+ ("read_only_space", 0x0122d): "EmptyOrderedHashMap",
+ ("read_only_space", 0x01241): "EmptyOrderedHashSet",
+ ("read_only_space", 0x01255): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x01261): "EmptyPropertyCell",
+ ("read_only_space", 0x01275): "EmptyPropertyDictionary",
+ ("read_only_space", 0x0129d): "NoOpInterceptorInfo",
+ ("read_only_space", 0x012ed): "EmptyWeakArrayList",
+ ("read_only_space", 0x012f9): "InfinityValue",
+ ("read_only_space", 0x01305): "MinusZeroValue",
+ ("read_only_space", 0x01311): "MinusInfinityValue",
+ ("read_only_space", 0x0131d): "SelfReferenceMarker",
+ ("read_only_space", 0x0135d): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x01369): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x01375): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x01381): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x013b9): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x013e1): "NativeScopeInfo",
+ ("read_only_space", 0x013fd): "HashSeed",
("old_space", 0x00121): "ArgumentsIteratorAccessor",
("old_space", 0x00165): "ArrayLengthAccessor",
("old_space", 0x001a9): "BoundFunctionLengthAccessor",
@@ -436,6 +446,7 @@ FRAME_MARKERS = (
"WASM_TO_JS",
"JS_TO_WASM",
"WASM_INTERPRETER_ENTRY",
+ "WASM_DEBUG_BREAK",
"C_WASM_ENTRY",
"WASM_EXIT",
"WASM_COMPILE_LAZY",
diff --git a/deps/v8/tools/v8windbg/BUILD.gn b/deps/v8/tools/v8windbg/BUILD.gn
new file mode 100644
index 0000000000..10d06a127f
--- /dev/null
+++ b/deps/v8/tools/v8windbg/BUILD.gn
@@ -0,0 +1,116 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+config("v8windbg_config") {
+ # Required for successful compilation of SDK header file DbgModel.h.
+ cflags_cc = [ "/Zc:twoPhase-" ]
+
+ include_dirs = [ "../.." ]
+}
+
+# Basic support for WinDbg extensions, with nothing specific to V8.
+source_set("v8windbg_base") {
+ testonly = true
+
+ sources = [
+ "base/dbgext.cc",
+ "base/dbgext.h",
+ "base/utilities.cc",
+ "base/utilities.h",
+ ]
+
+ libs = [
+ "DbgEng.lib",
+ "DbgModel.lib",
+ "RuntimeObject.lib",
+ "comsuppwd.lib",
+ ]
+
+ public_configs = [ ":v8windbg_config" ]
+}
+
+# An extension DLL that can be loaded into WinDbg with `.load v8windbg`.
+v8_shared_library("v8windbg") {
+ testonly = true
+
+ sources = [
+ "base/dbgext.def",
+ "src/cur-isolate.cc",
+ "src/cur-isolate.h",
+ "src/list-chunks.cc",
+ "src/list-chunks.h",
+ "src/local-variables.cc",
+ "src/local-variables.h",
+ "src/object-inspection.cc",
+ "src/object-inspection.h",
+ "src/v8-debug-helper-interop.cc",
+ "src/v8-debug-helper-interop.h",
+ "src/v8windbg-extension.cc",
+ "src/v8windbg-extension.h",
+ ]
+
+ deps = [
+ ":v8windbg_base",
+ "../debug_helper:v8_debug_helper",
+ ]
+}
+
+# Copies Windows SDK files that v8windbg_test needs.
+action("copy_prereqs") {
+ testonly = true
+
+ script = "copy-prereqs.py"
+
+ inputs = [
+ script,
+ "//build/vs_toolchain.py",
+ ]
+
+ outputs = [ "$root_out_dir/dbgeng.dll" ]
+
+ args = [
+ rebase_path("//build"),
+ rebase_path(root_out_dir),
+ target_cpu,
+ ]
+}
+
+# A test that launches a separate d8 process and debugs it using v8windbg.
+v8_source_set("v8windbg_test") {
+ testonly = true
+
+ sources = [
+ "test/debug-callbacks.cc",
+ "test/debug-callbacks.h",
+ "test/v8windbg-test.cc",
+ "test/v8windbg-test.h",
+ ]
+
+ deps = [ "../..:v8_libbase" ] # For CHECK macro.
+
+ data_deps = [
+ ":copy_prereqs",
+ ":v8windbg",
+ ":v8windbg_test_script",
+ "../..:d8",
+ ]
+
+ libs = [
+ "DbgEng.lib",
+ "DbgModel.lib",
+ "Pathcch.lib",
+ "RuntimeObject.lib",
+ ]
+
+ configs = [ ":v8windbg_config" ]
+}
+
+# Copies the JavaScript file used by v8windbg_test.
+copy("v8windbg_test_script") {
+ testonly = true
+ sources = [ "test/script.js" ]
+ outputs = [ "$target_out_dir/v8windbg-test-script.js" ]
+}
diff --git a/deps/v8/tools/v8windbg/README.md b/deps/v8/tools/v8windbg/README.md
new file mode 100644
index 0000000000..dc0c4e1040
--- /dev/null
+++ b/deps/v8/tools/v8windbg/README.md
@@ -0,0 +1,151 @@
+# v8windbg
+
+V8windbg is a WinDbg extension for the V8 engine. It adjusts the behavior of the
+Locals pane and corresponding `dx` commands to display useful data when
+inspecting V8 object types. It is intended to be as robust as possible in dumps
+with limited memory, and should work equally well in live sessions, crash dumps,
+and time travel debugging.
+
+## Building
+
+Run `autoninja v8windbg` in your output directory.
+
+## Using
+
+In WinDbgX, run `.load path\to\your\output\dir\v8windbg.dll` to load the
+extension. To inspect V8 objects, use the Locals window or the `dx` command as
+usual.
+
+**Important notes:**
+
+- The version of v8windbg must exactly match the version and build configuration
+ of the process you're debugging. (To find the version number of a module in a
+ crash dump, enter `lm` and click the module name, or run `lmDvm modulename`.)
+- V8windbg relies on detailed symbols (symbol_level = 2).
+- Ensure also that WinDbg can load the symbols (.pdb file) for the module
+ containing V8.
+- Cross-architecture debugging is possible in some cases:
+ - To debug an x86 process on x64, load the x86 build of v8windbg.
+ - To debug an ARM64 process on x64, load the ARM64 simulator build of v8windbg
+ (built with target_cpu="x64" and v8_target_cpu="arm64").
+
+As well as improving the Locals pane behavior, v8windbg also provides a few
+functions that can be called from within `dx` commands:
+
+- `@$v8object()` returns information about the fields of a tagged V8 value,
+ passed in as a plain number like `dx @$v8object(0x34f49880471)`. This invokes
+ the same logic that is used for the locals pane. You may also pass a type hint
+ as an optional second parameter if you find that v8windbg is not inferring the
+ correct type (which can happen when the memory for the object's Map wasn't
+ collected in a crash dump). The type hint is a fully-qualified C++ class name,
+ like `dx @$v8object(0x34f49880471, "v8::internal::JSArray")`.
+- `@$curisolate()` gets the Isolate pointer for the current thread, if the
+ current thread has a JavaScript Isolate associated.
+- `@$listchunks()` returns a list of the memory chunks in the Heap for the
+ current Isolate.
+
+*Tip:* to see what objects are present in a chunk of heap memory, you can cast
+it to an array of `TaggedValue`, like this:
+
+`dx (v8::internal::TaggedValue(*)[64])0x34f49880450`
+
+## Architecture
+
+V8windbg uses the [DataModel] APIs as much as possible, rather than the older
+[DbgEng] APIs. It uses the [WRL COM] APIs because of limitations in Clang's
+support for [C++/WinRT COM].
+
+Where possible, v8windbg uses the cross-platform v8_debug_helper library to
+avoid depending on V8 internals.
+
+The source in `./base` is a generic starting point for implementing a WinDbg
+extension. The V8-specific implementation under `./src` then implements the two
+functions declared in `dbgext.h` to create and destroy the extension instance.
+
+`./src` file index:
+
+- `cur-isolate.{cc,h}` implements the `IModelMethod` for `@$curisolate()`.
+- `list-chunks.{cc,h}` implements the `IModelMethod` for `@$listchunks()`. Its
+ result is a custom object that supports iteration and indexing.
+- `local-variables.{cc,h}` implements the `IModelPropertyAccessor` that provides
+ content to show in the Locals pane for stack frames corresponding to builtins
+ or runtime-generated code.
+- `object-inspection.{cc,h}` contains various classes that allow the debugger to
+ show fields within V8 objects.
+- `v8-debug-helper-interop.{cc,h}` makes requests to the V8 postmortem debugging
+ API, and converts the results into simple C++ structs.
+- `v8windbg-extension.{cc,h}` is responsible for initializing the extension and
+ cleaning up when the extension is unloaded.
+
+When the extension is initialized (`Extension::Initialize()`):
+
+- It registers a "parent model" for all known V8 object types, such as
+ `v8::internal::HeapObject` and `v8::internal::Symbol`. Any time WinDbg needs
+ to represent a value with one of these types, it creates an `IModelObject`
+ representing the value and attaches the parent model. This particular parent
+ model supports `IStringDisplayableConcept` and `IDynamicKeyProviderConcept`,
+ meaning the debugger will call a custom method every time it wants to get a
+  description string or a list of fields for any of these objects (a sketch of
+  this registration appears after this list).
+- It registers a different parent model, with a single property getter named
+ "Value", for handle types such as `v8::internal::Handle<*>`. The "Value"
+ getter returns the correctly-typed tagged pointer contained by the handle.
+- It overrides the getter functions for "LocalVariables" and "Parameters" on the
+ parent model for stack frames. When the user selects a stack frame, WinDbg
+ calls these getter functions to determine what it should show in the Locals
+ pane.
+- It registers the function aliases such as `@$curisolate()`.
+
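+A minimal sketch of the registration described in the first bullet, assuming a
+provider object that implements the relevant concepts (the helper name
+`RegisterHeapObjectModel` is hypothetical; the real logic lives in
+`v8windbg-extension.cc`, and the globals and `RETURN_IF_FAIL` come from
+`./base`):
+
+    HRESULT RegisterHeapObjectModel(IDataModelConcept* p_provider) {
+      // Match every occurrence of the type across all modules.
+      WRL::ComPtr<IDebugHostTypeSignature> sp_signature;
+      RETURN_IF_FAIL(sp_debug_host_symbols->CreateTypeSignature(
+          L"v8::internal::HeapObject", nullptr, &sp_signature));
+      // Wrap the concept provider in a data model object and attach it as the
+      // parent model for every value matching the signature.
+      WRL::ComPtr<IModelObject> sp_parent_model;
+      RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+          p_provider, &sp_parent_model));
+      return sp_data_model_manager->RegisterModelForTypeSignature(
+          sp_signature.Get(), sp_parent_model.Get());
+    }
+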
+The `./test` directory contains a test function that exercises v8windbg. It does
+not require WinDbg, but uses DbgEng.dll and DbgModel.dll from the Windows SDK
+(these are slightly older versions of the same modules used by WinDbg). The test
+function launches a separate d8 process, attaches to that process as a debugger,
+lets d8 run until it hits a breakpoint, and then checks the output of a few `dx`
+commands.
+
+## Debugging the extension
+
+To debug the extension, launch a WinDbgx instance with an active target, e.g.
+
+`windbgx \src\github\v8\out\x64.debug\d8.exe -e "console.log('hello');"`
+
+or
+
+`windbgx \src\github\v8\out\x64.debug\d8.exe c:\temp\test.js`
+
+The WinDbgx process itself does not host the extensions, but uses a helper
+process. Attach another instance of WinDbgx to the `enghost.exe` helper process,
+e.g.
+
+`windbgx -pn enghost.exe`
+
+Set a breakpoint in this second session for when the extension initializes, e.g.
+
+`bm v8windbg!DebugExtensionInitialize`
+
+..and/or whenever a function of interest is invoked, e.g.
+
+ - `bp v8windbg!CurrIsolateAlias::Call` for the invocation of `@$curisolate()`
+ - `bp v8windbg!GetHeapObject` for the interpretation of V8 objects.
+
+Load the extension in the target debugger (the first WinDbg session), which
+should trigger the breakpoint.
+
+`.load "C:\\src\\github\\v8windbg\\x64\\v8windbg.dll"`
+
+Note: For D8, the below is a good breakpoint to set just before any script is
+run:
+
+`bp d8_exe!v8::Shell::ExecuteString`
+
+..or the below for once the V8 engine is entered (for component builds):
+
+`bp v8!v8::Script::Run`
+
+Then trigger the extension code of interest via something like `dx source` or
+`dx @$curisolate()`.
+
+[DataModel]: https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/data-model-cpp-overview
+[DbgEng]: https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/writing-dbgeng-extension-code
+[C++/WinRT COM]: https://docs.microsoft.com/en-us/windows/uwp/cpp-and-winrt-apis/consume-com
+[WRL COM]: https://docs.microsoft.com/en-us/cpp/cppcx/wrl/windows-runtime-cpp-template-library-wrl?view=vs-2019
diff --git a/deps/v8/tools/v8windbg/base/dbgext.cc b/deps/v8/tools/v8windbg/base/dbgext.cc
new file mode 100644
index 0000000000..e3f00951b4
--- /dev/null
+++ b/deps/v8/tools/v8windbg/base/dbgext.cc
@@ -0,0 +1,75 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/base/dbgext.h"
+
+#include <crtdbg.h>
+#include <wrl/module.h>
+
+#include "tools/v8windbg/base/utilities.h"
+
+// See
+// https://docs.microsoft.com/en-us/visualstudio/debugger/crt-debugging-techniques
+// for the memory leak and debugger reporting macros used from <crtdbg.h>
+_CrtMemState mem_old, mem_new, mem_diff;
+int original_crt_dbg_flag = 0;
+
+WRL::ComPtr<IDataModelManager> sp_data_model_manager;
+WRL::ComPtr<IDebugHost> sp_debug_host;
+WRL::ComPtr<IDebugControl5> sp_debug_control;
+WRL::ComPtr<IDebugHostMemory2> sp_debug_host_memory;
+WRL::ComPtr<IDebugHostSymbols> sp_debug_host_symbols;
+WRL::ComPtr<IDebugHostExtensibility> sp_debug_host_extensibility;
+
+extern "C" {
+
+HRESULT
+__stdcall DebugExtensionInitialize(PULONG /*pVersion*/, PULONG /*pFlags*/) {
+ original_crt_dbg_flag = _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF);
+ _CrtMemCheckpoint(&mem_old);
+
+ WRL::ComPtr<IDebugClient> sp_debug_client;
+ WRL::ComPtr<IHostDataModelAccess> sp_data_model_access;
+
+ RETURN_IF_FAIL(DebugCreate(__uuidof(IDebugClient), &sp_debug_client));
+
+ RETURN_IF_FAIL(sp_debug_client.As(&sp_data_model_access));
+ RETURN_IF_FAIL(sp_debug_client.As(&sp_debug_control));
+
+ RETURN_IF_FAIL(sp_data_model_access->GetDataModel(&sp_data_model_manager,
+ &sp_debug_host));
+
+ RETURN_IF_FAIL(sp_debug_host.As(&sp_debug_host_memory));
+ RETURN_IF_FAIL(sp_debug_host.As(&sp_debug_host_symbols));
+ RETURN_IF_FAIL(sp_debug_host.As(&sp_debug_host_extensibility));
+
+ return CreateExtension();
+}
+
+void __stdcall DebugExtensionUninitialize() {
+ DestroyExtension();
+ sp_debug_host = nullptr;
+ sp_data_model_manager = nullptr;
+ sp_debug_host_memory = nullptr;
+ sp_debug_host_symbols = nullptr;
+ sp_debug_host_extensibility = nullptr;
+
+ _CrtMemCheckpoint(&mem_new);
+ if (_CrtMemDifference(&mem_diff, &mem_old, &mem_new)) {
+ _CrtMemDumpStatistics(&mem_diff);
+ }
+ _CrtSetDbgFlag(original_crt_dbg_flag);
+}
+
+HRESULT __stdcall DebugExtensionCanUnload(void) {
+ if (!WRL::Module<WRL::InProc>::GetModule().Terminate()) {
+ _RPTF0(_CRT_WARN, "Failed to unload WRL\n");
+ return S_FALSE;
+ }
+ return S_OK;
+}
+
+void __stdcall DebugExtensionUnload() { return; }
+
+} // extern "C"
diff --git a/deps/v8/tools/v8windbg/base/dbgext.def b/deps/v8/tools/v8windbg/base/dbgext.def
new file mode 100644
index 0000000000..7f32756c35
--- /dev/null
+++ b/deps/v8/tools/v8windbg/base/dbgext.def
@@ -0,0 +1,5 @@
+EXPORTS
+ DebugExtensionInitialize
+ DebugExtensionUninitialize
+ DebugExtensionCanUnload
+ DebugExtensionUnload
diff --git a/deps/v8/tools/v8windbg/base/dbgext.h b/deps/v8/tools/v8windbg/base/dbgext.h
new file mode 100644
index 0000000000..8b36a8f361
--- /dev/null
+++ b/deps/v8/tools/v8windbg/base/dbgext.h
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_BASE_DBGEXT_H_
+#define V8_TOOLS_V8WINDBG_BASE_DBGEXT_H_
+
+#if !defined(UNICODE) || !defined(_UNICODE)
+#error Unicode not defined
+#endif
+
+#include <DbgEng.h>
+#include <DbgModel.h>
+#include <Windows.h>
+#include <crtdbg.h>
+#include <wrl/client.h>
+
+#include <string>
+
+namespace WRL = Microsoft::WRL;
+
+// Globals for use throughout the extension. (Populated on load).
+extern WRL::ComPtr<IDataModelManager> sp_data_model_manager;
+extern WRL::ComPtr<IDebugHost> sp_debug_host;
+extern WRL::ComPtr<IDebugControl5> sp_debug_control;
+extern WRL::ComPtr<IDebugHostMemory2> sp_debug_host_memory;
+extern WRL::ComPtr<IDebugHostSymbols> sp_debug_host_symbols;
+extern WRL::ComPtr<IDebugHostExtensibility> sp_debug_host_extensibility;
+
+// To be implemented by the custom extension code. (Called on load).
+HRESULT CreateExtension();
+void DestroyExtension();
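+//
+// A do-nothing extension could, as a sketch, implement these as:
+//   HRESULT CreateExtension() { return S_OK; }
+//   void DestroyExtension() {}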
+
+#endif // V8_TOOLS_V8WINDBG_BASE_DBGEXT_H_
diff --git a/deps/v8/tools/v8windbg/base/utilities.cc b/deps/v8/tools/v8windbg/base/utilities.cc
new file mode 100644
index 0000000000..a59e95f46f
--- /dev/null
+++ b/deps/v8/tools/v8windbg/base/utilities.cc
@@ -0,0 +1,246 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/base/utilities.h"
+
+#include <comutil.h>
+#include <oleauto.h>
+
+#include <vector>
+
+namespace {
+
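+// Wraps the given COM object in a VT_UNKNOWN VARIANT and boxes it as an
+// IModelObject of the requested kind.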
+HRESULT BoxObject(IDataModelManager* p_manager, IUnknown* p_object,
+ ModelObjectKind kind, IModelObject** pp_model_object) {
+ *pp_model_object = nullptr;
+
+ VARIANT vt_val;
+ vt_val.vt = VT_UNKNOWN;
+ vt_val.punkVal = p_object;
+
+ HRESULT hr = p_manager->CreateIntrinsicObject(kind, &vt_val, pp_model_object);
+ return hr;
+}
+
+} // namespace
+
+HRESULT CreateProperty(IDataModelManager* p_manager,
+ IModelPropertyAccessor* p_property,
+ IModelObject** pp_property_object) {
+ return BoxObject(p_manager, p_property, ObjectPropertyAccessor,
+ pp_property_object);
+}
+
+HRESULT CreateMethod(IDataModelManager* p_manager, IModelMethod* p_method,
+ IModelObject** pp_method_object) {
+ return BoxObject(p_manager, p_method, ObjectMethod, pp_method_object);
+}
+
+HRESULT UnboxProperty(IModelObject* object, IModelPropertyAccessor** result) {
+ ModelObjectKind kind = (ModelObjectKind)-1;
+ RETURN_IF_FAIL(object->GetKind(&kind));
+ if (kind != ObjectPropertyAccessor) return E_FAIL;
+ _variant_t variant;
+ RETURN_IF_FAIL(object->GetIntrinsicValue(&variant));
+ if (variant.vt != VT_UNKNOWN) return E_FAIL;
+ WRL::ComPtr<IModelPropertyAccessor> accessor;
+ RETURN_IF_FAIL(WRL::ComPtr<IUnknown>(variant.punkVal).As(&accessor));
+ *result = accessor.Detach();
+ return S_OK;
+}
+
+HRESULT CreateTypedIntrinsic(uint64_t value, IDebugHostType* type,
+ IModelObject** result) {
+ // Figure out what kind of VARIANT we need to make.
+ IntrinsicKind kind;
+ VARTYPE carrier;
+ RETURN_IF_FAIL(type->GetIntrinsicType(&kind, &carrier));
+
+ VARIANT vt_val;
+ switch (carrier) {
+ case VT_BOOL:
+ vt_val.boolVal = value ? VARIANT_TRUE : VARIANT_FALSE;
+ break;
+ case VT_I1:
+ vt_val.cVal = static_cast<int8_t>(value);
+ break;
+ case VT_UI1:
+ vt_val.bVal = static_cast<uint8_t>(value);
+ break;
+ case VT_I2:
+ vt_val.iVal = static_cast<int16_t>(value);
+ break;
+ case VT_UI2:
+ vt_val.uiVal = static_cast<uint16_t>(value);
+ break;
+ case VT_INT:
+ vt_val.intVal = static_cast<int>(value);
+ break;
+ case VT_UINT:
+ vt_val.uintVal = static_cast<unsigned int>(value);
+ break;
+ case VT_I4:
+ vt_val.lVal = static_cast<int32_t>(value);
+ break;
+ case VT_UI4:
+ vt_val.ulVal = static_cast<uint32_t>(value);
+ break;
+ case VT_INT_PTR:
+ vt_val.llVal = static_cast<intptr_t>(value);
+ break;
+ case VT_UINT_PTR:
+ vt_val.ullVal = static_cast<uintptr_t>(value);
+ break;
+ case VT_I8:
+ vt_val.llVal = static_cast<int64_t>(value);
+ break;
+ case VT_UI8:
+ vt_val.ullVal = static_cast<uint64_t>(value);
+ break;
+ default:
+ return E_FAIL;
+ }
+ vt_val.vt = carrier;
+ return sp_data_model_manager->CreateTypedIntrinsicObject(&vt_val, type,
+ result);
+}
+
+HRESULT CreateULong64(ULONG64 value, IModelObject** pp_int) {
+ HRESULT hr = S_OK;
+ *pp_int = nullptr;
+
+ VARIANT vt_val;
+ vt_val.vt = VT_UI8;
+ vt_val.ullVal = value;
+
+ hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+ pp_int);
+ return hr;
+}
+
+HRESULT UnboxULong64(IModelObject* object, ULONG64* value, bool convert) {
+ ModelObjectKind kind = (ModelObjectKind)-1;
+ RETURN_IF_FAIL(object->GetKind(&kind));
+ if (kind != ObjectIntrinsic) return E_FAIL;
+ _variant_t variant;
+ RETURN_IF_FAIL(object->GetIntrinsicValue(&variant));
+ if (convert) {
+ RETURN_IF_FAIL(VariantChangeType(&variant, &variant, 0, VT_UI8));
+ }
+ if (variant.vt != VT_UI8) return E_FAIL;
+ *value = variant.ullVal;
+ return S_OK;
+}
+
+HRESULT CreateInt32(int value, IModelObject** pp_int) {
+ HRESULT hr = S_OK;
+ *pp_int = nullptr;
+
+ VARIANT vt_val;
+ vt_val.vt = VT_I4;
+ vt_val.intVal = value;
+
+ hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+ pp_int);
+ return hr;
+}
+
+HRESULT CreateUInt32(uint32_t value, IModelObject** pp_int) {
+ HRESULT hr = S_OK;
+ *pp_int = nullptr;
+
+ VARIANT vt_val;
+ vt_val.vt = VT_UI4;
+ vt_val.uintVal = value;
+
+ hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+ pp_int);
+ return hr;
+}
+
+HRESULT CreateBool(bool value, IModelObject** pp_val) {
+ HRESULT hr = S_OK;
+ *pp_val = nullptr;
+
+ VARIANT vt_val;
+ vt_val.vt = VT_BOOL;
+ vt_val.boolVal = value;
+
+ hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+ pp_val);
+ return hr;
+}
+
+HRESULT CreateNumber(double value, IModelObject** pp_val) {
+ HRESULT hr = S_OK;
+ *pp_val = nullptr;
+
+ VARIANT vt_val;
+ vt_val.vt = VT_R8;
+ vt_val.dblVal = value;
+
+ hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+ pp_val);
+ return hr;
+}
+
+HRESULT CreateString(std::u16string value, IModelObject** pp_val) {
+ HRESULT hr = S_OK;
+ *pp_val = nullptr;
+
+ VARIANT vt_val;
+ vt_val.vt = VT_BSTR;
+ vt_val.bstrVal =
+ ::SysAllocString(reinterpret_cast<const OLECHAR*>(value.c_str()));
+
+ hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+ pp_val);
+ return hr;
+}
+
+HRESULT UnboxString(IModelObject* object, BSTR* value) {
+ ModelObjectKind kind = (ModelObjectKind)-1;
+ RETURN_IF_FAIL(object->GetKind(&kind));
+ if (kind != ObjectIntrinsic) return E_FAIL;
+ _variant_t variant;
+ RETURN_IF_FAIL(object->GetIntrinsicValue(&variant));
+ if (variant.vt != VT_BSTR) return E_FAIL;
+ *value = variant.Detach().bstrVal;
+ return S_OK;
+}
+
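+// Looks up sp_parent[sp_index] via the parent's IIndexableConcept, the same
+// mechanism the dx evaluator uses for square-bracket indexing.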
+HRESULT GetModelAtIndex(WRL::ComPtr<IModelObject>& sp_parent,
+ WRL::ComPtr<IModelObject>& sp_index,
+ IModelObject** p_result) {
+ WRL::ComPtr<IIndexableConcept> sp_indexable_concept;
+ RETURN_IF_FAIL(sp_parent->GetConcept(__uuidof(IIndexableConcept),
+ &sp_indexable_concept, nullptr));
+
+ std::vector<IModelObject*> p_indexers{sp_index.Get()};
+ return sp_indexable_concept->GetAt(sp_parent.Get(), 1, p_indexers.data(),
+ p_result, nullptr);
+}
+
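+// Finds the thread associated with the given host context by walking the
+// data model tree Debugger.Sessions -> Processes -> Threads, indexing each
+// level by the boxed context.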
+HRESULT GetCurrentThread(WRL::ComPtr<IDebugHostContext>& sp_host_context,
+ IModelObject** p_current_thread) {
+ WRL::ComPtr<IModelObject> sp_boxed_context, sp_root_namespace;
+ WRL::ComPtr<IModelObject> sp_debugger, sp_sessions, sp_processes, sp_threads;
+ WRL::ComPtr<IModelObject> sp_curr_session, sp_curr_process;
+
+ RETURN_IF_FAIL(BoxObject(sp_data_model_manager.Get(), sp_host_context.Get(),
+ ObjectContext, &sp_boxed_context));
+ RETURN_IF_FAIL(sp_data_model_manager->GetRootNamespace(&sp_root_namespace));
+ RETURN_IF_FAIL(
+ sp_root_namespace->GetKeyValue(L"Debugger", &sp_debugger, nullptr));
+ RETURN_IF_FAIL(sp_debugger->GetKeyValue(L"Sessions", &sp_sessions, nullptr));
+ RETURN_IF_FAIL(
+ GetModelAtIndex(sp_sessions, sp_boxed_context, &sp_curr_session));
+ RETURN_IF_FAIL(
+ sp_curr_session->GetKeyValue(L"Processes", &sp_processes, nullptr));
+ RETURN_IF_FAIL(
+ GetModelAtIndex(sp_processes, sp_boxed_context, &sp_curr_process));
+ RETURN_IF_FAIL(
+ sp_curr_process->GetKeyValue(L"Threads", &sp_threads, nullptr));
+ return GetModelAtIndex(sp_threads, sp_boxed_context, p_current_thread);
+}
diff --git a/deps/v8/tools/v8windbg/base/utilities.h b/deps/v8/tools/v8windbg/base/utilities.h
new file mode 100644
index 0000000000..e26bb28780
--- /dev/null
+++ b/deps/v8/tools/v8windbg/base/utilities.h
@@ -0,0 +1,85 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_BASE_UTILITIES_H_
+#define V8_TOOLS_V8WINDBG_BASE_UTILITIES_H_
+
+#include "tools/v8windbg/base/dbgext.h"
+
+inline const wchar_t* U16ToWChar(const char16_t* p_u16) {
+ static_assert(sizeof(wchar_t) == sizeof(char16_t), "wrong wchar size");
+ return reinterpret_cast<const wchar_t*>(p_u16);
+}
+
+inline const wchar_t* U16ToWChar(std::u16string& str) {
+ return U16ToWChar(str.data());
+}
+
+#if defined(WIN32)
+inline std::u16string ConvertToU16String(std::string utf8_string) {
+ int len_chars =
+ ::MultiByteToWideChar(CP_UTF8, 0, utf8_string.c_str(), -1, nullptr, 0);
+
+ char16_t* p_buff =
+ static_cast<char16_t*>(malloc(len_chars * sizeof(char16_t)));
+
+  // On Windows, wchar_t is the same as char16_t.
+ static_assert(sizeof(wchar_t) == sizeof(char16_t), "wrong wchar size");
+ len_chars =
+ ::MultiByteToWideChar(CP_UTF8, 0, utf8_string.c_str(), -1,
+ reinterpret_cast<wchar_t*>(p_buff), len_chars);
+ std::u16string result{p_buff};
+ free(p_buff);
+
+ return result;
+}
+#else
+#error String encoding conversion must be provided for the target platform.
+#endif
+
+HRESULT CreateProperty(IDataModelManager* p_manager,
+ IModelPropertyAccessor* p_property,
+ IModelObject** pp_property_object);
+
+HRESULT CreateMethod(IDataModelManager* p_manager, IModelMethod* p_method,
+ IModelObject** pp_method_object);
+
+HRESULT UnboxProperty(IModelObject* object, IModelPropertyAccessor** result);
+
+HRESULT CreateTypedIntrinsic(uint64_t value, IDebugHostType* type,
+ IModelObject** result);
+
+HRESULT CreateULong64(ULONG64 value, IModelObject** pp_int);
+
+HRESULT UnboxULong64(IModelObject* object, ULONG64* value,
+ bool convert = false);
+
+HRESULT CreateInt32(int value, IModelObject** pp_int);
+
+HRESULT CreateUInt32(uint32_t value, IModelObject** pp_int);
+
+HRESULT CreateBool(bool value, IModelObject** pp_val);
+
+HRESULT CreateNumber(double value, IModelObject** pp_val);
+
+HRESULT CreateString(std::u16string value, IModelObject** pp_val);
+
+HRESULT UnboxString(IModelObject* object, BSTR* value);
+
+HRESULT GetModelAtIndex(WRL::ComPtr<IModelObject>& sp_parent,
+ WRL::ComPtr<IModelObject>& sp_index,
+ IModelObject** p_result);
+
+HRESULT GetCurrentThread(WRL::ComPtr<IDebugHostContext>& sp_host_context,
+ IModelObject** p_current_thread);
+
+#define RETURN_IF_FAIL(expression) \
+ do { \
+ HRESULT hr = expression; \
+ if (FAILED(hr)) { \
+ return hr; \
+ } \
+ } while (false)
+
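+// Example use, as seen throughout the extension:
+//   RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_ctx));
+// A failing HRESULT is returned from the enclosing function immediately.
+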
+#endif // V8_TOOLS_V8WINDBG_BASE_UTILITIES_H_
diff --git a/deps/v8/tools/v8windbg/copy-prereqs.py b/deps/v8/tools/v8windbg/copy-prereqs.py
new file mode 100644
index 0000000000..c13efe6582
--- /dev/null
+++ b/deps/v8/tools/v8windbg/copy-prereqs.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This program copies dbgeng.dll from the Windows SDK to the output directory,
+so that we can test v8windbg. (The version of dbgeng.dll in system32, which
+would be loaded otherwise, is insufficient.)
+Arguments:
+1. The directory that contains vs_toolchain.py
+2. The directory to copy to
+3. The cpu type for this build
+"""
+
+import sys
+import os
+
+vs_toolchain_dir, target_dir, target_cpu = sys.argv[1:]
+
+sys.path.insert(0, vs_toolchain_dir)
+import vs_toolchain
+
+def CopyDebuggerFile(debug_file):
+ win_sdk_dir = vs_toolchain.SetEnvironmentAndGetSDKDir()
+ if not win_sdk_dir:
+ return
+
+ full_path = os.path.join(win_sdk_dir, 'Debuggers', target_cpu, debug_file)
+ if not os.path.exists(full_path):
+ return
+
+ target_path = os.path.join(target_dir, debug_file)
+ vs_toolchain._CopyRuntimeImpl(target_path, full_path, verbose=False)
+
+ # Ninja expects the file's timestamp to be newer than this script.
+ os.utime(target_path, None)
+
+CopyDebuggerFile('dbgeng.dll')
diff --git a/deps/v8/tools/v8windbg/src/cur-isolate.cc b/deps/v8/tools/v8windbg/src/cur-isolate.cc
new file mode 100644
index 0000000000..f39098f686
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/cur-isolate.cc
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/cur-isolate.h"
+
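+// Reads the debuggee's isolate_key_ value: the TLS slot index in which each
+// thread stores its current v8::internal::Isolate pointer.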
+HRESULT GetIsolateKey(WRL::ComPtr<IDebugHostContext>& sp_ctx,
+ int* isolate_key) {
+ auto sp_v8_module = Extension::Current()->GetV8Module(sp_ctx);
+ if (sp_v8_module == nullptr) return E_FAIL;
+
+ WRL::ComPtr<IDebugHostSymbol> sp_isolate_sym;
+ RETURN_IF_FAIL(sp_v8_module->FindSymbolByName(kIsolateKey, &sp_isolate_sym));
+ SymbolKind kind;
+ RETURN_IF_FAIL(sp_isolate_sym->GetSymbolKind(&kind));
+ if (kind != SymbolData) return E_FAIL;
+ WRL::ComPtr<IDebugHostData> sp_isolate_key_data;
+ RETURN_IF_FAIL(sp_isolate_sym.As(&sp_isolate_key_data));
+ Location loc;
+ RETURN_IF_FAIL(sp_isolate_key_data->GetLocation(&loc));
+ ULONG64 bytes_read;
+  // Read sizeof(*isolate_key) bytes (the size of the int itself, not of the
+  // pointer) to avoid overrunning the output buffer.
+  RETURN_IF_FAIL(sp_debug_host_memory->ReadBytes(
+      sp_ctx.Get(), loc, isolate_key, sizeof(*isolate_key), &bytes_read));
+ return S_OK;
+}
+
+HRESULT GetCurrentIsolate(WRL::ComPtr<IModelObject>& sp_result) {
+ sp_result = nullptr;
+
+ // Get the current context
+ WRL::ComPtr<IDebugHostContext> sp_host_context;
+ RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_host_context));
+
+ WRL::ComPtr<IModelObject> sp_curr_thread;
+ RETURN_IF_FAIL(GetCurrentThread(sp_host_context, &sp_curr_thread));
+
+ WRL::ComPtr<IModelObject> sp_environment, sp_environment_block;
+ WRL::ComPtr<IModelObject> sp_tls_slots, sp_slot_index, sp_isolate_ptr;
+ RETURN_IF_FAIL(
+ sp_curr_thread->GetKeyValue(L"Environment", &sp_environment, nullptr));
+
+ RETURN_IF_FAIL(sp_environment->GetKeyValue(L"EnvironmentBlock",
+ &sp_environment_block, nullptr));
+
+ // EnvironmentBlock and TlsSlots are native types (TypeUDT) and thus
+ // GetRawValue rather than GetKeyValue should be used to get field (member)
+ // values.
+ ModelObjectKind kind;
+ RETURN_IF_FAIL(sp_environment_block->GetKind(&kind));
+ if (kind != ModelObjectKind::ObjectTargetObject) return E_FAIL;
+
+ RETURN_IF_FAIL(sp_environment_block->GetRawValue(SymbolField, L"TlsSlots", 0,
+ &sp_tls_slots));
+
+ int isolate_key = -1;
+ RETURN_IF_FAIL(GetIsolateKey(sp_host_context, &isolate_key));
+ RETURN_IF_FAIL(CreateInt32(isolate_key, &sp_slot_index));
+
+ RETURN_IF_FAIL(GetModelAtIndex(sp_tls_slots, sp_slot_index, &sp_isolate_ptr));
+
+ // Need to dereference the slot and then get the address held in it
+ WRL::ComPtr<IModelObject> sp_dereferenced_slot;
+ RETURN_IF_FAIL(sp_isolate_ptr->Dereference(&sp_dereferenced_slot));
+
+ uint64_t isolate_ptr;
+ RETURN_IF_FAIL(UnboxULong64(sp_dereferenced_slot.Get(), &isolate_ptr));
+ Location isolate_addr{isolate_ptr};
+
+  // If we got the isolate_key OK, then we must have the V8 module loaded.
+  // Get the internal Isolate type from it.
+ WRL::ComPtr<IDebugHostType> sp_isolate_type, sp_isolate_ptr_type;
+ RETURN_IF_FAIL(Extension::Current()
+ ->GetV8Module(sp_host_context)
+ ->FindTypeByName(kIsolate, &sp_isolate_type));
+ RETURN_IF_FAIL(
+ sp_isolate_type->CreatePointerTo(PointerStandard, &sp_isolate_ptr_type));
+
+ RETURN_IF_FAIL(sp_data_model_manager->CreateTypedObject(
+ sp_host_context.Get(), isolate_addr, sp_isolate_type.Get(), &sp_result));
+
+ return S_OK;
+}
+
+IFACEMETHODIMP CurrIsolateAlias::Call(IModelObject* p_context_object,
+ ULONG64 arg_count,
+ IModelObject** pp_arguments,
+ IModelObject** pp_result,
+ IKeyStore** pp_metadata) noexcept {
+ *pp_result = nullptr;
+ WRL::ComPtr<IModelObject> sp_result;
+ RETURN_IF_FAIL(GetCurrentIsolate(sp_result));
+ *pp_result = sp_result.Detach();
+ return S_OK;
+}
diff --git a/deps/v8/tools/v8windbg/src/cur-isolate.h b/deps/v8/tools/v8windbg/src/cur-isolate.h
new file mode 100644
index 0000000000..2be24ce7fd
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/cur-isolate.h
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_CUR_ISOLATE_H_
+#define V8_TOOLS_V8WINDBG_SRC_CUR_ISOLATE_H_
+
+#include <crtdbg.h>
+#include <wrl/implements.h>
+
+#include <string>
+#include <vector>
+
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+HRESULT GetCurrentIsolate(WRL::ComPtr<IModelObject>& sp_result);
+
+constexpr wchar_t kIsolateKey[] = L"isolate_key_";
+constexpr wchar_t kIsolate[] = L"v8::internal::Isolate";
+
+class CurrIsolateAlias
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelMethod> {
+ public:
+ IFACEMETHOD(Call)
+ (IModelObject* p_context_object, ULONG64 arg_count,
+ _In_reads_(arg_count) IModelObject** pp_arguments, IModelObject** pp_result,
+ IKeyStore** pp_metadata);
+};
+
+#endif // V8_TOOLS_V8WINDBG_SRC_CUR_ISOLATE_H_
diff --git a/deps/v8/tools/v8windbg/src/list-chunks.cc b/deps/v8/tools/v8windbg/src/list-chunks.cc
new file mode 100644
index 0000000000..90b3ff6af6
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/list-chunks.cc
@@ -0,0 +1,238 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/list-chunks.h"
+
+#include "tools/v8windbg/src/cur-isolate.h"
+
+// v8windbg!ListChunksAlias::Call
+IFACEMETHODIMP ListChunksAlias::Call(IModelObject* p_context_object,
+ ULONG64 arg_count,
+ _In_reads_(arg_count)
+ IModelObject** pp_arguments,
+ IModelObject** pp_result,
+ IKeyStore** pp_metadata) noexcept {
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_ctx));
+
+ WRL::ComPtr<IModelObject> result;
+ RETURN_IF_FAIL(
+ sp_data_model_manager->CreateSyntheticObject(sp_ctx.Get(), &result));
+
+ auto sp_iterator{WRL::Make<MemoryChunks>()};
+
+ RETURN_IF_FAIL(result->SetConcept(
+ __uuidof(IIndexableConcept),
+ static_cast<IIndexableConcept*>(sp_iterator.Get()), nullptr));
+ RETURN_IF_FAIL(result->SetConcept(
+ __uuidof(IIterableConcept),
+ static_cast<IIterableConcept*>(sp_iterator.Get()), nullptr));
+
+ *pp_result = result.Detach();
+ if (pp_metadata) {
+ *pp_metadata = nullptr;
+ }
+ return S_OK;
+}
+
+ChunkData::ChunkData() = default;
+ChunkData::~ChunkData() = default;
+ChunkData::ChunkData(const ChunkData&) = default;
+ChunkData::ChunkData(ChunkData&&) = default;
+ChunkData& ChunkData::operator=(const ChunkData&) = default;
+ChunkData& ChunkData::operator=(ChunkData&&) = default;
+
+MemoryChunkIterator::MemoryChunkIterator(
+ WRL::ComPtr<IDebugHostContext>& host_context)
+ : sp_ctx_(host_context) {}
+MemoryChunkIterator::~MemoryChunkIterator() = default;
+
+HRESULT MemoryChunkIterator::PopulateChunkData() {
+ WRL::ComPtr<IModelObject> sp_isolate, sp_heap, sp_space;
+ chunks_.clear();
+
+ RETURN_IF_FAIL(GetCurrentIsolate(sp_isolate));
+
+ RETURN_IF_FAIL(
+ sp_isolate->GetRawValue(SymbolField, L"heap_", RawSearchNone, &sp_heap));
+ RETURN_IF_FAIL(
+ sp_heap->GetRawValue(SymbolField, L"space_", RawSearchNone, &sp_space));
+
+ WRL::ComPtr<IDebugHostType> sp_space_type;
+ RETURN_IF_FAIL(sp_space->GetTypeInfo(&sp_space_type));
+
+ // Iterate over the array of Space pointers
+ WRL::ComPtr<IIterableConcept> sp_iterable;
+ RETURN_IF_FAIL(
+ sp_space->GetConcept(__uuidof(IIterableConcept), &sp_iterable, nullptr));
+
+ WRL::ComPtr<IModelIterator> sp_space_iterator;
+ RETURN_IF_FAIL(sp_iterable->GetIterator(sp_space.Get(), &sp_space_iterator));
+
+ // Loop through all the spaces in the array
+ WRL::ComPtr<IModelObject> sp_space_ptr;
+ while (sp_space_iterator->GetNext(&sp_space_ptr, 0, nullptr, nullptr) !=
+ E_BOUNDS) {
+    // We should have a "v8::internal::Space*". Dereference it, then get the
+    // field "memory_chunk_list_"
+    // [Type: v8::base::List<v8::internal::MemoryChunk>].
+ WRL::ComPtr<IModelObject> sp_space, sp_chunk_list, sp_mem_chunk_ptr,
+ sp_mem_chunk;
+ RETURN_IF_FAIL(sp_space_ptr->Dereference(&sp_space));
+ RETURN_IF_FAIL(sp_space->GetRawValue(SymbolField, L"memory_chunk_list_",
+ RawSearchNone, &sp_chunk_list));
+
+ // Then get field "front_" [Type: v8::internal::MemoryChunk *]
+ RETURN_IF_FAIL(sp_chunk_list->GetRawValue(
+ SymbolField, L"front_", RawSearchNone, &sp_mem_chunk_ptr));
+
+ // Loop here on the list of MemoryChunks for the space
+ while (true) {
+ // See if it is a nullptr (i.e. no chunks in this space)
+ uint64_t front_val;
+ RETURN_IF_FAIL(
+ UnboxULong64(sp_mem_chunk_ptr.Get(), &front_val, true /*convert*/));
+ if (front_val == 0) {
+ break;
+ }
+
+ // Dereference and get fields "area_start_" and "area_end_" (both uint64)
+ RETURN_IF_FAIL(sp_mem_chunk_ptr->Dereference(&sp_mem_chunk));
+
+ WRL::ComPtr<IModelObject> sp_start, sp_end;
+ RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"area_start_",
+ RawSearchNone, &sp_start));
+ RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"area_end_",
+ RawSearchNone, &sp_end));
+
+ ChunkData chunk_entry;
+ chunk_entry.area_start = sp_start;
+ chunk_entry.area_end = sp_end;
+ chunk_entry.space = sp_space;
+ chunks_.push_back(chunk_entry);
+
+ // Follow the list_node_.next_ to the next memory chunk
+ WRL::ComPtr<IModelObject> sp_list_node;
+ RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"list_node_",
+ RawSearchNone, &sp_list_node));
+
+ sp_mem_chunk_ptr = nullptr;
+ sp_mem_chunk = nullptr;
+ RETURN_IF_FAIL(sp_list_node->GetRawValue(
+ SymbolField, L"next_", RawSearchNone, &sp_mem_chunk_ptr));
+ // Top of the loop will check if this is a nullptr and exit if so
+ }
+ sp_space_ptr = nullptr;
+ }
+
+ return S_OK;
+}
+
+IFACEMETHODIMP MemoryChunkIterator::Reset() noexcept {
+ position_ = 0;
+ return S_OK;
+}
+
+IFACEMETHODIMP MemoryChunkIterator::GetNext(IModelObject** object,
+ ULONG64 dimensions,
+ IModelObject** indexers,
+ IKeyStore** metadata) noexcept {
+ if (dimensions > 1) return E_INVALIDARG;
+
+ if (position_ == 0) {
+ RETURN_IF_FAIL(PopulateChunkData());
+ }
+
+ if (metadata != nullptr) *metadata = nullptr;
+
+ WRL::ComPtr<IModelObject> sp_index, sp_value;
+
+ if (dimensions == 1) {
+ RETURN_IF_FAIL(CreateULong64(position_, &sp_index));
+ }
+
+ RETURN_IF_FAIL(GetAt(position_, &sp_value));
+
+ // Now update counter and transfer ownership of results, because nothing can
+ // fail from this point onward.
+ ++position_;
+ if (dimensions == 1) {
+ *indexers = sp_index.Detach();
+ }
+ *object = sp_value.Detach();
+ return S_OK;
+}
+
+HRESULT MemoryChunkIterator::GetAt(uint64_t index,
+ IModelObject** result) const {
+ if (index >= chunks_.size()) return E_BOUNDS;
+
+ // Create the synthetic object representing the chunk here
+ const ChunkData& curr_chunk = chunks_.at(index);
+ WRL::ComPtr<IModelObject> sp_value;
+ RETURN_IF_FAIL(
+ sp_data_model_manager->CreateSyntheticObject(sp_ctx_.Get(), &sp_value));
+ RETURN_IF_FAIL(
+ sp_value->SetKey(L"area_start", curr_chunk.area_start.Get(), nullptr));
+ RETURN_IF_FAIL(
+ sp_value->SetKey(L"area_end", curr_chunk.area_end.Get(), nullptr));
+ RETURN_IF_FAIL(sp_value->SetKey(L"space", curr_chunk.space.Get(), nullptr));
+
+ *result = sp_value.Detach();
+ return S_OK;
+}
+
+MemoryChunks::MemoryChunks() = default;
+MemoryChunks::~MemoryChunks() = default;
+
+IFACEMETHODIMP MemoryChunks::GetDimensionality(
+ IModelObject* context_object, ULONG64* dimensionality) noexcept {
+ *dimensionality = 1;
+ return S_OK;
+}
+
+IFACEMETHODIMP MemoryChunks::GetAt(IModelObject* context_object,
+ ULONG64 indexer_count,
+ IModelObject** indexers,
+ IModelObject** object,
+ IKeyStore** metadata) noexcept {
+ if (indexer_count != 1) return E_INVALIDARG;
+ if (metadata != nullptr) *metadata = nullptr;
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+
+  // A MemoryChunks instance is created for each synthetic object returned by
+  // ListChunksAlias::Call, so it is safe to cache and reuse a single iterator.
+ if (opt_chunks_ == nullptr) {
+ opt_chunks_ = WRL::Make<MemoryChunkIterator>(sp_ctx);
+ _ASSERT(opt_chunks_ != nullptr);
+ RETURN_IF_FAIL(opt_chunks_->PopulateChunkData());
+ }
+
+ uint64_t index;
+ RETURN_IF_FAIL(UnboxULong64(indexers[0], &index, true /*convert*/));
+
+ return opt_chunks_->GetAt(index, object);
+}
+
+IFACEMETHODIMP MemoryChunks::SetAt(IModelObject* context_object,
+ ULONG64 indexer_count,
+ IModelObject** indexers,
+ IModelObject* value) noexcept {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP MemoryChunks::GetDefaultIndexDimensionality(
+ IModelObject* context_object, ULONG64* dimensionality) noexcept {
+ *dimensionality = 1;
+ return S_OK;
+}
+
+IFACEMETHODIMP MemoryChunks::GetIterator(IModelObject* context_object,
+ IModelIterator** iterator) noexcept {
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+ auto sp_memory_iterator{WRL::Make<MemoryChunkIterator>(sp_ctx)};
+ *iterator = sp_memory_iterator.Detach();
+ return S_OK;
+}
diff --git a/deps/v8/tools/v8windbg/src/list-chunks.h b/deps/v8/tools/v8windbg/src/list-chunks.h
new file mode 100644
index 0000000000..10eec100d1
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/list-chunks.h
@@ -0,0 +1,100 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
+#define V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
+
+#include <crtdbg.h>
+#include <wrl/implements.h>
+
+#include <optional>
+#include <string>
+#include <vector>
+
+#include "src/base/optional.h"
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
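+// Function alias that lists the memory chunks in each space of the V8 heap.
+// Assuming it is registered under a name such as "listchunks" (the
+// registration lives elsewhere in the extension), it can be invoked as:
+//   dx @$listchunks()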
+class ListChunksAlias
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelMethod> {
+ public:
+ IFACEMETHOD(Call)
+ (IModelObject* p_context_object, ULONG64 arg_count,
+ _In_reads_(arg_count) IModelObject** pp_arguments, IModelObject** pp_result,
+ IKeyStore** pp_metadata);
+};
+
+struct ChunkData {
+ ChunkData();
+ ~ChunkData();
+ ChunkData(const ChunkData&);
+ ChunkData(ChunkData&&);
+ ChunkData& operator=(const ChunkData&);
+ ChunkData& operator=(ChunkData&&);
+ WRL::ComPtr<IModelObject> area_start;
+ WRL::ComPtr<IModelObject> area_end;
+ WRL::ComPtr<IModelObject> space;
+};
+
+class MemoryChunkIterator
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelIterator> {
+ public:
+ MemoryChunkIterator(WRL::ComPtr<IDebugHostContext>& host_context);
+ ~MemoryChunkIterator() override;
+
+ HRESULT PopulateChunkData();
+
+ IFACEMETHOD(Reset)();
+
+ IFACEMETHOD(GetNext)
+ (IModelObject** object, ULONG64 dimensions, IModelObject** indexers,
+ IKeyStore** metadata);
+
+ const std::vector<ChunkData>& GetChunks() const { return chunks_; }
+
+ HRESULT GetAt(uint64_t index, IModelObject** result) const;
+
+ private:
+ ULONG position_ = 0;
+ std::vector<ChunkData> chunks_;
+ WRL::ComPtr<IDebugHostContext> sp_ctx_;
+};
+
+class MemoryChunks
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IIndexableConcept, IIterableConcept> {
+ public:
+ MemoryChunks();
+ ~MemoryChunks() override;
+
+ // IIndexableConcept members
+ IFACEMETHOD(GetDimensionality)
+ (IModelObject* context_object, ULONG64* dimensionality);
+
+ IFACEMETHOD(GetAt)
+ (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+ IModelObject** object, IKeyStore** metadata);
+
+ IFACEMETHOD(SetAt)
+ (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+ IModelObject* value);
+
+ // IIterableConcept
+ IFACEMETHOD(GetDefaultIndexDimensionality)
+ (IModelObject* context_object, ULONG64* dimensionality);
+
+ IFACEMETHOD(GetIterator)
+ (IModelObject* context_object, IModelIterator** iterator);
+
+ private:
+ WRL::ComPtr<MemoryChunkIterator> opt_chunks_;
+};
+
+#endif // V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
diff --git a/deps/v8/tools/v8windbg/src/local-variables.cc b/deps/v8/tools/v8windbg/src/local-variables.cc
new file mode 100644
index 0000000000..20814cf28a
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/local-variables.cc
@@ -0,0 +1,120 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/local-variables.h"
+
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+V8LocalVariables::V8LocalVariables(WRL::ComPtr<IModelPropertyAccessor> original,
+ bool is_parameters)
+ : original_(original), is_parameters_(is_parameters) {}
+V8LocalVariables::~V8LocalVariables() = default;
+
+IFACEMETHODIMP V8LocalVariables::GetValue(PCWSTR key, IModelObject* context,
+ IModelObject** value) noexcept {
+ // See if the frame can fetch locals based on symbols. If so, it's a normal
+ // C++ frame, so we can be done.
+ HRESULT original_hr = original_->GetValue(key, context, value);
+ if (SUCCEEDED(original_hr)) return original_hr;
+
+ // Next, try to find out about the instruction pointer. If it is within the V8
+ // module, or points to unknown space outside a module (generated code), then
+ // we're interested. Otherwise, we have nothing useful to do.
+ WRL::ComPtr<IModelObject> attributes;
+ RETURN_IF_FAIL(context->GetKeyValue(L"Attributes", &attributes, nullptr));
+ WRL::ComPtr<IModelObject> boxed_instruction_offset;
+ RETURN_IF_FAIL(attributes->GetKeyValue(L"InstructionOffset",
+ &boxed_instruction_offset, nullptr));
+ ULONG64 instruction_offset{};
+ RETURN_IF_FAIL(
+ UnboxULong64(boxed_instruction_offset.Get(), &instruction_offset));
+ WRL::ComPtr<IDebugHostSymbols> symbols;
+ RETURN_IF_FAIL(sp_debug_host.As(&symbols));
+ WRL::ComPtr<IDebugHostContext> host_context;
+ RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&host_context));
+ WRL::ComPtr<IDebugHostModule> module;
+ if (SUCCEEDED(symbols->FindModuleByLocation(host_context.Get(),
+ instruction_offset, &module))) {
+ Location module_base;
+ RETURN_IF_FAIL(module->GetBaseLocation(&module_base));
+ WRL::ComPtr<IDebugHostModule> v8_module =
+ Extension::Current()->GetV8Module(host_context);
+ if (v8_module == nullptr) {
+      // If the V8 module doesn't exist, then this address, which is inside
+      // some module, can't be in V8.
+ return original_hr;
+ }
+ Location v8_base;
+ RETURN_IF_FAIL(v8_module->GetBaseLocation(&v8_base));
+ if (module_base != v8_base) {
+ // It's in a module, but not the one that contains V8.
+ return original_hr;
+ }
+ }
+
+ // Initialize an empty result object.
+ WRL::ComPtr<IModelObject> result;
+ RETURN_IF_FAIL(sp_data_model_manager->CreateSyntheticObject(
+ host_context.Get(), &result));
+ WRL::ComPtr<IModelObject> parent_model;
+ RETURN_IF_FAIL(sp_data_model_manager->AcquireNamedModel(
+ is_parameters_ ? L"Debugger.Models.Parameters"
+ : L"Debugger.Models.LocalVariables",
+ &parent_model));
+ RETURN_IF_FAIL(result->AddParentModel(parent_model.Get(), /*context=*/nullptr,
+ /*override=*/false));
+
+ if (is_parameters_) {
+    // We're not actually adding any parameters data yet; this just needs to
+    // succeed, because the locals pane displays nothing if getting either
+    // LocalVariables or Parameters fails.
+ *value = result.Detach();
+ return S_OK;
+ }
+
+ // Get the stack and frame pointers for the current frame.
+ WRL::ComPtr<IModelObject> boxed_stack_offset;
+ RETURN_IF_FAIL(
+ attributes->GetKeyValue(L"StackOffset", &boxed_stack_offset, nullptr));
+ ULONG64 stack_offset{};
+ RETURN_IF_FAIL(UnboxULong64(boxed_stack_offset.Get(), &stack_offset));
+ WRL::ComPtr<IModelObject> boxed_frame_offset;
+ RETURN_IF_FAIL(
+ attributes->GetKeyValue(L"FrameOffset", &boxed_frame_offset, nullptr));
+ ULONG64 frame_offset{};
+ RETURN_IF_FAIL(UnboxULong64(boxed_frame_offset.Get(), &frame_offset));
+
+ // Eventually v8_debug_helper will provide some help here, but for now, just
+ // provide the option to view the whole stack frame as tagged data. It can
+ // be somewhat useful.
+ WRL::ComPtr<IDebugHostType> object_type =
+ Extension::Current()->GetV8ObjectType(host_context);
+ if (object_type == nullptr) {
+ // There's nothing useful to do if we can't find the symbol for
+ // v8::internal::Object.
+ return original_hr;
+ }
+ ULONG64 object_size{};
+ RETURN_IF_FAIL(object_type->GetSize(&object_size));
+ ULONG64 num_objects = (frame_offset - stack_offset) / object_size;
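+  // For example, a frame spanning 0x40 bytes with 8-byte Objects yields an
+  // array of eight tagged values covering [stack_offset, frame_offset).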
+ ArrayDimension dimensions[] = {
+ {/*start=*/0, /*length=*/num_objects, /*stride=*/object_size}};
+ WRL::ComPtr<IDebugHostType> object_array_type;
+ RETURN_IF_FAIL(object_type->CreateArrayOf(/*dimensions=*/1, dimensions,
+ &object_array_type));
+ WRL::ComPtr<IModelObject> array;
+ RETURN_IF_FAIL(sp_data_model_manager->CreateTypedObject(
+ host_context.Get(), stack_offset, object_array_type.Get(), &array));
+ RETURN_IF_FAIL(
+ result->SetKey(L"memory interpreted as Objects", array.Get(), nullptr));
+
+ *value = result.Detach();
+ return S_OK;
+}
+
+IFACEMETHODIMP V8LocalVariables::SetValue(PCWSTR key, IModelObject* context,
+ IModelObject* value) noexcept {
+ return E_NOTIMPL;
+}
diff --git a/deps/v8/tools/v8windbg/src/local-variables.h b/deps/v8/tools/v8windbg/src/local-variables.h
new file mode 100644
index 0000000000..169a93a578
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/local-variables.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_LOCAL_VARIABLES_H_
+#define V8_TOOLS_V8WINDBG_SRC_LOCAL_VARIABLES_H_
+
+#include <comutil.h>
+#include <wrl/implements.h>
+
+#include "tools/v8windbg/base/dbgext.h"
+
+// An implementation of the property accessor for the "LocalVariables" or
+// "Parameters" property on Debugger.Models.StackFrame. This allows us to modify
+// the variables shown in each frame.
+class V8LocalVariables
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelPropertyAccessor> {
+ public:
+ V8LocalVariables(WRL::ComPtr<IModelPropertyAccessor> original,
+ bool is_parameters);
+ ~V8LocalVariables() override;
+
+ IFACEMETHOD(GetValue)
+ (PCWSTR key, IModelObject* context, IModelObject** value);
+ IFACEMETHOD(SetValue)(PCWSTR key, IModelObject* context, IModelObject* value);
+
+ private:
+ // The built-in accessor which we are overriding.
+ WRL::ComPtr<IModelPropertyAccessor> original_;
+ // Whether this is for Parameters rather than LocalVariables.
+ bool is_parameters_;
+};
+
+#endif // V8_TOOLS_V8WINDBG_SRC_LOCAL_VARIABLES_H_
diff --git a/deps/v8/tools/v8windbg/src/object-inspection.cc b/deps/v8/tools/v8windbg/src/object-inspection.cc
new file mode 100644
index 0000000000..ce0370a697
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/object-inspection.cc
@@ -0,0 +1,622 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/object-inspection.h"
+
+#include "src/flags/flags.h"
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+V8CachedObject::V8CachedObject(Location location,
+ std::string uncompressed_type_name,
+ WRL::ComPtr<IDebugHostContext> context,
+ bool is_compressed)
+ : location_(std::move(location)),
+ uncompressed_type_name_(std::move(uncompressed_type_name)),
+ context_(std::move(context)),
+ is_compressed_(is_compressed) {}
+HRESULT V8CachedObject::Create(IModelObject* p_v8_object_instance,
+ IV8CachedObject** result) {
+ Location location;
+ RETURN_IF_FAIL(p_v8_object_instance->GetLocation(&location));
+
+ WRL::ComPtr<IDebugHostContext> context;
+ RETURN_IF_FAIL(p_v8_object_instance->GetContext(&context));
+
+ // If the object is of type v8::internal::TaggedValue, and this build uses
+ // compressed pointers, then the value is compressed. Other types such as
+ // v8::internal::Object represent uncompressed tagged values.
+ WRL::ComPtr<IDebugHostType> sp_type;
+ _bstr_t type_name;
+ bool is_compressed =
+ COMPRESS_POINTERS_BOOL &&
+ SUCCEEDED(p_v8_object_instance->GetTypeInfo(&sp_type)) &&
+ SUCCEEDED(sp_type->GetName(type_name.GetAddress())) &&
+ static_cast<const char*>(type_name) == std::string(kTaggedValue);
+
+ const char* uncompressed_type_name =
+ is_compressed ? kObject : static_cast<const char*>(type_name);
+
+ *result = WRL::Make<V8CachedObject>(location, uncompressed_type_name, context,
+ is_compressed)
+ .Detach();
+ return S_OK;
+}
+V8CachedObject::V8CachedObject(V8HeapObject heap_object)
+ : heap_object_(std::move(heap_object)), heap_object_initialized_(true) {}
+
+V8CachedObject::~V8CachedObject() = default;
+
+IFACEMETHODIMP V8CachedObject::GetCachedV8HeapObject(
+ V8HeapObject** pp_heap_object) noexcept {
+ if (!heap_object_initialized_) {
+ heap_object_initialized_ = true;
+ uint64_t tagged_ptr = 0;
+ uint64_t bytes_read;
+ HRESULT hr = sp_debug_host_memory->ReadBytes(
+ context_.Get(), location_, reinterpret_cast<void*>(&tagged_ptr),
+ is_compressed_ ? i::kTaggedSize : sizeof(void*), &bytes_read);
+ // S_FALSE can be returned if fewer bytes were read than were requested. We
+ // need all of the bytes, so check for S_OK.
+ if (hr != S_OK) {
+ std::stringstream message;
+ message << "Unable to read memory";
+ if (location_.IsVirtualAddress()) {
+ message << " at 0x" << std::hex << location_.GetOffset();
+ }
+ heap_object_.friendly_name = ConvertToU16String(message.str());
+ } else {
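+      // The read above fetched only the low 32 bits for a compressed value;
+      // expand them to a full 64-bit tagged pointer before decoding.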
+ if (is_compressed_)
+ tagged_ptr = ExpandCompressedPointer(static_cast<uint32_t>(tagged_ptr));
+ heap_object_ =
+ ::GetHeapObject(context_, tagged_ptr, location_.GetOffset(),
+ uncompressed_type_name_.c_str(), is_compressed_);
+ }
+ }
+ *pp_heap_object = &this->heap_object_;
+ return S_OK;
+}
+
+IndexedFieldData::IndexedFieldData(Property property)
+ : property_(std::move(property)) {}
+
+IndexedFieldData::~IndexedFieldData() = default;
+
+IFACEMETHODIMP IndexedFieldData::GetProperty(Property** property) noexcept {
+ if (!property) return E_POINTER;
+ *property = &this->property_;
+ return S_OK;
+}
+
+V8ObjectKeyEnumerator::V8ObjectKeyEnumerator(
+ WRL::ComPtr<IV8CachedObject>& v8_cached_object)
+ : sp_v8_cached_object_{v8_cached_object} {}
+V8ObjectKeyEnumerator::~V8ObjectKeyEnumerator() = default;
+
+IFACEMETHODIMP V8ObjectKeyEnumerator::Reset() noexcept {
+ index_ = 0;
+ return S_OK;
+}
+
+IFACEMETHODIMP V8ObjectKeyEnumerator::GetNext(BSTR* key, IModelObject** value,
+ IKeyStore** metadata) noexcept {
+ V8HeapObject* p_v8_heap_object;
+ sp_v8_cached_object_->GetCachedV8HeapObject(&p_v8_heap_object);
+
+ if (static_cast<size_t>(index_) >= p_v8_heap_object->properties.size())
+ return E_BOUNDS;
+
+ auto* name_ptr = p_v8_heap_object->properties[index_].name.c_str();
+ *key = ::SysAllocString(U16ToWChar(name_ptr));
+ ++index_;
+ return S_OK;
+}
+
+IFACEMETHODIMP V8LocalDataModel::InitializeObject(
+ IModelObject* model_object,
+ IDebugHostTypeSignature* matching_type_signature,
+ IDebugHostSymbolEnumerator* wildcard_matches) noexcept {
+ return S_OK;
+}
+
+IFACEMETHODIMP V8LocalDataModel::GetName(BSTR* model_name) noexcept {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::InitializeObject(
+ IModelObject* model_object,
+ IDebugHostTypeSignature* matching_type_signature,
+ IDebugHostSymbolEnumerator* wildcard_matches) noexcept {
+ return S_OK;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::GetName(BSTR* model_name) noexcept {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::ToDisplayString(
+ IModelObject* context_object, IKeyStore* metadata,
+ BSTR* display_string) noexcept {
+ WRL::ComPtr<IV8CachedObject> sp_v8_cached_object;
+ RETURN_IF_FAIL(GetCachedObject(context_object, &sp_v8_cached_object));
+ V8HeapObject* p_v8_heap_object;
+ RETURN_IF_FAIL(sp_v8_cached_object->GetCachedV8HeapObject(&p_v8_heap_object));
+ *display_string = ::SysAllocString(
+ reinterpret_cast<const wchar_t*>(p_v8_heap_object->friendly_name.data()));
+ return S_OK;
+}
+
+namespace {
+
+// Creates a synthetic object, attaches a parent model, and sets the context
+// object for that parent data model. Caller is responsible for ensuring that
+// the parent model's Concepts have been initialized correctly and that the
+// data model context is of an appropriate type for the parent model.
+HRESULT CreateSyntheticObjectWithParentAndDataContext(
+ IDebugHostContext* ctx, IModelObject* parent_model, IUnknown* data_context,
+ IModelObject** result) {
+ WRL::ComPtr<IModelObject> value;
+ RETURN_IF_FAIL(sp_data_model_manager->CreateSyntheticObject(ctx, &value));
+ RETURN_IF_FAIL(
+ value->AddParentModel(parent_model, nullptr, true /*override*/));
+ RETURN_IF_FAIL(value->SetContextForDataModel(parent_model, data_context));
+ *result = value.Detach();
+ return S_OK;
+}
+
+// Creates an IModelObject for a V8 object whose value is represented by the
+// data in cached_object. This is an alternative to CreateTypedObject for
+// particularly complex cases (compressed values and those that don't exist
+// anywhere in memory).
+HRESULT CreateSyntheticObjectForV8Object(IDebugHostContext* ctx,
+ V8CachedObject* cached_object,
+ IModelObject** result) {
+ // Explicitly add the parent model and data context. On a plain typed object,
+ // the parent model would be attached automatically because we registered for
+ // a matching type signature, and the data context would be set during
+ // V8ObjectDataModel::GetCachedObject.
+ return CreateSyntheticObjectWithParentAndDataContext(
+ ctx, Extension::Current()->GetObjectDataModel(), cached_object, result);
+}
+
+// Creates an IModelObject to represent a field that is not a struct or array.
+HRESULT GetModelForBasicField(const uint64_t address,
+ const std::u16string& type_name,
+ const std::string& uncompressed_type_name,
+ WRL::ComPtr<IDebugHostContext>& sp_ctx,
+ IModelObject** result) {
+ if (type_name == ConvertToU16String(uncompressed_type_name)) {
+ // For untagged and uncompressed tagged fields, create an IModelObject
+ // representing a normal native data type.
+ WRL::ComPtr<IDebugHostType> type =
+ Extension::Current()->GetTypeFromV8Module(sp_ctx, type_name.c_str());
+ if (type == nullptr) return E_FAIL;
+ return sp_data_model_manager->CreateTypedObject(
+ sp_ctx.Get(), Location{address}, type.Get(), result);
+ }
+
+ // For compressed tagged fields, we need to do something a little more
+ // complicated. We could just use CreateTypedObject with the type
+ // v8::internal::TaggedValue, but then we'd sacrifice any other data
+ // that we've learned about the field's specific type. So instead we
+ // create a synthetic object.
+ WRL::ComPtr<V8CachedObject> cached_object = WRL::Make<V8CachedObject>(
+ Location(address), uncompressed_type_name, sp_ctx,
+ /*is_compressed=*/true);
+ return CreateSyntheticObjectForV8Object(sp_ctx.Get(), cached_object.Get(),
+ result);
+}
+
+// Creates an IModelObject representing the value of a bitfield.
+HRESULT GetModelForBitField(uint64_t address, const uint8_t num_bits,
+ uint8_t shift_bits, const std::u16string& type_name,
+ WRL::ComPtr<IDebugHostContext>& sp_ctx,
+ IModelObject** result) {
+ // Look up the type by name.
+ WRL::ComPtr<IDebugHostType> type =
+ Extension::Current()->GetTypeFromV8Module(sp_ctx, type_name.c_str());
+ if (type == nullptr) return E_FAIL;
+
+ // Figure out exactly which bytes contain the bitfield's data. This depends on
+ // platform byte order (little-endian for Windows).
+ constexpr int kBitsPerByte = 8;
+ uint8_t shift_bytes = shift_bits / kBitsPerByte;
+ address += shift_bytes;
+ shift_bits -= shift_bytes * kBitsPerByte;
+ size_t bits_to_read = shift_bits + num_bits;
+ size_t bytes_to_read = (bits_to_read + kBitsPerByte - 1) / kBitsPerByte;
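+  // For example, a bitfield with shift_bits = 11 and num_bits = 3 advances the
+  // address by one byte (shift_bytes = 1), leaving shift_bits = 3,
+  // bits_to_read = 6, and bytes_to_read = 1.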
+
+ uintptr_t value = 0;
+
+ // V8 guarantees that bitfield structs are no bigger than a single pointer.
+ if (bytes_to_read > sizeof(value)) {
+ std::stringstream message;
+ message << "Fatal v8windbg error: found bitfield struct of "
+            << bytes_to_read << " bytes, which exceeds the supported size of "
+ << sizeof(value);
+ return CreateString(ConvertToU16String(message.str()), result);
+ }
+
+ uint64_t bytes_read;
+ HRESULT hr = sp_debug_host_memory->ReadBytes(sp_ctx.Get(), address,
+ reinterpret_cast<void*>(&value),
+ bytes_to_read, &bytes_read);
+
+ // S_FALSE can be returned if fewer bytes were read than were requested. We
+ // need all of the bytes, so check for S_OK.
+ if (hr != S_OK) {
+ std::stringstream message;
+ message << "Unable to read memory at 0x" << std::hex << address;
+ return CreateString(ConvertToU16String(message.str()), result);
+ }
+
+ // Decode the bitfield.
+ value = (value >> shift_bits) & ((1 << num_bits) - 1);
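+  // Continuing the example above: value = (value >> 3) & 0b111 recovers the
+  // three field bits.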
+
+ return CreateTypedIntrinsic(value, type.Get(), result);
+}
+
+// Creates an IModelObject to represent the packed fields in a Torque struct.
+// Note that Torque structs are not C++ structs and do not have any type
+// definitions in the V8 symbols.
+HRESULT GetModelForStruct(const uint64_t address,
+ const std::vector<StructField>& fields,
+ WRL::ComPtr<IDebugHostContext>& sp_ctx,
+ IModelObject** result) {
+ WRL::ComPtr<IModelObject> sp_value;
+ RETURN_IF_FAIL(
+ sp_data_model_manager->CreateSyntheticObject(sp_ctx.Get(), &sp_value));
+
+ // There's no need for any fancy Concepts here; just add key-value pairs for
+ // each field.
+ for (const StructField& field : fields) {
+ WRL::ComPtr<IModelObject> field_model;
+ if (field.num_bits == 0) {
+ RETURN_IF_FAIL(GetModelForBasicField(
+ address + field.offset, field.type_name, field.uncompressed_type_name,
+ sp_ctx, &field_model));
+ } else {
+ RETURN_IF_FAIL(GetModelForBitField(address + field.offset, field.num_bits,
+ field.shift_bits, field.type_name,
+ sp_ctx, &field_model));
+ }
+ RETURN_IF_FAIL(
+ sp_value->SetKey(reinterpret_cast<const wchar_t*>(field.name.c_str()),
+ field_model.Get(), nullptr));
+ }
+
+ *result = sp_value.Detach();
+ return S_OK;
+}
+
+// Creates an IModelObject representing an array of some type that we expect to
+// be defined in the V8 symbols.
+HRESULT GetModelForNativeArray(const uint64_t address,
+ const std::u16string& type_name, size_t count,
+ WRL::ComPtr<IDebugHostContext>& sp_ctx,
+ IModelObject** result) {
+ WRL::ComPtr<IDebugHostType> type =
+ Extension::Current()->GetTypeFromV8Module(sp_ctx, type_name.c_str());
+ if (type == nullptr) return E_FAIL;
+
+ ULONG64 object_size{};
+ RETURN_IF_FAIL(type->GetSize(&object_size));
+
+ ArrayDimension dimensions[] = {
+ {/*start=*/0, /*length=*/count, /*stride=*/object_size}};
+ WRL::ComPtr<IDebugHostType> array_type;
+ RETURN_IF_FAIL(
+ type->CreateArrayOf(/*dimensions=*/1, dimensions, &array_type));
+
+ return sp_data_model_manager->CreateTypedObject(
+ sp_ctx.Get(), Location{address}, array_type.Get(), result);
+}
+
+// Creates an IModelObject that represents an array of structs or compressed
+// tagged values.
+HRESULT GetModelForCustomArray(const Property& prop,
+ WRL::ComPtr<IDebugHostContext>& sp_ctx,
+ IModelObject** result) {
+ // Create the context which should be provided to the indexing and iterating
+ // functionality provided by the parent model. This is instance-specific data,
+ // whereas the parent model object could be shared among many custom arrays.
+ WRL::ComPtr<IndexedFieldData> context_data =
+ WRL::Make<IndexedFieldData>(prop);
+
+ return CreateSyntheticObjectWithParentAndDataContext(
+ sp_ctx.Get(), Extension::Current()->GetIndexedFieldDataModel(),
+ context_data.Get(), result);
+}
+
+// Creates an IModelObject representing the data in the given property.
+HRESULT GetModelForProperty(const Property& prop,
+ WRL::ComPtr<IDebugHostContext>& sp_ctx,
+ IModelObject** result) {
+ switch (prop.type) {
+ case PropertyType::kPointer:
+ return GetModelForBasicField(prop.addr_value, prop.type_name,
+ prop.uncompressed_type_name, sp_ctx, result);
+ case PropertyType::kStruct:
+ return GetModelForStruct(prop.addr_value, prop.fields, sp_ctx, result);
+ case PropertyType::kArray:
+ case PropertyType::kStructArray:
+ if (prop.type == PropertyType::kArray &&
+ prop.type_name == ConvertToU16String(prop.uncompressed_type_name)) {
+ // An array of things that are not structs or compressed tagged values
+ // is most cleanly represented by a native array.
+ return GetModelForNativeArray(prop.addr_value, prop.type_name,
+ prop.length, sp_ctx, result);
+ }
+ // Otherwise, we must construct a custom iterable object.
+ return GetModelForCustomArray(prop, sp_ctx, result);
+ default:
+ return E_FAIL;
+ }
+}
+
+// Creates an IModelObject representing the data in an array at the given index.
+// context_object is expected to be an object of the form created by
+// GetModelForCustomArray, meaning its context for the IndexedFieldParent data
+// model is an IIndexedFieldData containing the description of the array.
+HRESULT GetModelForCustomArrayElement(IModelObject* context_object,
+ size_t index, IModelObject** object) {
+ // Open a few layers of wrapper objects to get to the Property object that
+ // describes the array.
+ WRL::ComPtr<IUnknown> data_model_context;
+ RETURN_IF_FAIL(context_object->GetContextForDataModel(
+ Extension::Current()->GetIndexedFieldDataModel(), &data_model_context));
+ WRL::ComPtr<IIndexedFieldData> indexed_field_data;
+ RETURN_IF_FAIL(data_model_context.As(&indexed_field_data));
+ Property* prop;
+ RETURN_IF_FAIL(indexed_field_data->GetProperty(&prop));
+
+ if (index >= prop->length) {
+ return E_BOUNDS;
+ }
+
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+
+ ULONG64 address = prop->addr_value + index * prop->item_size;
+
+ switch (prop->type) {
+ case PropertyType::kArray:
+ return GetModelForBasicField(address, prop->type_name,
+ prop->uncompressed_type_name, sp_ctx,
+ object);
+ case PropertyType::kStructArray:
+ return GetModelForStruct(address, prop->fields, sp_ctx, object);
+ default:
+ return E_FAIL; // Only array properties should be possible here.
+ }
+}
+
+} // namespace
+
+IFACEMETHODIMP IndexedFieldParent::InitializeObject(
+ IModelObject* model_object,
+ IDebugHostTypeSignature* matching_type_signature,
+ IDebugHostSymbolEnumerator* wildcard_matches) noexcept {
+ return S_OK;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetName(BSTR* model_name) noexcept {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetDimensionality(
+ IModelObject* context_object, ULONG64* dimensionality) noexcept {
+ *dimensionality = 1;
+ return S_OK;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetAt(IModelObject* context_object,
+ ULONG64 indexer_count,
+ IModelObject** indexers,
+ IModelObject** object,
+ IKeyStore** metadata) noexcept {
+ if (indexer_count != 1) return E_INVALIDARG;
+ if (metadata != nullptr) *metadata = nullptr;
+
+ ULONG64 index;
+ RETURN_IF_FAIL(UnboxULong64(indexers[0], &index, /*convert=*/true));
+
+ return GetModelForCustomArrayElement(context_object, index, object);
+}
+
+IFACEMETHODIMP IndexedFieldParent::SetAt(IModelObject* context_object,
+ ULONG64 indexer_count,
+ IModelObject** indexers,
+ IModelObject* value) noexcept {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetDefaultIndexDimensionality(
+ IModelObject* context_object, ULONG64* dimensionality) noexcept {
+ *dimensionality = 1;
+ return S_OK;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetIterator(
+ IModelObject* context_object, IModelIterator** iterator) noexcept {
+ auto indexed_field_iterator{WRL::Make<IndexedFieldIterator>(context_object)};
+ *iterator = indexed_field_iterator.Detach();
+ return S_OK;
+}
+
+IndexedFieldIterator::IndexedFieldIterator(IModelObject* context_object)
+ : context_object_(context_object) {}
+IndexedFieldIterator::~IndexedFieldIterator() = default;
+
+IFACEMETHODIMP IndexedFieldIterator::Reset() noexcept {
+ next_ = 0;
+ return S_OK;
+}
+
+IFACEMETHODIMP IndexedFieldIterator::GetNext(IModelObject** object,
+ ULONG64 dimensions,
+ IModelObject** indexers,
+ IKeyStore** metadata) noexcept {
+ if (dimensions > 1) return E_INVALIDARG;
+
+ WRL::ComPtr<IModelObject> sp_index, sp_value;
+ RETURN_IF_FAIL(
+ GetModelForCustomArrayElement(context_object_.Get(), next_, &sp_value));
+ RETURN_IF_FAIL(CreateULong64(next_, &sp_index));
+
+ // Everything that could fail (including the bounds check) has succeeded, so
+ // increment the index.
+ ++next_;
+
+ // Write results (none of these steps can fail, which is important because we
+ // transfer ownership of two separate objects).
+ if (dimensions == 1) {
+ indexers[0] = sp_index.Detach();
+ }
+ *object = sp_value.Detach();
+ if (metadata != nullptr) *metadata = nullptr;
+ return S_OK;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::GetKey(IModelObject* context_object,
+ PCWSTR key, IModelObject** key_value,
+ IKeyStore** metadata,
+ bool* has_key) noexcept {
+ if (metadata != nullptr) *metadata = nullptr;
+
+ WRL::ComPtr<IV8CachedObject> sp_v8_cached_object;
+ RETURN_IF_FAIL(GetCachedObject(context_object, &sp_v8_cached_object));
+ V8HeapObject* p_v8_heap_object;
+ RETURN_IF_FAIL(sp_v8_cached_object->GetCachedV8HeapObject(&p_v8_heap_object));
+
+ *has_key = false;
+ for (const auto& prop : p_v8_heap_object->properties) {
+ const char16_t* p_key = reinterpret_cast<const char16_t*>(key);
+ if (prop.name.compare(p_key) == 0) {
+ *has_key = true;
+ if (key_value != nullptr) {
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+ RETURN_IF_FAIL(GetModelForProperty(prop, sp_ctx, key_value));
+ }
+ return S_OK;
+ }
+ }
+
+ return S_OK;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::SetKey(IModelObject* context_object,
+ PCWSTR key, IModelObject* key_value,
+ IKeyStore* metadata) noexcept {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::EnumerateKeys(
+ IModelObject* context_object, IKeyEnumerator** pp_enumerator) noexcept {
+ WRL::ComPtr<IV8CachedObject> sp_v8_cached_object;
+ RETURN_IF_FAIL(GetCachedObject(context_object, &sp_v8_cached_object));
+
+ auto enumerator{WRL::Make<V8ObjectKeyEnumerator>(sp_v8_cached_object)};
+ *pp_enumerator = enumerator.Detach();
+ return S_OK;
+}
+
+IFACEMETHODIMP V8LocalValueProperty::GetValue(
+ PCWSTR pwsz_key, IModelObject* p_v8_local_instance,
+ IModelObject** pp_value) noexcept {
+  // Get the parametric type within v8::Local<*> and set the value to a
+  // pointer to an instance of this type.
+
+ WRL::ComPtr<IDebugHostType> sp_type;
+ RETURN_IF_FAIL(p_v8_local_instance->GetTypeInfo(&sp_type));
+
+ bool is_generic;
+ RETURN_IF_FAIL(sp_type->IsGeneric(&is_generic));
+ if (!is_generic) return E_FAIL;
+
+ WRL::ComPtr<IDebugHostSymbol> sp_generic_arg;
+ RETURN_IF_FAIL(sp_type->GetGenericArgumentAt(0, &sp_generic_arg));
+
+ _bstr_t generic_name;
+ RETURN_IF_FAIL(sp_generic_arg->GetName(generic_name.GetAddress()));
+
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(p_v8_local_instance->GetContext(&sp_ctx));
+
+ WRL::ComPtr<IDebugHostType> sp_value_type =
+ Extension::Current()->GetTypeFromV8Module(
+ sp_ctx, reinterpret_cast<const char16_t*>(
+ static_cast<const wchar_t*>(generic_name)));
+ if (sp_value_type == nullptr ||
+ !Extension::Current()->DoesTypeDeriveFromObject(sp_value_type)) {
+ // The value type doesn't derive from v8::internal::Object (probably a
+ // public API type), so just use plain v8::internal::Object. We could
+ // consider mapping some public API types to their corresponding internal
+ // types here, at the possible cost of increased maintenance.
+ sp_value_type = Extension::Current()->GetV8ObjectType(sp_ctx);
+ }
+
+ Location loc;
+ RETURN_IF_FAIL(p_v8_local_instance->GetLocation(&loc));
+
+ // Read the pointer at the Object location
+ ULONG64 obj_address;
+ RETURN_IF_FAIL(
+ sp_debug_host_memory->ReadPointers(sp_ctx.Get(), loc, 1, &obj_address));
+
+  // If val_ is a nullptr, then there is no value in the Local.
+ if (obj_address == 0) {
+ RETURN_IF_FAIL(CreateString(std::u16string{u"<empty>"}, pp_value));
+ } else {
+ // Should be a v8::internal::Object at the address
+ RETURN_IF_FAIL(sp_data_model_manager->CreateTypedObject(
+ sp_ctx.Get(), obj_address, sp_value_type.Get(), pp_value));
+ }
+
+ return S_OK;
+}
+
+IFACEMETHODIMP V8LocalValueProperty::SetValue(
+ PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/,
+ IModelObject* /*p_value*/) noexcept {
+ return E_NOTIMPL;
+}
+
+constexpr wchar_t usage[] =
+ LR"(Invalid arguments.
+First argument should be a uint64 representing the tagged value to investigate.
+Second argument is optional, and may be a fully-qualified type name such as
+v8::internal::String.)";
+
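+// Assuming this method is registered under an alias such as "v8object" (the
+// registration lives elsewhere in the extension), a typical invocation is:
+//   dx @$v8object(0x34f49880471, "v8::internal::JSFunction")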
+IFACEMETHODIMP InspectV8ObjectMethod::Call(IModelObject* p_context_object,
+ ULONG64 arg_count,
+ _In_reads_(arg_count)
+ IModelObject** pp_arguments,
+ IModelObject** pp_result,
+ IKeyStore** pp_metadata) noexcept {
+ // Read the arguments.
+ ULONG64 tagged_value;
+ _bstr_t type_name;
+ if (arg_count < 1 ||
+ FAILED(UnboxULong64(pp_arguments[0], &tagged_value, /*convert=*/true)) ||
+ (arg_count >= 2 &&
+ FAILED(UnboxString(pp_arguments[1], type_name.GetAddress())))) {
+ sp_data_model_manager->CreateErrorObject(E_INVALIDARG, usage, pp_result);
+ return E_INVALIDARG;
+ }
+
+ WRL::ComPtr<IDebugHostContext> sp_ctx;
+ RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_ctx));
+
+ // We can't use CreateTypedObject for a value which may not actually reside
+ // anywhere in memory, so create a synthetic object.
+ WRL::ComPtr<V8CachedObject> cached_object =
+ WRL::Make<V8CachedObject>(::GetHeapObject(
+ sp_ctx, tagged_value, 0, static_cast<const char*>(type_name),
+ /*is_compressed=*/false));
+ return CreateSyntheticObjectForV8Object(sp_ctx.Get(), cached_object.Get(),
+ pp_result);
+}
diff --git a/deps/v8/tools/v8windbg/src/object-inspection.h b/deps/v8/tools/v8windbg/src/object-inspection.h
new file mode 100644
index 0000000000..27283ca556
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/object-inspection.h
@@ -0,0 +1,261 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_OBJECT_INSPECTION_H_
+#define V8_TOOLS_V8WINDBG_SRC_OBJECT_INSPECTION_H_
+
+#include <comutil.h>
+#include <wrl/implements.h>
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "tools/v8windbg/base/dbgext.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+// The representation of the underlying V8 object that will be cached on the
+// DataModel representation. (Needs to implement IUnknown).
+class __declspec(uuid("6392E072-37BB-4220-A5FF-114098923A02")) IV8CachedObject
+ : public IUnknown {
+ public:
+ virtual HRESULT __stdcall GetCachedV8HeapObject(
+ V8HeapObject** pp_heap_object) = 0;
+};
+
+class V8CachedObject
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IV8CachedObject> {
+ public:
+ V8CachedObject(Location location, std::string uncompressed_type_name,
+ WRL::ComPtr<IDebugHostContext> context, bool is_compressed);
+ V8CachedObject(V8HeapObject heap_object);
+ ~V8CachedObject() override;
+
+ static HRESULT Create(IModelObject* p_v8_object_instance,
+ IV8CachedObject** result);
+
+ IFACEMETHOD(GetCachedV8HeapObject)(V8HeapObject** pp_heap_object);
+
+ private:
+ // The properties and description of the object, if already read.
+ V8HeapObject heap_object_;
+ bool heap_object_initialized_ = false;
+
+ // Data that is necessary for reading the object.
+ Location location_;
+ std::string uncompressed_type_name_;
+ WRL::ComPtr<IDebugHostContext> context_;
+ bool is_compressed_ = false;
+};
+
+// A simple COM wrapper class to hold data required for IndexedFieldParent.
+// (Needs to implement IUnknown).
+class __declspec(uuid("6392E072-37BB-4220-A5FF-114098923A03")) IIndexedFieldData
+ : public IUnknown {
+ public:
+ // Get a pointer to the Property object held by this IIndexedFieldData. The
+ // pointer returned in this way is valid only while the containing
+ // IIndexedFieldData is alive.
+ virtual HRESULT __stdcall GetProperty(Property** property) = 0;
+};
+
+class IndexedFieldData
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IIndexedFieldData> {
+ public:
+ IndexedFieldData(Property property);
+ ~IndexedFieldData() override;
+
+ // Get a pointer to the Property object held by this IndexedFieldData. The
+ // pointer returned in this way is valid only while the containing
+ // IndexedFieldData is alive.
+ IFACEMETHOD(GetProperty)(Property** property);
+
+ private:
+ Property property_;
+};
+
+// A parent model that provides indexing support for fields that contain arrays
+// of something more complicated than basic native types.
+class IndexedFieldParent
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IDataModelConcept, IIterableConcept, IIndexableConcept> {
+ public:
+ // IDataModelConcept
+ IFACEMETHOD(InitializeObject)
+ (IModelObject* model_object, IDebugHostTypeSignature* matching_type_signature,
+ IDebugHostSymbolEnumerator* wildcard_matches);
+
+ // IDataModelConcept
+ IFACEMETHOD(GetName)(BSTR* model_name);
+
+ // IIndexableConcept
+ IFACEMETHOD(GetAt)
+ (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+ _COM_Errorptr_ IModelObject** object, IKeyStore** metadata);
+
+ // IIndexableConcept
+ IFACEMETHOD(GetDimensionality)
+ (IModelObject* context_object, ULONG64* dimensionality);
+
+ // IIndexableConcept
+ IFACEMETHOD(SetAt)
+ (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+ IModelObject* value);
+
+ // IIterableConcept
+ IFACEMETHOD(GetDefaultIndexDimensionality)
+ (IModelObject* context_object, ULONG64* dimensionality);
+
+ // IIterableConcept
+ IFACEMETHOD(GetIterator)
+ (IModelObject* context_object, IModelIterator** iterator);
+};
+
+// An iterator for the values within an array field.
+class IndexedFieldIterator
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelIterator> {
+ public:
+ IndexedFieldIterator(IModelObject* context_object);
+ ~IndexedFieldIterator() override;
+
+ IFACEMETHOD(Reset)();
+
+ IFACEMETHOD(GetNext)
+ (IModelObject** object, ULONG64 dimensions, IModelObject** indexers,
+ IKeyStore** metadata);
+
+ private:
+ size_t next_ = 0;
+ WRL::ComPtr<IModelObject> context_object_;
+};
+
+// Enumerates the names of fields on V8 objects.
+class V8ObjectKeyEnumerator
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IKeyEnumerator> {
+ public:
+ V8ObjectKeyEnumerator(WRL::ComPtr<IV8CachedObject>& v8_cached_object);
+ ~V8ObjectKeyEnumerator() override;
+
+ IFACEMETHOD(Reset)();
+
+  // When this enumerator is returned from an IDynamicKeyProviderConcept, the
+  // data model calls this method with a nullptr 'value' for each key, and
+  // then calls GetKey on the IDynamicKeyProviderConcept for each key returned.
+ IFACEMETHOD(GetNext)(BSTR* key, IModelObject** value, IKeyStore** metadata);
+
+ private:
+ int index_ = 0;
+ WRL::ComPtr<IV8CachedObject> sp_v8_cached_object_;
+};
+
+// A parent model for V8 handle types such as v8::internal::Handle<*>.
+class V8LocalDataModel
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IDataModelConcept> {
+ public:
+ IFACEMETHOD(InitializeObject)
+ (IModelObject* model_object, IDebugHostTypeSignature* matching_type_signature,
+ IDebugHostSymbolEnumerator* wildcard_matches);
+
+ IFACEMETHOD(GetName)(BSTR* model_name);
+};
+
+// A parent model for V8 object types such as v8::internal::Object.
+class V8ObjectDataModel
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IDataModelConcept, IStringDisplayableConcept,
+ IDynamicKeyProviderConcept> {
+ public:
+ HRESULT GetCachedObject(IModelObject* context_object,
+ IV8CachedObject** result) {
+ // Get the IModelObject for this parent object. As it is a dynamic provider,
+ // there is only one parent directly on the object.
+ WRL::ComPtr<IModelObject> sp_parent_model, sp_context_adjuster;
+ RETURN_IF_FAIL(context_object->GetParentModel(0, &sp_parent_model,
+ &sp_context_adjuster));
+
+ // See if the cached object is already present
+ WRL::ComPtr<IUnknown> sp_context;
+ HRESULT hr = context_object->GetContextForDataModel(sp_parent_model.Get(),
+ &sp_context);
+
+ WRL::ComPtr<IV8CachedObject> sp_v8_cached_object;
+
+ if (SUCCEEDED(hr)) {
+ RETURN_IF_FAIL(sp_context.As(&sp_v8_cached_object));
+ } else {
+ RETURN_IF_FAIL(
+ V8CachedObject::Create(context_object, &sp_v8_cached_object));
+ RETURN_IF_FAIL(sp_v8_cached_object.As(&sp_context));
+ RETURN_IF_FAIL(context_object->SetContextForDataModel(
+ sp_parent_model.Get(), sp_context.Get()));
+ }
+
+ *result = sp_v8_cached_object.Detach();
+ return S_OK;
+ }
+
+ IFACEMETHOD(InitializeObject)
+ (IModelObject* model_object, IDebugHostTypeSignature* matching_type_signature,
+ IDebugHostSymbolEnumerator* wildcard_matches);
+
+ IFACEMETHOD(GetName)(BSTR* model_name);
+
+ IFACEMETHOD(ToDisplayString)
+ (IModelObject* context_object, IKeyStore* metadata, BSTR* display_string);
+
+ // IDynamicKeyProviderConcept
+ IFACEMETHOD(GetKey)
+ (IModelObject* context_object, PCWSTR key, IModelObject** key_value,
+ IKeyStore** metadata, bool* has_key);
+
+ IFACEMETHOD(SetKey)
+ (IModelObject* context_object, PCWSTR key, IModelObject* key_value,
+ IKeyStore* metadata);
+
+ IFACEMETHOD(EnumerateKeys)
+ (IModelObject* context_object, IKeyEnumerator** pp_enumerator);
+};
+
+// The implementation of the "Value" getter for V8 handle types.
+class V8LocalValueProperty
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelPropertyAccessor> {
+ public:
+ IFACEMETHOD(GetValue)
+ (PCWSTR pwsz_key, IModelObject* p_v8_object_instance,
+ IModelObject** pp_value);
+
+ IFACEMETHOD(SetValue)
+ (PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/,
+ IModelObject* /*p_value*/);
+};
+
+// A way that someone can directly inspect a tagged value, even if that value
+// isn't in memory (from a register, or the user's imagination, etc.).
+class InspectV8ObjectMethod
+ : public WRL::RuntimeClass<
+ WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+ IModelMethod> {
+ public:
+ IFACEMETHOD(Call)
+ (IModelObject* p_context_object, ULONG64 arg_count,
+ _In_reads_(arg_count) IModelObject** pp_arguments, IModelObject** pp_result,
+ IKeyStore** pp_metadata);
+};
+
+#endif // V8_TOOLS_V8WINDBG_SRC_OBJECT_INSPECTION_H_
diff --git a/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.cc b/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.cc
new file mode 100644
index 0000000000..4a8dcc9add
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.cc
@@ -0,0 +1,157 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+
+#include <Windows.h>
+#include <crtdbg.h>
+
+#include "src/common/globals.h"
+#include "tools/debug_helper/debug-helper.h"
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+namespace d = v8::debug_helper;
+
+// We need a plain C function pointer for interop with v8_debug_helper. We can
+// use this to get one as long as we never need two at once.
+class MemReaderScope {
+ public:
+ explicit MemReaderScope(WRL::ComPtr<IDebugHostContext> sp_context)
+ : sp_context_(sp_context) {
+ _ASSERTE(!context_);
+ context_ = sp_context_.Get();
+ }
+ ~MemReaderScope() { context_ = nullptr; }
+ d::MemoryAccessor GetReader() { return &MemReaderScope::Read; }
+
+ private:
+ MemReaderScope(const MemReaderScope&) = delete;
+ MemReaderScope& operator=(const MemReaderScope&) = delete;
+ static d::MemoryAccessResult Read(uintptr_t address, uint8_t* destination,
+ size_t byte_count) {
+ ULONG64 bytes_read;
+ Location loc{address};
+ HRESULT hr = sp_debug_host_memory->ReadBytes(context_, loc, destination,
+ byte_count, &bytes_read);
+    // TODO: determine when an address is valid but inaccessible.
+ return SUCCEEDED(hr) ? d::MemoryAccessResult::kOk
+ : d::MemoryAccessResult::kAddressNotValid;
+ }
+ WRL::ComPtr<IDebugHostContext> sp_context_;
+ static IDebugHostContext* context_;
+};
+IDebugHostContext* MemReaderScope::context_;
+
+StructField::StructField(std::u16string field_name, std::u16string type_name,
+ std::string uncompressed_type_name, uint64_t offset,
+ uint8_t num_bits, uint8_t shift_bits)
+ : name(field_name),
+ type_name(type_name),
+ uncompressed_type_name(uncompressed_type_name),
+ offset(offset),
+ num_bits(num_bits),
+ shift_bits(shift_bits) {}
+StructField::~StructField() = default;
+StructField::StructField(const StructField&) = default;
+StructField::StructField(StructField&&) = default;
+StructField& StructField::operator=(const StructField&) = default;
+StructField& StructField::operator=(StructField&&) = default;
+
+Property::Property(std::u16string property_name, std::u16string type_name,
+ std::string uncompressed_type_name, uint64_t address,
+ size_t item_size)
+ : name(property_name),
+ type(PropertyType::kPointer),
+ type_name(type_name),
+ uncompressed_type_name(uncompressed_type_name),
+ addr_value(address),
+ item_size(item_size) {}
+Property::~Property() = default;
+Property::Property(const Property&) = default;
+Property::Property(Property&&) = default;
+Property& Property::operator=(const Property&) = default;
+Property& Property::operator=(Property&&) = default;
+
+V8HeapObject::V8HeapObject() = default;
+V8HeapObject::~V8HeapObject() = default;
+V8HeapObject::V8HeapObject(const V8HeapObject&) = default;
+V8HeapObject::V8HeapObject(V8HeapObject&&) = default;
+V8HeapObject& V8HeapObject::operator=(const V8HeapObject&) = default;
+V8HeapObject& V8HeapObject::operator=(V8HeapObject&&) = default;
+
+V8HeapObject GetHeapObject(WRL::ComPtr<IDebugHostContext> sp_context,
+ uint64_t tagged_ptr, uint64_t referring_pointer,
+ const char* type_name, bool is_compressed) {
+ // Read the value at the address, and see if it is a tagged pointer
+
+ V8HeapObject obj;
+ MemReaderScope reader_scope(sp_context);
+
+ d::HeapAddresses heap_addresses = {0, 0, 0, 0};
+  // TODO: ideally we'd provide real heap page pointers. For now, just testing
+ // decompression based on the pointer to wherever we found this value,
+ // which is likely (though not guaranteed) to be a heap pointer itself.
+ heap_addresses.any_heap_pointer = referring_pointer;
+
+ auto props = d::GetObjectProperties(tagged_ptr, reader_scope.GetReader(),
+ heap_addresses, type_name);
+ obj.friendly_name = ConvertToU16String(props->brief);
+ for (size_t property_index = 0; property_index < props->num_properties;
+ ++property_index) {
+ const auto& source_prop = *props->properties[property_index];
+ Property dest_prop(ConvertToU16String(source_prop.name),
+ ConvertToU16String(source_prop.type),
+ source_prop.decompressed_type, source_prop.address,
+ source_prop.size);
+ if (source_prop.kind != d::PropertyKind::kSingle) {
+ dest_prop.type = PropertyType::kArray;
+ dest_prop.length = source_prop.num_values;
+ }
+ if (dest_prop.type_name.empty() || source_prop.num_struct_fields > 0) {
+ // If the helper library didn't provide a type, then it should have
+ // provided struct fields instead. Set the struct type flag and copy the
+ // fields into the result.
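+      // PropertyType values are bit flags: kPointer is 0 and kStructArray is
+      // kArray | kStruct, so OR-ing in kStruct maps kPointer to kStruct and
+      // kArray to kStructArray.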
+ dest_prop.type =
+ static_cast<PropertyType>(static_cast<int>(dest_prop.type) |
+ static_cast<int>(PropertyType::kStruct));
+ for (size_t field_index = 0; field_index < source_prop.num_struct_fields;
+ ++field_index) {
+ const auto& struct_field = *source_prop.struct_fields[field_index];
+ dest_prop.fields.push_back({ConvertToU16String(struct_field.name),
+ ConvertToU16String(struct_field.type),
+ struct_field.decompressed_type,
+ struct_field.offset, struct_field.num_bits,
+ struct_field.shift_bits});
+ }
+ }
+ obj.properties.push_back(dest_prop);
+ }
+
+ // For each guessed type, create a synthetic property that will request data
+ // about the same object again but with a more specific type hint.
+ if (referring_pointer != 0) {
+ for (size_t type_index = 0; type_index < props->num_guessed_types;
+ ++type_index) {
+ const std::string& type_name = props->guessed_types[type_index];
+ Property dest_prop(
+ ConvertToU16String(("guessed type " + type_name).c_str()),
+ ConvertToU16String(is_compressed ? kTaggedValue : type_name),
+ type_name, referring_pointer,
+ is_compressed ? i::kTaggedSize : sizeof(void*));
+ obj.properties.push_back(dest_prop);
+ }
+ }
+
+ return obj;
+}
+
+std::vector<std::u16string> ListObjectClasses() {
+ const d::ClassList* class_list = d::ListObjectClasses();
+ std::vector<std::u16string> result;
+ for (size_t i = 0; i < class_list->num_class_names; ++i) {
+ result.push_back(ConvertToU16String(class_list->class_names[i]));
+ }
+ return result;
+}
diff --git a/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.h b/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.h
new file mode 100644
index 0000000000..96bd59b30e
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.h
@@ -0,0 +1,138 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_V8_DEBUG_HELPER_INTEROP_H_
+#define V8_TOOLS_V8WINDBG_SRC_V8_DEBUG_HELPER_INTEROP_H_
+
+#include <wrl/client.h>
+
+#include <DbgModel.h>
+
+#include <cstdint>
+#include <map>
+#include <string>
+#include <vector>
+
+namespace WRL = Microsoft::WRL;
+
+constexpr char kObject[] = "v8::internal::Object";
+constexpr char16_t kObjectU[] = u"v8::internal::Object";
+constexpr char kTaggedValue[] = "v8::internal::TaggedValue";
+constexpr char16_t kTaggedValueU[] = u"v8::internal::TaggedValue";
+
+enum class PropertyType {
+ kPointer = 0,
+ kArray = 1,
+ kStruct = 2,
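+  // kArray and kStruct are bit flags; a property can be both at once, so
+  // kStructArray is their bitwise OR.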
+ kStructArray = kArray | kStruct,
+};
+
+struct StructField {
+ StructField(std::u16string field_name, std::u16string type_name,
+              std::string uncompressed_type_name, uint64_t offset,
+ uint8_t num_bits, uint8_t shift_bits);
+ ~StructField();
+ StructField(const StructField&);
+ StructField(StructField&&);
+ StructField& operator=(const StructField&);
+ StructField& operator=(StructField&&);
+
+ std::u16string name;
+
+ // Statically-determined type, such as from .tq definition. This type should
+ // be treated as if it were used in the v8::internal namespace; that is, type
+ // "X::Y" can mean any of the following, in order of decreasing preference:
+ // - v8::internal::X::Y
+ // - v8::X::Y
+ // - X::Y
+ std::u16string type_name;
+
+ // In some cases, |type_name| may be a simple type representing a compressed
+ // pointer such as v8::internal::TaggedValue. In those cases,
+ // |uncompressed_type_name| will contain the type of the object when
+ // decompressed. Otherwise, |uncompressed_type_name| will match |type_name|.
+ // In any case, it is safe to pass the |uncompressed_type_name| value as the
+ // type_hint on a subsequent call to GetObjectProperties.
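+  // For example (hypothetical types), a compressed field might have
+  // |type_name| u"v8::internal::TaggedValue" and |uncompressed_type_name|
+  // "v8::internal::String".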
+ std::string uncompressed_type_name;
+
+ // Offset, in bytes, from beginning of struct.
+ uint64_t offset;
+
+ // The number of bits that are present, if this value is a bitfield. Zero
+ // indicates that this value is not a bitfield (the full value is stored).
+ uint8_t num_bits;
+
+ // The number of bits by which this value has been left-shifted for storage as
+ // a bitfield.
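+  // For example (hypothetical layout), a 3-bit flag stored in bits 4-6 of its
+  // word would have num_bits == 3 and shift_bits == 4.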
+ uint8_t shift_bits;
+};
+
+struct Property {
+ Property(std::u16string property_name, std::u16string type_name,
+ std::string uncompressed_type_name, uint64_t address,
+ size_t item_size);
+ ~Property();
+ Property(const Property&);
+ Property(Property&&);
+ Property& operator=(const Property&);
+ Property& operator=(Property&&);
+
+ std::u16string name;
+ PropertyType type;
+
+ // Statically-determined type, such as from .tq definition. Can be an empty
+ // string if this property is itself a Torque-defined struct; in that case use
+ // |fields| instead. This type should be treated as if it were used in the
+ // v8::internal namespace; that is, type "X::Y" can mean any of the following,
+ // in order of decreasing preference:
+ // - v8::internal::X::Y
+ // - v8::X::Y
+ // - X::Y
+ std::u16string type_name;
+
+ // In some cases, |type_name| may be a simple type representing a compressed
+ // pointer such as v8::internal::TaggedValue. In those cases,
+ // |uncompressed_type_name| will contain the type of the object when
+ // decompressed. Otherwise, |uncompressed_type_name| will match |type_name|.
+ // In any case, it is safe to pass the |uncompressed_type_name| value as the
+ // type_hint on a subsequent call to GetObjectProperties.
+ std::string uncompressed_type_name;
+
+ // The address where the property value can be found in the debuggee's address
+ // space, or the address of the first value for an array.
+ uint64_t addr_value;
+
+ // Size of each array item, if this property is an array.
+ size_t item_size;
+
+ // Number of array items, if this property is an array.
+ size_t length = 0;
+
+ // Fields within this property, if this property is a struct, or fields within
+ // each array element, if this property is a struct array.
+ std::vector<StructField> fields;
+};
+
+struct V8HeapObject {
+ V8HeapObject();
+ ~V8HeapObject();
+ V8HeapObject(const V8HeapObject&);
+ V8HeapObject(V8HeapObject&&);
+ V8HeapObject& operator=(const V8HeapObject&);
+ V8HeapObject& operator=(V8HeapObject&&);
+ std::u16string friendly_name; // String to print in single-line description.
+ std::vector<Property> properties;
+};
+
+V8HeapObject GetHeapObject(WRL::ComPtr<IDebugHostContext> sp_context,
+                           uint64_t tagged_ptr, uint64_t referring_pointer,
+ const char* type_name, bool is_compressed);
+
+// Expand a compressed pointer from 32 bits to the format that
+// GetObjectProperties expects for compressed pointers.
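+// (Currently that format is just the zero-extended 32-bit value; the debug
+// helper performs the actual decompression using the heap addresses passed to
+// GetObjectProperties.)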
+inline uint64_t ExpandCompressedPointer(uint32_t ptr) { return ptr; }
+
+std::vector<std::u16string> ListObjectClasses();
+
+#endif // V8_TOOLS_V8WINDBG_SRC_V8_DEBUG_HELPER_INTEROP_H_
diff --git a/deps/v8/tools/v8windbg/src/v8windbg-extension.cc b/deps/v8/tools/v8windbg/src/v8windbg-extension.cc
new file mode 100644
index 0000000000..68c90d2833
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/v8windbg-extension.cc
@@ -0,0 +1,340 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+#include <iostream>
+
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/cur-isolate.h"
+#include "tools/v8windbg/src/list-chunks.h"
+#include "tools/v8windbg/src/local-variables.h"
+#include "tools/v8windbg/src/object-inspection.h"
+
+std::unique_ptr<Extension> Extension::current_extension_ = nullptr;
+const wchar_t* pcur_isolate = L"curisolate";
+const wchar_t* plist_chunks = L"listchunks";
+const wchar_t* pv8_object = L"v8object";
+
+HRESULT CreateExtension() {
+ if (Extension::Current() != nullptr || sp_data_model_manager == nullptr ||
+ sp_debug_host == nullptr) {
+ return E_FAIL;
+ } else {
+ std::unique_ptr<Extension> new_extension(new (std::nothrow) Extension());
+ if (new_extension == nullptr) return E_FAIL;
+ RETURN_IF_FAIL(new_extension->Initialize());
+ Extension::SetExtension(std::move(new_extension));
+ return S_OK;
+ }
+}
+
+void DestroyExtension() { Extension::SetExtension(nullptr); }
+
+bool Extension::DoesTypeDeriveFromObject(
+ const WRL::ComPtr<IDebugHostType>& sp_type) {
+ _bstr_t name;
+ HRESULT hr = sp_type->GetName(name.GetAddress());
+ if (!SUCCEEDED(hr)) return false;
+ if (std::string(static_cast<const char*>(name)) == kObject) return true;
+
+ WRL::ComPtr<IDebugHostSymbolEnumerator> sp_super_class_enumerator;
+ hr = sp_type->EnumerateChildren(SymbolKind::SymbolBaseClass, nullptr,
+ &sp_super_class_enumerator);
+ if (!SUCCEEDED(hr)) return false;
+
+ while (true) {
+ WRL::ComPtr<IDebugHostSymbol> sp_type_symbol;
+ if (sp_super_class_enumerator->GetNext(&sp_type_symbol) != S_OK) break;
+ WRL::ComPtr<IDebugHostBaseClass> sp_base_class;
+ if (FAILED(sp_type_symbol.As(&sp_base_class))) continue;
+ WRL::ComPtr<IDebugHostType> sp_base_type;
+ hr = sp_base_class->GetType(&sp_base_type);
+ if (!SUCCEEDED(hr)) continue;
+ if (DoesTypeDeriveFromObject(sp_base_type)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+WRL::ComPtr<IDebugHostType> Extension::GetV8ObjectType(
+ WRL::ComPtr<IDebugHostContext>& sp_ctx) {
+ return GetTypeFromV8Module(sp_ctx, kObjectU);
+}
+
+WRL::ComPtr<IDebugHostType> Extension::GetTypeFromV8Module(
+ WRL::ComPtr<IDebugHostContext>& sp_ctx, const char16_t* type_name) {
+ bool is_equal;
+ if (sp_v8_module_ctx_ == nullptr ||
+ !SUCCEEDED(sp_v8_module_ctx_->IsEqualTo(sp_ctx.Get(), &is_equal)) ||
+ !is_equal) {
+ // Context changed; clear the dictionary.
+ cached_v8_module_types_.clear();
+ }
+
+ GetV8Module(sp_ctx); // Will force the correct module to load
+ if (sp_v8_module_ == nullptr) return nullptr;
+
+ auto& dictionary_entry = cached_v8_module_types_[type_name];
+ if (dictionary_entry == nullptr) {
+ const std::wstring type_name_w(reinterpret_cast<const wchar_t*>(type_name));
+ // The contract from debug_helper functions is to provide type names that
+ // would be valid if used in C++ code within the v8::internal namespace.
+ // They might be fully qualified but aren't required to be. Thus, we must
+      // simulate an "unqualified name lookup" here, by searching for the type
+ // starting in the innermost namespace and working outward.
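+    // For example, "Map" is tried as "v8::internal::Map", then "v8::Map", and
+    // finally as plain "Map".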
+ if (SUCCEEDED(sp_v8_module_->FindTypeByName(
+ (L"v8::internal::" + type_name_w).c_str(), &dictionary_entry))) {
+ return dictionary_entry;
+ }
+ if (SUCCEEDED(sp_v8_module_->FindTypeByName((L"v8::" + type_name_w).c_str(),
+ &dictionary_entry))) {
+ return dictionary_entry;
+ }
+ sp_v8_module_->FindTypeByName(reinterpret_cast<PCWSTR>(type_name),
+ &dictionary_entry);
+ }
+ return dictionary_entry;
+}
+
+namespace {
+
+// Returns whether the given module appears to have symbols for V8 code.
+bool IsV8Module(IDebugHostModule* module) {
+ WRL::ComPtr<IDebugHostSymbol> sp_isolate_sym;
+ // The below symbol is specific to the main V8 module.
+ if (FAILED(module->FindSymbolByName(L"v8::Script::Run", &sp_isolate_sym))) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+WRL::ComPtr<IDebugHostModule> Extension::GetV8Module(
+ WRL::ComPtr<IDebugHostContext>& sp_ctx) {
+ // Return the cached version if it exists and the context is the same
+
+ // Note: Context will often have the CUSTOM flag set, which never compares
+ // equal. So for now DON'T compare by context, but by proc_id. (An API is in
+ // progress to compare by address space, which should be usable when shipped).
+ /*
+ if (sp_v8_module_ != nullptr) {
+ bool is_equal;
+ if (SUCCEEDED(sp_v8_module_ctx_->IsEqualTo(sp_ctx.Get(), &is_equal)) &&
+ is_equal) { return sp_v8_module_; } else { sp_v8_module_ = nullptr;
+ sp_v8_module_ctx_ = nullptr;
+ }
+ }
+ */
+ WRL::ComPtr<IDebugSystemObjects> sp_sys_objects;
+ ULONG proc_id = 0;
+ if (SUCCEEDED(sp_debug_control.As(&sp_sys_objects))) {
+ if (SUCCEEDED(sp_sys_objects->GetCurrentProcessSystemId(&proc_id))) {
+ if (proc_id == v8_module_proc_id_ && sp_v8_module_ != nullptr)
+ return sp_v8_module_;
+ }
+ }
+
+ // Search first for a few known module names, to avoid loading symbols for
+ // unrelated modules if we can easily avoid it. Generally, failing to find a
+ // module is fast but failing to find a symbol within a module is slow. Note
+ // that "v8" is listed first because it's highly likely to be the correct
+ // module if it exists. The others might include V8 symbols depending on the
+ // build configuration.
+ std::vector<const wchar_t*> known_names = {
+ L"v8", L"v8_for_testing", L"cctest_exe", L"chrome",
+ L"d8", L"msedge", L"node", L"unittests_exe"};
+ for (const wchar_t* name : known_names) {
+ WRL::ComPtr<IDebugHostModule> sp_module;
+ if (SUCCEEDED(sp_debug_host_symbols->FindModuleByName(sp_ctx.Get(), name,
+ &sp_module))) {
+ if (IsV8Module(sp_module.Get())) {
+ sp_v8_module_ = sp_module;
+ sp_v8_module_ctx_ = sp_ctx;
+ v8_module_proc_id_ = proc_id;
+ return sp_v8_module_;
+ }
+ }
+ }
+
+ // Loop through all modules looking for the one that holds a known symbol.
+ WRL::ComPtr<IDebugHostSymbolEnumerator> sp_enum;
+ if (SUCCEEDED(
+ sp_debug_host_symbols->EnumerateModules(sp_ctx.Get(), &sp_enum))) {
+ HRESULT hr = S_OK;
+ while (true) {
+ WRL::ComPtr<IDebugHostSymbol> sp_mod_sym;
+ hr = sp_enum->GetNext(&sp_mod_sym);
+ // hr == E_BOUNDS : hit the end of the enumerator
+ // hr == E_ABORT : a user interrupt was requested
+ if (FAILED(hr)) break;
+ WRL::ComPtr<IDebugHostModule> sp_module;
+ if (SUCCEEDED(sp_mod_sym.As(&sp_module))) /* should always succeed */
+ {
+ if (IsV8Module(sp_module.Get())) {
+ sp_v8_module_ = sp_module;
+ sp_v8_module_ctx_ = sp_ctx;
+ v8_module_proc_id_ = proc_id;
+ break;
+ }
+ }
+ }
+ }
+  // This will be the located module, or still nullptr if the above failed.
+ return sp_v8_module_;
+}
+
+Extension::Extension() = default;
+
+HRESULT Extension::Initialize() {
+ // Create an instance of the DataModel parent for v8::internal::Object types.
+ auto object_data_model{WRL::Make<V8ObjectDataModel>()};
+ RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+ object_data_model.Get(), &sp_object_data_model_));
+ RETURN_IF_FAIL(sp_object_data_model_->SetConcept(
+ __uuidof(IStringDisplayableConcept),
+ static_cast<IStringDisplayableConcept*>(object_data_model.Get()),
+ nullptr));
+ RETURN_IF_FAIL(sp_object_data_model_->SetConcept(
+ __uuidof(IDynamicKeyProviderConcept),
+ static_cast<IDynamicKeyProviderConcept*>(object_data_model.Get()),
+ nullptr));
+
+ // Register that parent model for all known types of V8 object.
+ std::vector<std::u16string> object_class_names = ListObjectClasses();
+ object_class_names.push_back(kObjectU);
+ object_class_names.push_back(kTaggedValueU);
+ for (const std::u16string& name : object_class_names) {
+ WRL::ComPtr<IDebugHostTypeSignature> sp_object_type_signature;
+ RETURN_IF_FAIL(sp_debug_host_symbols->CreateTypeSignature(
+ reinterpret_cast<const wchar_t*>(name.c_str()), nullptr,
+ &sp_object_type_signature));
+ RETURN_IF_FAIL(sp_data_model_manager->RegisterModelForTypeSignature(
+ sp_object_type_signature.Get(), sp_object_data_model_.Get()));
+ registered_object_types_.push_back(sp_object_type_signature);
+ }
+
+ // Create an instance of the DataModel parent for custom iterable fields.
+ auto indexed_field_model{WRL::Make<IndexedFieldParent>()};
+ RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+ indexed_field_model.Get(), &sp_indexed_field_model_));
+ RETURN_IF_FAIL(sp_indexed_field_model_->SetConcept(
+ __uuidof(IIndexableConcept),
+ static_cast<IIndexableConcept*>(indexed_field_model.Get()), nullptr));
+ RETURN_IF_FAIL(sp_indexed_field_model_->SetConcept(
+ __uuidof(IIterableConcept),
+ static_cast<IIterableConcept*>(indexed_field_model.Get()), nullptr));
+
+ // Create an instance of the DataModel parent class for v8::Local<*> types.
+ auto local_data_model{WRL::Make<V8LocalDataModel>()};
+ RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+ local_data_model.Get(), &sp_local_data_model_));
+
+ // Register that parent model for all known types that act like v8::Local.
+ std::vector<const wchar_t*> handle_class_names = {
+ L"v8::Local<*>", L"v8::MaybeLocal<*>", L"v8::internal::Handle<*>",
+ L"v8::internal::MaybeHandle<*>"};
+ for (const wchar_t* name : handle_class_names) {
+ WRL::ComPtr<IDebugHostTypeSignature> signature;
+ RETURN_IF_FAIL(
+ sp_debug_host_symbols->CreateTypeSignature(name, nullptr, &signature));
+ RETURN_IF_FAIL(sp_data_model_manager->RegisterModelForTypeSignature(
+ signature.Get(), sp_local_data_model_.Get()));
+ registered_handle_types_.push_back(signature);
+ }
+
+ // Add the 'Value' property to the parent model.
+ auto local_value_property{WRL::Make<V8LocalValueProperty>()};
+ WRL::ComPtr<IModelObject> sp_local_value_property_model;
+ RETURN_IF_FAIL(CreateProperty(sp_data_model_manager.Get(),
+ local_value_property.Get(),
+ &sp_local_value_property_model));
+ RETURN_IF_FAIL(sp_local_data_model_->SetKey(
+ L"Value", sp_local_value_property_model.Get(), nullptr));
+
+ // Register all function aliases.
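+  // (Once registered, each alias can be invoked from the debugger, e.g. as
+  // "dx @$curisolate()".)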
+ std::vector<std::pair<const wchar_t*, WRL::ComPtr<IModelMethod>>> functions =
+ {{pcur_isolate, WRL::Make<CurrIsolateAlias>()},
+ {plist_chunks, WRL::Make<ListChunksAlias>()},
+ {pv8_object, WRL::Make<InspectV8ObjectMethod>()}};
+ for (const auto& function : functions) {
+ WRL::ComPtr<IModelObject> method;
+ RETURN_IF_FAIL(CreateMethod(sp_data_model_manager.Get(),
+ function.second.Get(), &method));
+ RETURN_IF_FAIL(sp_debug_host_extensibility->CreateFunctionAlias(
+ function.first, method.Get()));
+ }
+
+ // Register a handler for supplying stack frame locals. It has to override the
+ // getter functions for "LocalVariables" and "Parameters".
+ WRL::ComPtr<IModelObject> stack_frame;
+ RETURN_IF_FAIL(sp_data_model_manager->AcquireNamedModel(
+ L"Debugger.Models.StackFrame", &stack_frame));
+ RETURN_IF_FAIL(OverrideLocalsGetter(stack_frame.Get(), L"LocalVariables",
+ /*is_parameters=*/false));
+ RETURN_IF_FAIL(OverrideLocalsGetter(stack_frame.Get(), L"Parameters",
+ /*is_parameters=*/true));
+
+ return S_OK;
+}
+
+HRESULT Extension::OverrideLocalsGetter(IModelObject* stack_frame,
+ const wchar_t* key_name,
+ bool is_parameters) {
+ WRL::ComPtr<IModelObject> original_boxed_getter;
+ WRL::ComPtr<IKeyStore> original_getter_metadata;
+ RETURN_IF_FAIL(stack_frame->GetKey(key_name, &original_boxed_getter,
+ &original_getter_metadata));
+ WRL::ComPtr<IModelPropertyAccessor> original_getter;
+ RETURN_IF_FAIL(UnboxProperty(original_boxed_getter.Get(), &original_getter));
+ auto new_getter{WRL::Make<V8LocalVariables>(original_getter, is_parameters)};
+ WRL::ComPtr<IModelObject> new_boxed_getter;
+ RETURN_IF_FAIL(CreateProperty(sp_data_model_manager.Get(), new_getter.Get(),
+ &new_boxed_getter));
+ RETURN_IF_FAIL(stack_frame->SetKey(key_name, new_boxed_getter.Get(),
+ original_getter_metadata.Get()));
+ overridden_properties_.push_back(
+ {stack_frame, reinterpret_cast<const char16_t*>(key_name),
+ original_boxed_getter.Get(), original_getter_metadata.Get()});
+ return S_OK;
+}
+
+Extension::PropertyOverride::PropertyOverride() = default;
+Extension::PropertyOverride::PropertyOverride(IModelObject* parent,
+ std::u16string key_name,
+ IModelObject* original_value,
+ IKeyStore* original_metadata)
+ : parent(parent),
+ key_name(std::move(key_name)),
+ original_value(original_value),
+ original_metadata(original_metadata) {}
+Extension::PropertyOverride::~PropertyOverride() = default;
+Extension::PropertyOverride::PropertyOverride(const PropertyOverride&) =
+ default;
+Extension::PropertyOverride& Extension::PropertyOverride::operator=(
+ const PropertyOverride&) = default;
+
+Extension::~Extension() {
+ sp_debug_host_extensibility->DestroyFunctionAlias(pcur_isolate);
+ sp_debug_host_extensibility->DestroyFunctionAlias(plist_chunks);
+ sp_debug_host_extensibility->DestroyFunctionAlias(pv8_object);
+
+ for (const auto& registered : registered_object_types_) {
+ sp_data_model_manager->UnregisterModelForTypeSignature(
+ sp_object_data_model_.Get(), registered.Get());
+ }
+ for (const auto& registered : registered_handle_types_) {
+ sp_data_model_manager->UnregisterModelForTypeSignature(
+ sp_local_data_model_.Get(), registered.Get());
+ }
+
+ for (const auto& override : overridden_properties_) {
+ override.parent->SetKey(
+ reinterpret_cast<const wchar_t*>(override.key_name.c_str()),
+ override.original_value.Get(), override.original_metadata.Get());
+ }
+}
diff --git a/deps/v8/tools/v8windbg/src/v8windbg-extension.h b/deps/v8/tools/v8windbg/src/v8windbg-extension.h
new file mode 100644
index 0000000000..d54f43c847
--- /dev/null
+++ b/deps/v8/tools/v8windbg/src/v8windbg-extension.h
@@ -0,0 +1,81 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_V8WINDBG_EXTENSION_H_
+#define V8_TOOLS_V8WINDBG_SRC_V8WINDBG_EXTENSION_H_
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "tools/v8windbg/base/utilities.h"
+
+// Responsible for initializing and uninitializing the extension. Also provides
+// various convenience functions.
+class Extension {
+ public:
+ Extension();
+ HRESULT Initialize();
+ ~Extension();
+ WRL::ComPtr<IDebugHostModule> GetV8Module(
+ WRL::ComPtr<IDebugHostContext>& sp_ctx);
+ WRL::ComPtr<IDebugHostType> GetTypeFromV8Module(
+ WRL::ComPtr<IDebugHostContext>& sp_ctx, const char16_t* type_name);
+ WRL::ComPtr<IDebugHostType> GetV8ObjectType(
+ WRL::ComPtr<IDebugHostContext>& sp_ctx);
+ void TryRegisterType(WRL::ComPtr<IDebugHostType>& sp_type,
+ std::u16string type_name);
+ bool DoesTypeDeriveFromObject(const WRL::ComPtr<IDebugHostType>& sp_type);
+ static Extension* Current() { return current_extension_.get(); }
+ static void SetExtension(std::unique_ptr<Extension> new_extension) {
+ current_extension_ = std::move(new_extension);
+ }
+
+ // Returns the parent model for instances of v8::internal::Object and similar
+ // classes, which contain as their first and only field a tagged V8 value.
+ IModelObject* GetObjectDataModel() { return sp_object_data_model_.Get(); }
+
+ // Returns the parent model that provides indexing support for fields that
+ // contain arrays of something more complicated than basic native types.
+ IModelObject* GetIndexedFieldDataModel() {
+ return sp_indexed_field_model_.Get();
+ }
+
+ private:
+ HRESULT OverrideLocalsGetter(IModelObject* parent, const wchar_t* key_name,
+ bool is_parameters);
+
+ // A property that has been overridden by this extension. The original value
+ // must be put back in place during ~Extension.
+ struct PropertyOverride {
+ PropertyOverride();
+ PropertyOverride(IModelObject* parent, std::u16string key_name,
+ IModelObject* original_value,
+ IKeyStore* original_metadata);
+ ~PropertyOverride();
+ PropertyOverride(const PropertyOverride&);
+ PropertyOverride& operator=(const PropertyOverride&);
+ WRL::ComPtr<IModelObject> parent;
+ std::u16string key_name;
+ WRL::ComPtr<IModelObject> original_value;
+ WRL::ComPtr<IKeyStore> original_metadata;
+ };
+
+ static std::unique_ptr<Extension> current_extension_;
+
+ WRL::ComPtr<IModelObject> sp_object_data_model_;
+ WRL::ComPtr<IModelObject> sp_local_data_model_;
+ WRL::ComPtr<IModelObject> sp_indexed_field_model_;
+
+ WRL::ComPtr<IDebugHostModule> sp_v8_module_;
+ std::unordered_map<std::u16string, WRL::ComPtr<IDebugHostType>>
+ cached_v8_module_types_;
+ std::vector<WRL::ComPtr<IDebugHostTypeSignature>> registered_object_types_;
+ std::vector<WRL::ComPtr<IDebugHostTypeSignature>> registered_handle_types_;
+ std::vector<PropertyOverride> overridden_properties_;
+ WRL::ComPtr<IDebugHostContext> sp_v8_module_ctx_;
+ ULONG v8_module_proc_id_;
+};
+
+#endif // V8_TOOLS_V8WINDBG_SRC_V8WINDBG_EXTENSION_H_
diff --git a/deps/v8/tools/v8windbg/test/debug-callbacks.cc b/deps/v8/tools/v8windbg/test/debug-callbacks.cc
new file mode 100644
index 0000000000..0b11195bdb
--- /dev/null
+++ b/deps/v8/tools/v8windbg/test/debug-callbacks.cc
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/test/debug-callbacks.h"
+
+namespace v8 {
+namespace internal {
+namespace v8windbg_test {
+
+MyOutput::MyOutput(WRL::ComPtr<IDebugClient5> p_client) : p_client_(p_client) {
+ p_client_->SetOutputCallbacks(this);
+}
+
+MyOutput::~MyOutput() { p_client_->SetOutputCallbacks(nullptr); }
+
+HRESULT __stdcall MyOutput::QueryInterface(REFIID InterfaceId,
+ PVOID* Interface) {
+ return E_NOTIMPL;
+}
+ULONG __stdcall MyOutput::AddRef(void) { return 0; }
+ULONG __stdcall MyOutput::Release(void) { return 0; }
+HRESULT __stdcall MyOutput::Output(ULONG Mask, PCSTR Text) {
+ if (Mask & DEBUG_OUTPUT_NORMAL) {
+ log_ += Text;
+ }
+ return S_OK;
+}
+
+HRESULT __stdcall MyCallback::QueryInterface(REFIID InterfaceId,
+ PVOID* Interface) {
+ return E_NOTIMPL;
+}
+ULONG __stdcall MyCallback::AddRef(void) { return S_OK; }
+ULONG __stdcall MyCallback::Release(void) { return S_OK; }
+HRESULT __stdcall MyCallback::GetInterestMask(PULONG Mask) {
+ *Mask = DEBUG_EVENT_BREAKPOINT | DEBUG_EVENT_CREATE_PROCESS;
+ return S_OK;
+}
+HRESULT __stdcall MyCallback::Breakpoint(PDEBUG_BREAKPOINT Bp) {
+ ULONG64 bp_offset;
+ HRESULT hr = Bp->GetOffset(&bp_offset);
+ if (FAILED(hr)) return hr;
+
+ // Break on breakpoints? Seems reasonable.
+ return DEBUG_STATUS_BREAK;
+}
+HRESULT __stdcall MyCallback::Exception(PEXCEPTION_RECORD64 Exception,
+ ULONG FirstChance) {
+ return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::CreateThread(ULONG64 Handle, ULONG64 DataOffset,
+ ULONG64 StartOffset) {
+ return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::ExitThread(ULONG ExitCode) { return E_NOTIMPL; }
+HRESULT __stdcall MyCallback::ExitProcess(ULONG ExitCode) { return E_NOTIMPL; }
+HRESULT __stdcall MyCallback::LoadModule(ULONG64 ImageFileHandle,
+ ULONG64 BaseOffset, ULONG ModuleSize,
+ PCSTR ModuleName, PCSTR ImageName,
+ ULONG CheckSum, ULONG TimeDateStamp) {
+ return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::UnloadModule(PCSTR ImageBaseName,
+ ULONG64 BaseOffset) {
+ return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::SystemError(ULONG Error, ULONG Level) {
+ return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::SessionStatus(ULONG Status) { return E_NOTIMPL; }
+HRESULT __stdcall MyCallback::ChangeDebuggeeState(ULONG Flags,
+ ULONG64 Argument) {
+ return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::ChangeEngineState(ULONG Flags, ULONG64 Argument) {
+ return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::ChangeSymbolState(ULONG Flags, ULONG64 Argument) {
+ return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::CreateProcessW(
+ ULONG64 ImageFileHandle, ULONG64 Handle, ULONG64 BaseOffset,
+ ULONG ModuleSize, PCSTR ModuleName, PCSTR ImageName, ULONG CheckSum,
+ ULONG TimeDateStamp, ULONG64 InitialThreadHandle, ULONG64 ThreadDataOffset,
+ ULONG64 StartOffset) {
+ // Should fire once the target process is launched. Break to create
+ // breakpoints, etc.
+ return DEBUG_STATUS_BREAK;
+}
+
+} // namespace v8windbg_test
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/tools/v8windbg/test/debug-callbacks.h b/deps/v8/tools/v8windbg/test/debug-callbacks.h
new file mode 100644
index 0000000000..8855d6ffbc
--- /dev/null
+++ b/deps/v8/tools/v8windbg/test/debug-callbacks.h
@@ -0,0 +1,90 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_TEST_DEBUG_CALLBACKS_H_
+#define V8_TOOLS_V8WINDBG_TEST_DEBUG_CALLBACKS_H_
+
+#if !defined(UNICODE) || !defined(_UNICODE)
+#error Unicode not defined
+#endif
+
+#include <DbgEng.h>
+#include <DbgModel.h>
+#include <Windows.h>
+#include <crtdbg.h>
+#include <pathcch.h>
+#include <wrl/client.h>
+
+#include <string>
+
+namespace WRL = Microsoft::WRL;
+
+namespace v8 {
+namespace internal {
+namespace v8windbg_test {
+
+class MyOutput : public IDebugOutputCallbacks {
+ public:
+ MyOutput(WRL::ComPtr<IDebugClient5> p_client);
+ ~MyOutput();
+ MyOutput(const MyOutput&) = delete;
+ MyOutput& operator=(const MyOutput&) = delete;
+
+ // Inherited via IDebugOutputCallbacks
+ HRESULT __stdcall QueryInterface(REFIID InterfaceId,
+ PVOID* Interface) override;
+ ULONG __stdcall AddRef(void) override;
+ ULONG __stdcall Release(void) override;
+ HRESULT __stdcall Output(ULONG Mask, PCSTR Text) override;
+
+ const std::string& GetLog() const { return log_; }
+ void ClearLog() { log_.clear(); }
+
+ private:
+ WRL::ComPtr<IDebugClient5> p_client_;
+ std::string log_;
+};
+
+// For return values, see:
+// https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debug-status-xxx
+class MyCallback : public IDebugEventCallbacks {
+ public:
+ // Inherited via IDebugEventCallbacks
+ HRESULT __stdcall QueryInterface(REFIID InterfaceId,
+ PVOID* Interface) override;
+ ULONG __stdcall AddRef(void) override;
+ ULONG __stdcall Release(void) override;
+ HRESULT __stdcall GetInterestMask(PULONG Mask) override;
+ HRESULT __stdcall Breakpoint(PDEBUG_BREAKPOINT Bp) override;
+ HRESULT __stdcall Exception(PEXCEPTION_RECORD64 Exception,
+ ULONG FirstChance) override;
+ HRESULT __stdcall CreateThread(ULONG64 Handle, ULONG64 DataOffset,
+ ULONG64 StartOffset) override;
+ HRESULT __stdcall ExitThread(ULONG ExitCode) override;
+ HRESULT __stdcall ExitProcess(ULONG ExitCode) override;
+ HRESULT __stdcall LoadModule(ULONG64 ImageFileHandle, ULONG64 BaseOffset,
+ ULONG ModuleSize, PCSTR ModuleName,
+ PCSTR ImageName, ULONG CheckSum,
+ ULONG TimeDateStamp) override;
+ HRESULT __stdcall UnloadModule(PCSTR ImageBaseName,
+ ULONG64 BaseOffset) override;
+ HRESULT __stdcall SystemError(ULONG Error, ULONG Level) override;
+ HRESULT __stdcall SessionStatus(ULONG Status) override;
+ HRESULT __stdcall ChangeDebuggeeState(ULONG Flags, ULONG64 Argument) override;
+ HRESULT __stdcall ChangeEngineState(ULONG Flags, ULONG64 Argument) override;
+ HRESULT __stdcall ChangeSymbolState(ULONG Flags, ULONG64 Argument) override;
+ HRESULT __stdcall CreateProcessW(ULONG64 ImageFileHandle, ULONG64 Handle,
+ ULONG64 BaseOffset, ULONG ModuleSize,
+ PCSTR ModuleName, PCSTR ImageName,
+ ULONG CheckSum, ULONG TimeDateStamp,
+ ULONG64 InitialThreadHandle,
+ ULONG64 ThreadDataOffset,
+ ULONG64 StartOffset) override;
+};
+
+} // namespace v8windbg_test
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TOOLS_V8WINDBG_TEST_DEBUG_CALLBACKS_H_
diff --git a/deps/v8/tools/v8windbg/test/script.js b/deps/v8/tools/v8windbg/test/script.js
new file mode 100644
index 0000000000..6ec21e0bc4
--- /dev/null
+++ b/deps/v8/tools/v8windbg/test/script.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function a() {
+ JSON.stringify({firstProp: 12345, secondProp: null}, function replacer() {});
+}
+
+function b() {
+ var hello = 'hello';
+ a();
+}
+
+b();
diff --git a/deps/v8/tools/v8windbg/test/v8windbg-test.cc b/deps/v8/tools/v8windbg/test/v8windbg-test.cc
new file mode 100644
index 0000000000..59414f341d
--- /dev/null
+++ b/deps/v8/tools/v8windbg/test/v8windbg-test.cc
@@ -0,0 +1,243 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstdio>
+#include <exception>
+#include <vector>
+
+#include "src/base/logging.h"
+#include "tools/v8windbg/test/debug-callbacks.h"
+
+// See the docs at
+// https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/using-the-debugger-engine-api
+
+namespace v8 {
+namespace internal {
+namespace v8windbg_test {
+
+namespace {
+
+// Loads a named extension library upon construction and unloads it upon
+// destruction.
+class LoadExtensionScope {
+ public:
+ LoadExtensionScope(WRL::ComPtr<IDebugControl4> p_debug_control,
+ std::wstring extension_path)
+ : p_debug_control_(p_debug_control),
+ extension_path_(std::move(extension_path)) {
+ p_debug_control->AddExtensionWide(extension_path_.c_str(), 0, &ext_handle_);
+ // HACK: Below fails, but is required for the extension to actually
+ // initialize. Just the AddExtension call doesn't actually load and
+ // initialize it.
+ p_debug_control->CallExtension(ext_handle_, "Foo", "Bar");
+ }
+ ~LoadExtensionScope() {
+ // Let the extension uninitialize so it can deallocate memory, meaning any
+ // reported memory leaks should be real bugs.
+ p_debug_control_->RemoveExtension(ext_handle_);
+ }
+
+ private:
+ LoadExtensionScope(const LoadExtensionScope&) = delete;
+ LoadExtensionScope& operator=(const LoadExtensionScope&) = delete;
+ WRL::ComPtr<IDebugControl4> p_debug_control_;
+ ULONG64 ext_handle_;
+ // This string is part of the heap snapshot when the extension loads, so keep
+ // it alive until after the extension unloads and checks for any heap changes.
+ std::wstring extension_path_;
+};
+
+// Initializes COM upon construction and uninitializes it upon destruction.
+class ComScope {
+ public:
+ ComScope() { hr_ = CoInitializeEx(nullptr, COINIT_MULTITHREADED); }
+ ~ComScope() {
+ // "To close the COM library gracefully on a thread, each successful call to
+ // CoInitialize or CoInitializeEx, including any call that returns S_FALSE,
+ // must be balanced by a corresponding call to CoUninitialize."
+ // https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-coinitializeex
+ if (SUCCEEDED(hr_)) {
+ CoUninitialize();
+ }
+ }
+ HRESULT hr() { return hr_; }
+
+ private:
+ HRESULT hr_;
+};
+
+// Sets a breakpoint. Returns S_OK if the function name resolved successfully
+// and the breakpoint is in a non-deferred state.
+HRESULT SetBreakpoint(WRL::ComPtr<IDebugControl4> p_debug_control,
+ const char* function_name) {
+ WRL::ComPtr<IDebugBreakpoint> bp;
+ HRESULT hr =
+ p_debug_control->AddBreakpoint(DEBUG_BREAKPOINT_CODE, DEBUG_ANY_ID, &bp);
+ if (FAILED(hr)) return hr;
+ hr = bp->SetOffsetExpression(function_name);
+ if (FAILED(hr)) return hr;
+ hr = bp->AddFlags(DEBUG_BREAKPOINT_ENABLED);
+ if (FAILED(hr)) return hr;
+
+ // Check whether the symbol could be found.
+ uint64_t offset;
+ hr = bp->GetOffset(&offset);
+ return hr;
+}
+
+// Sets a breakpoint. Depending on the build configuration, the function might
+// be in the v8 or d8 module, so this function tries to set both.
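+// For example, "v8::internal::JsonStringify" is tried as
+// "v8!v8::internal::JsonStringify", then "d8!v8::internal::JsonStringify",
+// and finally "d8_exe!v8::internal::JsonStringify".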
+HRESULT SetBreakpointInV8OrD8(WRL::ComPtr<IDebugControl4> p_debug_control,
+ const std::string& function_name) {
+  // Component builds call the V8 module "v8". Try it first, because such
+  // builds also contain a module named "d8" or "d8_exe" in which we should
+  // avoid attempting to set the breakpoint.
+ HRESULT hr = SetBreakpoint(p_debug_control, ("v8!" + function_name).c_str());
+ if (SUCCEEDED(hr)) return hr;
+
+ // x64 release builds call it "d8".
+ hr = SetBreakpoint(p_debug_control, ("d8!" + function_name).c_str());
+ if (SUCCEEDED(hr)) return hr;
+
+ // x86 release builds call it "d8_exe".
+ return SetBreakpoint(p_debug_control, ("d8_exe!" + function_name).c_str());
+}
+
+void RunAndCheckOutput(const char* friendly_name, const char* command,
+ std::vector<const char*> expected_substrings,
+ MyOutput* output, IDebugControl4* p_debug_control) {
+ output->ClearLog();
+ CHECK(SUCCEEDED(p_debug_control->Execute(DEBUG_OUTCTL_ALL_CLIENTS, command,
+ DEBUG_EXECUTE_ECHO)));
+ for (const char* expected : expected_substrings) {
+ CHECK(output->GetLog().find(expected) != std::string::npos);
+ }
+}
+
+} // namespace
+
+void RunTests() {
+ // Initialize COM... Though it doesn't seem to matter if you don't!
+ ComScope com_scope;
+ CHECK(SUCCEEDED(com_scope.hr()));
+
+ // Get the file path of the module containing this test function. It should be
+ // in the output directory alongside the data dependencies required by this
+ // test (d8.exe, v8windbg.dll, and v8windbg-test-script.js).
+ HMODULE module = nullptr;
+ bool success =
+ GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ reinterpret_cast<LPCWSTR>(&RunTests), &module);
+ CHECK(success);
+ wchar_t this_module_path[MAX_PATH];
+ DWORD path_size = GetModuleFileName(module, this_module_path, MAX_PATH);
+ CHECK(path_size != 0);
+ HRESULT hr = PathCchRemoveFileSpec(this_module_path, MAX_PATH);
+ CHECK(SUCCEEDED(hr));
+
+ // Get the Debug client
+ WRL::ComPtr<IDebugClient5> p_client;
+ hr = DebugCreate(__uuidof(IDebugClient5), &p_client);
+ CHECK(SUCCEEDED(hr));
+
+ WRL::ComPtr<IDebugSymbols3> p_symbols;
+ hr = p_client->QueryInterface(__uuidof(IDebugSymbols3), &p_symbols);
+ CHECK(SUCCEEDED(hr));
+
+  // Symbol loading fails if the pdb is in the same folder as the exe but that
+  // folder is not on the symbol path.
+ hr = p_symbols->SetSymbolPathWide(this_module_path);
+ CHECK(SUCCEEDED(hr));
+
+ // Set the event callbacks
+ MyCallback callback;
+ hr = p_client->SetEventCallbacks(&callback);
+ CHECK(SUCCEEDED(hr));
+
+ // Launch the process with the debugger attached
+ std::wstring command_line =
+ std::wstring(L"\"") + this_module_path + L"\\d8.exe\" \"" +
+ this_module_path + L"\\obj\\tools\\v8windbg\\v8windbg-test-script.js\"";
+ DEBUG_CREATE_PROCESS_OPTIONS proc_options;
+ proc_options.CreateFlags = DEBUG_PROCESS;
+ proc_options.EngCreateFlags = 0;
+ proc_options.VerifierFlags = 0;
+ proc_options.Reserved = 0;
+ hr = p_client->CreateProcessWide(
+ 0, const_cast<wchar_t*>(command_line.c_str()), DEBUG_PROCESS);
+ CHECK(SUCCEEDED(hr));
+
+ // Wait for the attach event
+ WRL::ComPtr<IDebugControl4> p_debug_control;
+ hr = p_client->QueryInterface(__uuidof(IDebugControl4), &p_debug_control);
+ CHECK(SUCCEEDED(hr));
+ hr = p_debug_control->WaitForEvent(0, INFINITE);
+ CHECK(SUCCEEDED(hr));
+
+ // Break again after non-delay-load modules are loaded.
+ hr = p_debug_control->AddEngineOptions(DEBUG_ENGOPT_INITIAL_BREAK);
+ CHECK(SUCCEEDED(hr));
+ hr = p_debug_control->WaitForEvent(0, INFINITE);
+ CHECK(SUCCEEDED(hr));
+
+ // Set a breakpoint in a C++ function called by the script.
+ hr = SetBreakpointInV8OrD8(p_debug_control, "v8::internal::JsonStringify");
+ CHECK(SUCCEEDED(hr));
+
+ hr = p_debug_control->SetExecutionStatus(DEBUG_STATUS_GO);
+ CHECK(SUCCEEDED(hr));
+
+ // Wait for the breakpoint.
+ hr = p_debug_control->WaitForEvent(0, INFINITE);
+ CHECK(SUCCEEDED(hr));
+
+ ULONG type, proc_id, thread_id, desc_used;
+ byte desc[1024];
+ hr = p_debug_control->GetLastEventInformation(
+ &type, &proc_id, &thread_id, nullptr, 0, nullptr,
+ reinterpret_cast<PSTR>(desc), 1024, &desc_used);
+ CHECK(SUCCEEDED(hr));
+
+ LoadExtensionScope extension_loaded(
+ p_debug_control, this_module_path + std::wstring(L"\\v8windbg.dll"));
+
+  // Set the output callbacks after the extension is loaded, so the output
+  // object gets destroyed before the extension unloads. This avoids
+  // incorrectly reporting that the output buffer was leaked during extension
+  // teardown.
+ MyOutput output(p_client);
+
+ // Set stepping mode.
+ hr = p_debug_control->SetCodeLevel(DEBUG_LEVEL_SOURCE);
+ CHECK(SUCCEEDED(hr));
+
+ // Do some actual testing
+ RunAndCheckOutput("bitfields",
+ "p;dx replacer.Value.shared_function_info.flags",
+ {"kNamedExpression"}, &output, p_debug_control.Get());
+
+ RunAndCheckOutput("in-object properties",
+ "dx object.Value.@\"in-object properties\"[1]",
+ {"NullValue", "Oddball"}, &output, p_debug_control.Get());
+
+ RunAndCheckOutput(
+ "arrays of structs",
+ "dx object.Value.map.instance_descriptors.descriptors[1].key",
+ {"\"secondProp\"", "SeqOneByteString"}, &output, p_debug_control.Get());
+
+ RunAndCheckOutput(
+ "local variables",
+ "dx -r1 @$curthread.Stack.Frames.Where(f => "
+ "f.ToDisplayString().Contains(\"InterpreterEntryTrampoline\")).Skip(1)."
+ "First().LocalVariables.@\"memory interpreted as Objects\"",
+ {"\"hello\""}, &output, p_debug_control.Get());
+
+ // Detach before exiting
+ hr = p_client->DetachProcesses();
+ CHECK(SUCCEEDED(hr));
+}
+
+} // namespace v8windbg_test
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/tools/v8windbg/test/v8windbg-test.h b/deps/v8/tools/v8windbg/test/v8windbg-test.h
new file mode 100644
index 0000000000..48a845470d
--- /dev/null
+++ b/deps/v8/tools/v8windbg/test/v8windbg-test.h
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_TEST_V8WINDBG_TEST_H_
+#define V8_TOOLS_V8WINDBG_TEST_V8WINDBG_TEST_H_
+
+namespace v8 {
+namespace internal {
+namespace v8windbg_test {
+
+void RunTests();
+
+}
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TOOLS_V8WINDBG_TEST_V8WINDBG_TEST_H_
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index e55c9c427d..482c695b65 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,10 +7,10 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles.....................
-I can't add trailing whitespaces, so I'm adding this line...
+The bartender starts to shake the bottles......................
+I can't add trailing whitespaces, so I'm adding this line.....
I'm starting to think that just adding trailing whitespaces might not be bad.
-Because whitespaces are not that funny.....
+Because whitespaces are not that funny......
Today's answer to life the universe and everything is 12950!
Today's answer to life the universe and everything is 6727!